GindaChen committed on
Commit e538362 · verified · 1 Parent(s): e06b20a

Upload folder using huggingface_hub
attnserver.run_attnserver.slurm.sh.343374.err.log ADDED
@@ -0,0 +1,657 @@
+ + source /mnt/weka/home/hao.zhang/conda/miniconda/bin/activate
+ ++ _CONDA_ROOT=/mnt/weka/home/hao.zhang/conda/miniconda
+ ++ . /mnt/weka/home/hao.zhang/conda/miniconda/etc/profile.d/conda.sh
+ +++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+ +++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+ +++ export _CE_M=
+ +++ _CE_M=
+ +++ export _CE_CONDA=
+ +++ _CE_CONDA=
+ +++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+ +++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+ +++ '[' -z x ']'
+ ++ conda activate
+ ++ local cmd=activate
+ ++ case "$cmd" in
+ ++ __conda_activate activate
+ ++ '[' -n '' ']'
+ ++ local ask_conda
+ +++ PS1=
+ +++ __conda_exe shell.posix activate
+ +++ '[' -n '' ']'
+ +++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate
+ ++ ask_conda='unset _CE_M
+ unset _CE_CONDA
+ PS1='\''(base) '\''
+ export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
+ export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
+ export CONDA_SHLVL='\''3'\''
+ export CONDA_DEFAULT_ENV='\''base'\''
+ export CONDA_PROMPT_MODIFIER='\''(base) '\''
+ export CONDA_PREFIX_2='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
+ export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
+ export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
+ ++ eval 'unset _CE_M
+ unset _CE_CONDA
+ PS1='\''(base) '\''
+ export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
+ export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
+ export CONDA_SHLVL='\''3'\''
+ export CONDA_DEFAULT_ENV='\''base'\''
+ export CONDA_PROMPT_MODIFIER='\''(base) '\''
+ export CONDA_PREFIX_2='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
+ export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
+ export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
+ +++ unset _CE_M
+ +++ unset _CE_CONDA
+ +++ PS1='(base) '
+ +++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
+ +++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
+ +++ export CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda
+ +++ CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda
+ +++ export CONDA_SHLVL=3
+ +++ CONDA_SHLVL=3
+ +++ export CONDA_DEFAULT_ENV=base
+ +++ CONDA_DEFAULT_ENV=base
+ +++ export 'CONDA_PROMPT_MODIFIER=(base) '
+ +++ CONDA_PROMPT_MODIFIER='(base) '
+ +++ export CONDA_PREFIX_2=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
+ +++ CONDA_PREFIX_2=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
+ +++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+ +++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+ +++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+ +++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+ ++ __conda_hashr
+ ++ '[' -n '' ']'
+ ++ '[' -n '' ']'
+ ++ hash -r
+ + conda activate junda-attnserver
+ + local cmd=activate
+ + case "$cmd" in
+ + __conda_activate activate junda-attnserver
+ + '[' -n '' ']'
+ + local ask_conda
+ ++ PS1='(base) '
+ ++ __conda_exe shell.posix activate junda-attnserver
+ ++ '[' -n '' ']'
+ ++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate junda-attnserver
+ + ask_conda='unset _CE_M
+ unset _CE_CONDA
+ PS1='\''(junda-attnserver) '\''
+ export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
+ export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
+ export CONDA_SHLVL='\''4'\''
+ export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
+ export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
+ export CONDA_PREFIX_3='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
+ export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
+ export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
+ + eval 'unset _CE_M
+ unset _CE_CONDA
+ PS1='\''(junda-attnserver) '\''
+ export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
+ export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
+ export CONDA_SHLVL='\''4'\''
+ export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
+ export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
+ export CONDA_PREFIX_3='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
+ export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
+ export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
+ ++ unset _CE_M
+ ++ unset _CE_CONDA
+ ++ PS1='(junda-attnserver) '
+ ++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
+ ++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
+ ++ export CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
+ ++ CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
+ ++ export CONDA_SHLVL=4
+ ++ CONDA_SHLVL=4
+ ++ export CONDA_DEFAULT_ENV=junda-attnserver
+ ++ CONDA_DEFAULT_ENV=junda-attnserver
+ ++ export 'CONDA_PROMPT_MODIFIER=(junda-attnserver) '
+ ++ CONDA_PROMPT_MODIFIER='(junda-attnserver) '
+ ++ export CONDA_PREFIX_3=/mnt/weka/home/hao.zhang/conda/miniconda
+ ++ CONDA_PREFIX_3=/mnt/weka/home/hao.zhang/conda/miniconda
+ ++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+ ++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+ ++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+ ++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+ + __conda_hashr
+ + '[' -n '' ']'
+ + '[' -n '' ']'
+ + hash -r
+ + export CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v6
+ + CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v6
+ + export GPT_CHECKPOINT_PREFIX=/mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint
+ + GPT_CHECKPOINT_PREFIX=/mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint
+ + export CHECKPOINT_PATH=/mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs
+ + CHECKPOINT_PATH=/mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs
+ + mkdir -p /mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v6
+ + mkdir -p /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs
+ + export PROF_TP_SIZE=8
+ + PROF_TP_SIZE=8
+ + export PROF_CP_SIZE=8
+ + PROF_CP_SIZE=8
+ + export PROF_BS=1
+ + PROF_BS=1
+ + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
+ + export PROF_CTX_LENGTH=1024
+ + PROF_CTX_LENGTH=1024
+ + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v6/mytrace.L1024*tp8.cp8.bs1.json'
+ + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v6/mytrace.L1024*tp8.cp8.bs1.json' ']'
+ + echo 'Running ctx_length=1024, TP_SIZE=8, CP_SIZE=8, BATCH_SIZE=1'
+ + srun bash ./attnserver.sh
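Note that the existence check above tests the quoted pattern as a literal filename ('[' -f '...mytrace.L1024*tp8.cp8.bs1.json' ']'), so the '*' is never glob-expanded and the skip can never trigger. A minimal Python sketch of a check that does expand the pattern (the path is copied from the trace; the skip-if-present behavior is an assumed intent of the script):

    import glob

    # Pattern copied from the trace above; glob.glob() expands the '*',
    # unlike the quoted argument to bash's [ -f ].
    pattern = ("/mnt/sharefs/users/hao.zhang/junda/"
               "megatron-prof-data--unstable-v6/mytrace.L1024*tp8.cp8.bs1.json")
    if glob.glob(pattern):
        print("trace exists, skipping ctx_length=1024")  # assumed intent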
+ + which python3
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 0 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ + which python3
+ + which python3
+ + which python3
+ + which python3
+ + which python3
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 5 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 4 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 7 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 1 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 2 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ + which python3
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 6 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ + which python3
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 8 --node_rank 3 --rdzv_id 343374 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-296:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --load /mnt/sharefs/users/hao.zhang/junda/gpt-checkpoint/ctx-tp-cp-bs --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
+ W0621 22:41:21.974000 1717812 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:21.974000 1717812 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:21.974000 1717812 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:21.974000 1717812 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:21.974000 560583 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:21.974000 560583 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:21.974000 560583 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:21.974000 560583 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:21.973000 2014685 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:21.973000 2014685 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:21.973000 2014685 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:21.973000 2014685 site-packages/torch/distributed/run.py:766] *****************************************
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
+ W0621 22:41:21.995000 1736156 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:21.995000 1736156 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:21.995000 1736156 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:21.995000 1736156 site-packages/torch/distributed/run.py:766] *****************************************
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
+ W0621 22:41:22.061000 4132561 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:22.061000 4132561 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:22.061000 4132561 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:22.061000 4132561 site-packages/torch/distributed/run.py:766] *****************************************
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+ and will be removed in future. Use torchrun.
+ Note that --use-env is set by default in torchrun.
+ If your script expects `--local-rank` argument to be set, please
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
+ further instructions
+
+ main()
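The FutureWarning repeated above asks callers to migrate from torch.distributed.launch to torchrun and to read the local rank from the environment instead of a --local-rank argument. A minimal sketch of the suggested change, assuming a torchrun-style launch:

    import os

    import torch

    # torchrun sets LOCAL_RANK for each worker instead of passing --local-rank.
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)  # bind this process to its GPU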
+ W0621 22:41:23.086000 1883919 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:23.086000 1883919 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:23.086000 1883919 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:23.086000 1883919 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:23.085000 860283 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:23.085000 860283 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:23.085000 860283 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:23.085000 860283 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:23.084000 1887036 site-packages/torch/distributed/run.py:766]
+ W0621 22:41:23.084000 1887036 site-packages/torch/distributed/run.py:766] *****************************************
+ W0621 22:41:23.084000 1887036 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0621 22:41:23.084000 1887036 site-packages/torch/distributed/run.py:766] *****************************************
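The launcher warnings above note that OMP_NUM_THREADS is defaulted to 1 per worker and should be tuned for the application. A hedged sketch of pinning it explicitly before torch is first imported (the value 4 is an arbitrary illustration, not a recommendation derived from this log):

    import os

    # Must be set before the first torch/OpenMP import to take effect.
    os.environ.setdefault("OMP_NUM_THREADS", "4")

    import torch  # noqa: E402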
+ [rank56]:[W621 22:41:50.896887452 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 56] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank32]:[W621 22:41:50.388412655 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 32] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank48]:[W621 22:41:50.915915939 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 48] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank24]:[W621 22:41:50.924930994 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank40]:[W621 22:41:50.102572144 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 40] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank0]:[W621 22:41:50.816859038 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank8]:[W621 22:41:50.233549317 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank4]:[W621 22:41:51.235067539 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank7]:[W621 22:41:51.235079011 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank63]:[W621 22:41:51.387223726 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 63] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank60]:[W621 22:41:51.387327275 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 60] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank15]:[W621 22:41:51.528338056 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank28]:[W621 22:41:51.404135841 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank31]:[W621 22:41:51.404159462 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank55]:[W621 22:41:51.395991319 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 55] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank23]:[W621 22:41:51.962990771 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank20]:[W621 22:41:51.963065422 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank47]:[W621 22:41:51.579689087 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 47] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank44]:[W621 22:41:51.579694628 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 44] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank12]:[W621 22:41:51.532965591 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank39]:[W621 22:41:51.873498007 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 39] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank36]:[W621 22:41:51.873594703 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 36] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank52]:[W621 22:41:51.399677484 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 52] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank27]:[W621 22:41:51.473488221 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank11]:[W621 22:41:51.602442306 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank19]:[W621 22:41:51.035163763 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank59]:[W621 22:41:51.472598034 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 59] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank3]:[W621 22:41:51.322075454 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank51]:[W621 22:41:51.479080285 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 51] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank43]:[W621 22:41:51.663369700 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 43] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank35]:[W621 22:41:51.958436738 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 35] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank61]:[W621 22:41:51.483879074 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 61] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank17]:[W621 22:41:51.058730375 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank25]:[W621 22:41:51.502696907 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank41]:[W621 22:41:51.676102187 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 41] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank13]:[W621 22:41:51.629966888 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank9]:[W621 22:41:51.630000009 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank5]:[W621 22:41:51.339319828 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank53]:[W621 22:41:51.495758148 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 53] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank1]:[W621 22:41:51.339506763 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank33]:[W621 22:41:51.971752065 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 33] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank37]:[W621 22:41:51.971946994 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 37] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank29]:[W621 22:41:51.505611081 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank49]:[W621 22:41:51.496585417 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 49] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank21]:[W621 22:41:51.063688724 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank16]:[W621 22:41:51.064025038 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank45]:[W621 22:41:51.683934352 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 45] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank57]:[W621 22:41:51.498963847 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 57] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank58]:[W621 22:41:51.499939716 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 58] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank34]:[W621 22:41:51.983030459 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 34] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank50]:[W621 22:41:51.507298711 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 50] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank10]:[W621 22:41:51.645436735 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank2]:[W621 22:41:51.353946872 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank26]:[W621 22:41:51.519170568 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank18]:[W621 22:41:51.077568400 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank42]:[W621 22:41:51.695778868 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 42] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank38]:[W621 22:41:51.014723419 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 38] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank22]:[W621 22:41:51.106677336 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank14]:[W621 22:41:51.676518544 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank6]:[W621 22:41:51.384613561 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank30]:[W621 22:41:51.549938598 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank46]:[W621 22:41:51.724590652 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 46] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank54]:[W621 22:41:51.543951716 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 54] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+ [rank62]:[W621 22:41:51.544946982 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 62] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
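The ProcessGroupNCCL warnings above recommend passing device_id to init_process_group() so that each rank's GPU binding is explicit rather than guessed. A minimal sketch, assuming a torchrun-style LOCAL_RANK environment variable:

    import os

    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])
    # Explicit rank -> GPU mapping; avoids the "device ... currently unknown"
    # warning and the potential hang it describes.
    dist.init_process_group(
        backend="nccl",
        device_id=torch.device(f"cuda:{local_rank}"),
    )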
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
386
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
387
+ warnings.warn(
388
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
389
+ warnings.warn(
390
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
391
+ warnings.warn(
392
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
393
+ warnings.warn(
394
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
395
+ warnings.warn(
396
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
397
+ warnings.warn(
398
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
399
+ warnings.warn(
400
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
401
+ warnings.warn(
402
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
403
+ warnings.warn(
404
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
405
+ warnings.warn(
406
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
407
+ warnings.warn(
408
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
409
+ warnings.warn(
410
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
411
+ warnings.warn(
412
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
413
+ warnings.warn(
414
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
415
+ warnings.warn(
416
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
417
+ warnings.warn(
418
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
419
+ warnings.warn(
420
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
421
+ warnings.warn(
422
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
423
+ warnings.warn(
424
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
425
+ warnings.warn(
426
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
427
+ warnings.warn(
428
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
429
+ warnings.warn(
430
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
431
+ warnings.warn(
432
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
433
+ warnings.warn(
434
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
435
+ warnings.warn(
436
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
437
+ warnings.warn(
438
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
439
+ warnings.warn(
440
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
441
+ warnings.warn(
442
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
443
+ warnings.warn(
444
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
445
+ warnings.warn(
446
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
447
+ warnings.warn(
448
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
449
+ warnings.warn(
450
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
451
+ warnings.warn(
452
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
453
+ warnings.warn(
454
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
455
+ warnings.warn(
456
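The UserWarning above is emitted once per rank when fp8 is passed directly to the layer-spec helper. A minimal caller-side migration sketch, assuming the precision setting moves to the config object that consumes the spec (that destination is an assumption; the warning only states that the argument is deprecated):

# Sketch: drop the deprecated fp8 argument (assumed migration path).
from megatron.core.models.gpt.gpt_layer_specs import (
    get_gpt_layer_with_transformer_engine_spec,
)

# Before -- triggers the UserWarning at gpt_layer_specs.py:94:
#   spec = get_gpt_layer_with_transformer_engine_spec(fp8="hybrid")

# After -- omit fp8 here and configure precision on the surrounding
# transformer/model config instead (exact destination is an assumption):
spec = get_gpt_layer_with_transformer_engine_spec()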
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
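Per the DeprecationWarning above, offload_weights=True is accepted but ignored by Transformer Engine's CPU-offload context. A minimal sketch that requests only activation offloading; keyword names besides offload_activations/offload_weights depend on the installed TE version and should be treated as assumptions:

# Sketch: activation offloading only; weight offloading is a no-op now.
from transformer_engine.pytorch import get_cpu_offload_context

offload_ctx, sync_fn = get_cpu_offload_context(
    enabled=True,
    offload_activations=True,   # still effective
    offload_weights=False,      # passing True only triggers the warning above
)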
+ [rank7]:[W621 22:42:26.147877617 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank3]:[W621 22:42:26.156442495 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank1]:[W621 22:42:27.241595182 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank5]:[W621 22:42:27.268258607 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank2]:[W621 22:42:27.462610852 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank4]:[W621 22:42:27.462967515 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank6]:[W621 22:42:27.506092034 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank0]:[W621 22:42:27.566326613 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank33]:[W621 22:42:27.485304633 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank55]:[W621 22:42:27.041002499 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank57]:[W621 22:42:27.045317439 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank62]:[W621 22:42:27.046891693 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank25]:[W621 22:42:27.069776461 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank17]:[W621 22:42:27.637404293 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank49]:[W621 22:42:27.070599760 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank21]:[W621 22:42:27.665885575 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank45]:[W621 22:42:27.301473321 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank53]:[W621 22:42:27.119622540 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank37]:[W621 22:42:27.595446140 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank52]:[W621 22:42:27.120726213 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank28]:[W621 22:42:27.130102995 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank56]:[W621 22:42:27.120188966 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank61]:[W621 22:42:27.124416075 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank29]:[W621 22:42:27.138602922 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank36]:[W621 22:42:27.617008888 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank60]:[W621 22:42:27.136023586 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank27]:[W621 22:42:27.156169913 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank26]:[W621 22:42:27.170505888 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank63]:[W621 22:42:27.156882913 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank22]:[W621 22:42:27.739970185 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank11]:[W621 22:42:27.313289543 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank30]:[W621 22:42:27.192065315 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank19]:[W621 22:42:27.754498628 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank9]:[W621 22:42:27.327216814 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank58]:[W621 22:42:27.186867445 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank12]:[W621 22:42:27.330904674 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank14]:[W621 22:42:27.339096730 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank43]:[W621 22:42:27.390267627 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank41]:[W621 22:42:27.394101948 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank51]:[W621 22:42:27.218128552 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank23]:[W621 22:42:27.789078937 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank31]:[W621 22:42:27.241408446 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank20]:[W621 22:42:27.802265886 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank50]:[W621 22:42:27.242422377 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank42]:[W621 22:42:27.434705579 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank18]:[W621 22:42:27.829311691 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank10]:[W621 22:42:27.398407555 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank24]:[W621 22:42:27.275294074 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank34]:[W621 22:42:27.749077089 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank16]:[W621 22:42:27.843026015 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank35]:[W621 22:42:27.754800084 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank39]:[W621 22:42:27.769060639 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank59]:[W621 22:42:27.294026421 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank46]:[W621 22:42:27.486821567 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank40]:[W621 22:42:27.489141685 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank38]:[W621 22:42:27.790781698 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank32]:[W621 22:42:27.792833469 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank8]:[W621 22:42:27.462832827 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank54]:[W621 22:42:27.335053014 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank13]:[W621 22:42:27.476862226 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank44]:[W621 22:42:28.559000987 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank48]:[W621 22:42:28.377904937 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank47]:[W621 22:42:28.606296350 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+ [rank15]:[W621 22:42:28.580070241 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
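Each rank above exited without tearing down its NCCL process group, which is what the ProcessGroupNCCL warning points at. A minimal sketch of the shutdown PyTorch recommends, run on every rank at the end of training:

# Sketch: explicit teardown so ProcessGroupNCCL does not warn at exit.
import torch.distributed as dist

if dist.is_initialized():
    dist.barrier()                 # optional: let all ranks drain first
    dist.destroy_process_group()   # silences the warning above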
+ + set +x
+ + set +x
+ + set +x
+ + set +x
+ + set +x
+ + set +x
+ + set +x
+ + set +x
+ + '[' '!' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v6/mytrace.L1024*tp8.cp8.bs1.json' ']'
+ + exit 0
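The final test above checks for the profiler trace with a '*' in the path; a [ -f ] test only succeeds if the shell expanded that glob to a single existing file. A Python sketch of the equivalent any-match check (the path is copied from the trace; the exit-status convention is an assumption mirroring the script):

# Sketch: glob-based existence check for the trace file(s).
import glob
import sys

matches = glob.glob(
    "/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v6/"
    "mytrace.L1024*tp8.cp8.bs1.json"
)
sys.exit(0 if matches else 1)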
attnserver.run_attnserver.slurm.sh.343374.out.log ADDED