{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.592154026031494,
"min": 2.5570156574249268,
"max": 2.682386875152588,
"count": 110
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 47695.6328125,
"min": 35092.65625,
"max": 60226.41796875,
"count": 110
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 66.3108108108108,
"min": 44.252252252252255,
"max": 84.64912280701755,
"count": 110
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19628.0,
"min": 10740.0,
"max": 20296.0,
"count": 110
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1635.4284855194612,
"min": 1618.2282061389467,
"max": 1655.5389640626079,
"count": 110
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 242043.41585688025,
"min": 145387.45546365986,
"max": 364862.61244870845,
"count": 110
},
"SoccerTwos.Step.mean": {
"value": 15909956.0,
"min": 14819786.0,
"max": 15909956.0,
"count": 110
},
"SoccerTwos.Step.sum": {
"value": 15909956.0,
"min": 14819786.0,
"max": 15909956.0,
"count": 110
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.05109145864844322,
"min": -0.07738807052373886,
"max": 0.06313871592283249,
"count": 110
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.561535835266113,
"min": -12.595863342285156,
"max": 11.806940078735352,
"count": 110
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04883280396461487,
"min": -0.07554891705513,
"max": 0.061777472496032715,
"count": 110
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.227254867553711,
"min": -13.37215805053711,
"max": 11.552387237548828,
"count": 110
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 110
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 110
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1395540523367959,
"min": -0.3436413769064278,
"max": 0.2235848073717914,
"count": 110
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -20.653999745845795,
"min": -55.84479999542236,
"max": 35.32639956474304,
"count": 110
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1395540523367959,
"min": -0.3436413769064278,
"max": 0.2235848073717914,
"count": 110
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -20.653999745845795,
"min": -55.84479999542236,
"max": 35.32639956474304,
"count": 110
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 110
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 110
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013933316600741819,
"min": 0.013209843584384847,
"max": 0.022192529074285025,
"count": 53
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013933316600741819,
"min": 0.013209843584384847,
"max": 0.022192529074285025,
"count": 53
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0994767519334952,
"min": 0.09347442363699278,
"max": 0.1207024835050106,
"count": 53
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0994767519334952,
"min": 0.09347442363699278,
"max": 0.1207024835050106,
"count": 53
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10295372903347015,
"min": 0.09738679577906927,
"max": 0.12896027714014052,
"count": 53
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10295372903347015,
"min": 0.09738679577906927,
"max": 0.12896027714014052,
"count": 53
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 2.9999999999999994e-05,
"min": 2.9999999999999994e-05,
"max": 2.9999999999999994e-05,
"count": 53
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 2.9999999999999994e-05,
"min": 2.9999999999999994e-05,
"max": 2.9999999999999994e-05,
"count": 53
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 53
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 53
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 53
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 53
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677651710",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/opt/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1677659095"
},
"total": 4636.692252209001,
"count": 1,
"self": 0.2680945010006326,
"children": {
"run_training.setup": {
"total": 0.010993665999999958,
"count": 1,
"self": 0.010993665999999958
},
"TrainerController.start_learning": {
"total": 4636.413164042,
"count": 1,
"self": 0.9376224609386554,
"children": {
"TrainerController._reset_env": {
"total": 1.923663834000647,
"count": 7,
"self": 1.923663834000647
},
"TrainerController.advance": {
"total": 4633.36911587206,
"count": 76282,
"self": 0.8122176161059542,
"children": {
"env_step": {
"total": 3528.3658031689088,
"count": 76282,
"self": 3380.255637585978,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.47876810502208,
"count": 76282,
"self": 3.950851717101102,
"children": {
"TorchPolicy.evaluate": {
"total": 143.52791638792098,
"count": 137748,
"self": 143.52791638792098
}
}
},
"workers": {
"total": 0.6313974779087421,
"count": 76282,
"self": 0.0,
"children": {
"worker_root": {
"total": 4632.816362555932,
"count": 76282,
"is_parallel": true,
"self": 1360.0335225539134,
"children": {
"steps_from_proto": {
"total": 0.011200664999839516,
"count": 14,
"is_parallel": true,
"self": 0.0013162899999592526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.009884374999880263,
"count": 56,
"is_parallel": true,
"self": 0.009884374999880263
}
}
},
"UnityEnvironment.step": {
"total": 3272.7716393370188,
"count": 76282,
"is_parallel": true,
"self": 9.818880324090514,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 60.83900737293471,
"count": 76282,
"is_parallel": true,
"self": 60.83900737293471
},
"communicator.exchange": {
"total": 3073.5130374290043,
"count": 76282,
"is_parallel": true,
"self": 3073.5130374290043
},
"steps_from_proto": {
"total": 128.60071421098945,
"count": 152564,
"is_parallel": true,
"self": 14.18474648790729,
"children": {
"_process_rank_one_or_two_observation": {
"total": 114.41596772308216,
"count": 610256,
"is_parallel": true,
"self": 114.41596772308216
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1104.191095087046,
"count": 76282,
"self": 7.467344646987385,
"children": {
"process_trajectory": {
"total": 209.25597747706252,
"count": 76282,
"self": 208.95552810206254,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30044937499997104,
"count": 2,
"self": 0.30044937499997104
}
}
},
"_update_policy": {
"total": 887.4677729629962,
"count": 53,
"self": 88.07458638399214,
"children": {
"TorchPOCAOptimizer.update": {
"total": 799.393186579004,
"count": 1590,
"self": 799.393186579004
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.000001692678779e-07,
"count": 1,
"self": 5.000001692678779e-07
},
"TrainerController._save_models": {
"total": 0.18276137500015466,
"count": 1,
"self": 0.0017623749999984284,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18099900000015623,
"count": 1,
"self": 0.18099900000015623
}
}
}
}
}
}
}