PyramidsRND1 / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43619248270988464,
"min": 0.43619248270988464,
"max": 1.4520533084869385,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13239.314453125,
"min": 13239.314453125,
"max": 44049.48828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989971.0,
"min": 29952.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989971.0,
"min": 29952.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.14326219260692596,
"min": -0.12083493918180466,
"max": 0.18571296334266663,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 35.529022216796875,
"min": -29.121219635009766,
"max": 47.17109298706055,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027769923210144043,
"min": 0.015405929647386074,
"max": 0.5684710144996643,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.886940956115723,
"min": 3.743640899658203,
"max": 134.72763061523438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07137393387885967,
"min": 0.06533262936839591,
"max": 0.07363117916134393,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9992350743040354,
"min": 0.4770290960628678,
"max": 1.1044676874201589,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009305816587864795,
"min": 0.0010180396564200662,
"max": 0.01850194055816085,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13028143223010713,
"min": 0.011105572171099889,
"max": 0.17425681570874113,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.427090381478569e-06,
"min": 7.427090381478569e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010397926534069996,
"min": 0.00010397926534069996,
"max": 0.0035073905308698997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247566428571428,
"min": 0.10247566428571428,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346592999999999,
"min": 1.3691136000000002,
"max": 2.5691301,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002573188621428571,
"min": 0.0002573188621428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036024640699999994,
"min": 0.0036024640699999994,
"max": 0.11693609699000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01684480719268322,
"min": 0.01684480719268322,
"max": 0.6035431623458862,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23582731187343597,
"min": 0.23582731187343597,
"max": 4.224802017211914,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 715.1052631578947,
"min": 650.58,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27174.0,
"min": 15984.0,
"max": 32529.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.5197351025165738,
"min": -1.0000000521540642,
"max": 0.844234118919547,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 19.23019879311323,
"min": -32.000001668930054,
"max": 34.61359887570143,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.5197351025165738,
"min": -1.0000000521540642,
"max": 0.844234118919547,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 19.23019879311323,
"min": -32.000001668930054,
"max": 34.61359887570143,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1270365567399642,
"min": 0.11415854611434043,
"max": 12.884078552946448,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.700352599378675,
"min": 4.700352599378675,
"max": 206.14525684714317,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1764036507",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1764038597"
},
"total": 2089.6059063190005,
"count": 1,
"self": 0.4775345130005917,
"children": {
"run_training.setup": {
"total": 0.02319891999968604,
"count": 1,
"self": 0.02319891999968604
},
"TrainerController.start_learning": {
"total": 2089.105172886,
"count": 1,
"self": 1.4078762900430775,
"children": {
"TrainerController._reset_env": {
"total": 2.0119572559997323,
"count": 1,
"self": 2.0119572559997323
},
"TrainerController.advance": {
"total": 2085.6093560669583,
"count": 63336,
"self": 1.499776870122787,
"children": {
"env_step": {
"total": 1430.2468238299302,
"count": 63336,
"self": 1275.5251404809578,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.8734406499957,
"count": 63336,
"self": 4.626334661030796,
"children": {
"TorchPolicy.evaluate": {
"total": 149.2471059889649,
"count": 62577,
"self": 149.2471059889649
}
}
},
"workers": {
"total": 0.8482426989767191,
"count": 63336,
"self": 0.0,
"children": {
"worker_root": {
"total": 2082.5258967409936,
"count": 63336,
"is_parallel": true,
"self": 925.778975602092,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018667859999368375,
"count": 1,
"is_parallel": true,
"self": 0.0005955330002507253,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012712529996861122,
"count": 8,
"is_parallel": true,
"self": 0.0012712529996861122
}
}
},
"UnityEnvironment.step": {
"total": 0.04726257299989811,
"count": 1,
"is_parallel": true,
"self": 0.0005651699993904913,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047712000014143996,
"count": 1,
"is_parallel": true,
"self": 0.00047712000014143996
},
"communicator.exchange": {
"total": 0.04451461600001494,
"count": 1,
"is_parallel": true,
"self": 0.04451461600001494
},
"steps_from_proto": {
"total": 0.0017056670003512409,
"count": 1,
"is_parallel": true,
"self": 0.000362816001597821,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013428509987534198,
"count": 8,
"is_parallel": true,
"self": 0.0013428509987534198
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1156.7469211389016,
"count": 63335,
"is_parallel": true,
"self": 33.7487428308973,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.68552457414353,
"count": 63335,
"is_parallel": true,
"self": 23.68552457414353
},
"communicator.exchange": {
"total": 989.7077681439832,
"count": 63335,
"is_parallel": true,
"self": 989.7077681439832
},
"steps_from_proto": {
"total": 109.60488558987754,
"count": 63335,
"is_parallel": true,
"self": 23.048047569910977,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.55683801996656,
"count": 506680,
"is_parallel": true,
"self": 86.55683801996656
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 653.8627553669053,
"count": 63336,
"self": 2.7058591068839632,
"children": {
"process_trajectory": {
"total": 124.38750293001567,
"count": 63336,
"self": 124.20568530001538,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18181763000029605,
"count": 2,
"self": 0.18181763000029605
}
}
},
"_update_policy": {
"total": 526.7693933300056,
"count": 445,
"self": 291.5442858189581,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.22510751104755,
"count": 22824,
"self": 235.22510751104755
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.839995982474647e-07,
"count": 1,
"self": 9.839995982474647e-07
},
"TrainerController._save_models": {
"total": 0.0759822889995121,
"count": 1,
"self": 0.0009287140001106309,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07505357499940146,
"count": 1,
"self": 0.07505357499940146
}
}
}
}
}
}
}