{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 0.8965212106704712, "min": 0.8965212106704712, "max": 2.8476932048797607, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 8599.431640625, "min": 8599.431640625, "max": 29225.875, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 13.056207656860352, "min": 0.42233338952064514, "max": 13.056207656860352, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2545.96044921875, "min": 81.93267822265625, "max": 2645.7431640625, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 10945.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06428697935609984, "min": 0.06219308111547291, "max": 0.07283064166891992, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.25714791742439935, "min": 0.253794996680774, "max": 0.36415320834459963, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.2016227744227531, "min": 0.12117704873055439, "max": 0.2690818102920757, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.8064910976910123, "min": 0.48470819492221756, "max": 1.3454090514603783, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 8.082097306000005e-06, "min": 8.082097306000005e-06, "max": 0.000291882002706, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.232838922400002e-05, "min": 3.232838922400002e-05, "max": 0.00138516003828, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10269400000000001, "min": 0.10269400000000001, "max": 0.19729400000000002, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.41077600000000003, "min": 0.41077600000000003, "max": 0.96172, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001444306000000001, "min": 0.0001444306000000001, "max": 0.0048649706, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0005777224000000004, "min": 0.0005777224000000004, "max": 0.023089828, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 25.727272727272727, "min": 3.6136363636363638, "max": 25.772727272727273, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1132.0, "min": 159.0, "max": 1412.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 25.727272727272727, "min": 3.6136363636363638, "max": 25.772727272727273, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1132.0, "min": 159.0, "max": 1412.0, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1683736707", "python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", 
"communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1683737094" }, "total": 386.96057435299997, "count": 1, "self": 0.2759785100000727, "children": { "run_training.setup": { "total": 0.05108021999990342, "count": 1, "self": 0.05108021999990342 }, "TrainerController.start_learning": { "total": 386.633515623, "count": 1, "self": 0.5664840070039645, "children": { "TrainerController._reset_env": { "total": 3.9870886520000113, "count": 1, "self": 3.9870886520000113 }, "TrainerController.advance": { "total": 381.9465025749962, "count": 18206, "self": 0.2514111849936853, "children": { "env_step": { "total": 381.6950913900025, "count": 18206, "self": 274.02313459801167, "children": { "SubprocessEnvManager._take_step": { "total": 107.42731698699731, "count": 18206, "self": 1.4536046400044143, "children": { "TorchPolicy.evaluate": { "total": 105.9737123469929, "count": 18206, "self": 105.9737123469929 } } }, "workers": { "total": 0.24463980499353966, "count": 18206, "self": 0.0, "children": { "worker_root": { "total": 385.77453873600143, "count": 18206, "is_parallel": true, "self": 191.51265106801736, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0021259689999624243, "count": 1, "is_parallel": true, "self": 0.0007368509998286754, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013891180001337489, "count": 10, "is_parallel": true, "self": 0.0013891180001337489 } } }, "UnityEnvironment.step": { "total": 0.03470568800003093, "count": 1, "is_parallel": true, "self": 0.0005321800000501753, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002825029999939943, "count": 1, "is_parallel": true, "self": 0.0002825029999939943 }, "communicator.exchange": { "total": 0.03205879500001174, "count": 1, "is_parallel": true, "self": 0.03205879500001174 }, "steps_from_proto": { "total": 0.0018322099999750208, "count": 1, "is_parallel": true, "self": 0.0003201850000777995, "children": { "_process_rank_one_or_two_observation": { "total": 0.0015120249998972213, "count": 10, "is_parallel": true, "self": 0.0015120249998972213 } } } } } } }, "UnityEnvironment.step": { "total": 194.26188766798407, "count": 18205, "is_parallel": true, "self": 9.018999922975695, "children": { "UnityEnvironment._generate_step_input": { "total": 4.745022356002096, "count": 18205, "is_parallel": true, "self": 4.745022356002096 }, "communicator.exchange": { "total": 151.68447986701074, "count": 18205, "is_parallel": true, "self": 151.68447986701074 }, "steps_from_proto": { "total": 28.813385521995542, "count": 18205, "is_parallel": true, "self": 5.225260672029208, "children": { "_process_rank_one_or_two_observation": { "total": 23.588124849966334, "count": 182050, "is_parallel": true, "self": 23.588124849966334 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00014355099983731634, "count": 1, "self": 0.00014355099983731634, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 380.5500214819773, "count": 236739, "is_parallel": true, "self": 4.794163842028638, "children": { "process_trajectory": { "total": 210.62826672294773, "count": 236739, "is_parallel": true, "self": 209.7859311519478, "children": { "RLTrainer._checkpoint": { "total": 0.8423355709999214, "count": 4, "is_parallel": true, "self": 0.8423355709999214 } } }, "_update_policy": { "total": 
165.12759091700093, "count": 90, "is_parallel": true, "self": 43.92360788700239, "children": { "TorchPPOOptimizer.update": { "total": 121.20398302999854, "count": 4587, "is_parallel": true, "self": 121.20398302999854 } } } } } } } } }, "TrainerController._save_models": { "total": 0.13329683799997838, "count": 1, "self": 0.0008723779999399994, "children": { "RLTrainer._checkpoint": { "total": 0.13242446000003838, "count": 1, "self": 0.13242446000003838 } } } } } } }