Module tests.runtime_benchmark
Run benchmarks and print a benchmark report.
This file times various aspects of the environment, such as the physics engine and the renderer, given a task config. It is useful to benchmark new task configs.
Note: To run this file, you must install the tqdm package.
Functions
def main(_)
-
Expand source code
def main(_):
  """Run the benchmarking script.

  Loads the task config named by FLAGS.config and times five phases of the
  environment, printing a report for each via _time_env_function:
    1. full environment steps without rendering (random actions),
    2. environment resets without rendering,
    3. physics steps only,
    4. rendering only, for each (image_size, anti_aliasing) setting,
    5. full environment steps with rendering.

  Args:
    _: Unused; absl.app passes argv here.
  """
  config = importlib.import_module(FLAGS.config)
  print('Benchmarking config: {}'.format(FLAGS.config))
  config = config.get_config(FLAGS.level)

  ############################################################################
  # Benchmark without rendering, using random actions
  ############################################################################
  # A no-op image observer disables rendering while keeping the observer
  # interface intact.
  config['observers'] = {'image': lambda _: None}
  env = environment.Environment(**config)
  print('Environment steps without rendering:')

  def _step_env_function():
    # Return True when the episode has ended so the timer can reset the env.
    return env.step(action=env.action_space.random_action()).last()

  _time_env_function(env, _step_env_function)

  ############################################################################
  # Benchmark only resets, without rendering, using random actions
  ############################################################################
  config['observers'] = {'image': lambda _: None}
  env = environment.Environment(**config)
  print('Environment resets, without rendering:')

  def _reset_env_function():
    # Implicitly returns None (falsy), i.e. "episode not done".
    env.reset()

  _time_env_function(env, _reset_env_function)

  ############################################################################
  # Benchmark physics only
  ############################################################################
  config['observers'] = {'image': lambda _: None}
  env = environment.Environment(**config)
  print('Physics steps only:')

  def _physics_env_function():
    env.physics.step(env.state)
    return False

  _time_env_function(env, _physics_env_function)

  ############################################################################
  # Benchmark renderer only
  ############################################################################
  def _get_render_env_function(env):
    # Factory binds `env` per-iteration to avoid late-binding closure bugs
    # inside the loop below.
    def _render_env_function():
      env.observation()
      return False
    return _render_env_function

  for image_size, anti_aliasing in _IMAGE_SIZE_ANTI_ALIASING:
    renderer = pil_renderer.PILRenderer(
        image_size=(image_size, image_size),
        anti_aliasing=anti_aliasing,
        color_to_rgb=getattr(color_maps, FLAGS.color_map),
    )
    config['observers'] = {'image': renderer}
    env = environment.Environment(**config)
    print('Renderer steps only, image_size {}, anti_aliasing {}:'.format(
        image_size, anti_aliasing))
    _time_env_function(env, _get_render_env_function(env))

  ############################################################################
  # Benchmark full steps with rendering
  ############################################################################
  def _get_env_function(env):
    # Factory binds `env` per-iteration, as above.
    def _env_function():
      return env.step(action=env.action_space.random_action()).last()
    return _env_function

  for image_size, anti_aliasing in _IMAGE_SIZE_ANTI_ALIASING:
    renderer = pil_renderer.PILRenderer(
        image_size=(image_size, image_size),
        anti_aliasing=anti_aliasing,
        color_to_rgb=getattr(color_maps, FLAGS.color_map),
    )
    config['observers'] = {'image': renderer}
    env = environment.Environment(**config)
    print(
        'Full steps with rendering, image_size {}, anti_aliasing '
        '{}:'.format(image_size, anti_aliasing))
    _time_env_function(env, _get_env_function(env))
Run benchmarking script.