Example #1
def main(_):
  gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params)
  print('********')
  print(FLAGS.mode)
  print(FLAGS.gin_config)
  print(FLAGS.gin_params)

  env = active_vision_dataset_env.ActiveVisionDatasetEnv(modality_types=[
      task_env.ModalityTypes.IMAGE,
      task_env.ModalityTypes.SEMANTIC_SEGMENTATION,
      task_env.ModalityTypes.OBJECT_DETECTION, task_env.ModalityTypes.DEPTH,
      task_env.ModalityTypes.DISTANCE
  ])

  if FLAGS.mode == BENCHMARK_MODE:
    benchmark(env, env.possible_targets)
  elif FLAGS.mode == GRAPH_MODE:
    for loc in env.worlds:
      env.check_scene_graph(loc, 'fridge')
  elif FLAGS.mode == HUMAN_MODE:
    human(env, env.possible_targets)
  elif FLAGS.mode == VIS_MODE:
    visualize_random_step_sequence(env)
  elif FLAGS.mode == EVAL_MODE:
    evaluate_folder(env, FLAGS.eval_folder)
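
Note: the examples on this page assume absl flags such as --gin_config and --gin_params are defined at module level. A minimal sketch of definitions compatible with Example #1 (the flag names come from the calls above; defaults and help strings are hypothetical):

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_multi_string('gin_config', [], 'Paths to gin config files.')
flags.DEFINE_multi_string('gin_params', [], 'Individual gin parameter bindings.')
flags.DEFINE_string('mode', None, 'One of the mode constants used above.')
flags.DEFINE_string('eval_folder', None, 'Folder to evaluate in EVAL_MODE.')

if __name__ == '__main__':
    app.run(main)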
Example #2
def main(argv):
    args = flags.FLAGS
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.test:
        args.envs = 4
        args.batch_sz = 4
        args.log_freq = 10
        args.restore = True

    expt = rvr.utils.Experiment(args.results_dir, args.env, args.agent, args.experiment, args.restore)

    base_path = os.path.dirname(os.path.abspath(__file__))
    gin_files = gin_configs.get(args.env, [])
    gin_files = [base_path + '/configs/' + fl for fl in gin_files]
    if args.restore:
        gin_files += [expt.config_path]
    gin_files += args.gin_files

    if not args.gpu:
        args.gin_bindings.append("build_cnn_nature.data_format = 'channels_last'")
        args.gin_bindings.append("build_fully_conv.data_format = 'channels_last'")

    gin.parse_config_files_and_bindings(gin_files, args.gin_bindings)

    # TODO: do this the other way around - put these as gin bindings
    if not args.traj_len:
        args.traj_len = int(gin.query_parameter('AdvantageActorCriticAgent.traj_len'))

    if not args.batch_sz:
        args.batch_sz = int(gin.query_parameter('AdvantageActorCriticAgent.batch_sz'))

    env_cls = rvr.envs.GymEnv if '-v' in args.env else rvr.envs.SC2Env
    env = env_cls(args.env, args.render, max_ep_len=args.max_ep_len)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess_mgr = rvr.utils.tensorflow.SessionManager(sess, expt.path, args.ckpt_freq, training_enabled=not args.test)

    agent = agent_cls[args.agent](env.obs_spec(), env.act_spec(), sess_mgr=sess_mgr,
                                  n_envs=args.envs, traj_len=args.traj_len, batch_sz=args.batch_sz)
    agent.logger = rvr.utils.StreamLogger(args.envs, args.log_freq, args.eps_avg, sess_mgr, expt.log_path)

    if sess_mgr.training_enabled:
        expt.save_gin_config()
        expt.save_model_summary(agent.model)

    agent.run(env, args.updates * args.traj_len * args.batch_sz // args.envs)
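
Example #2 reads defaults back out of the parsed config with gin.query_parameter. A minimal, self-contained sketch of that pattern (Agent is a hypothetical configurable, not part of the example above):

import gin

@gin.configurable
class Agent:
    def __init__(self, traj_len=16):
        self.traj_len = traj_len

# A binding set via parse_config_files_and_bindings is visible to queries.
gin.parse_config_files_and_bindings(None, ['Agent.traj_len = 32'])
assert int(gin.query_parameter('Agent.traj_len')) == 32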
Example #3
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    gin.parse_config_files_and_bindings(
        [os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')],
        bindings=FLAGS.gin_bindings,
        skip_unknown=False)
    env_id = mon_minigrid.register_environment()
    env = gym.make(env_id)
    env = RGBImgObsWrapper(env)  # Get pixel observations
    # Get tabular observation and drop the 'mission' field:
    env = tabular_wrapper.TabularWrapper(env, get_rgb=True)
    env.reset()

    num_frames = 0
    max_num_frames = 500

    if not tf.io.gfile.exists(FLAGS.file_path):
        tf.io.gfile.makedirs(FLAGS.file_path)

    undisc_return = 0
    while num_frames < max_num_frames:
        # Act randomly
        obs, reward, done, _ = env.step(env.action_space.sample())
        undisc_return += reward
        num_frames += 1

        print('t:', num_frames, '   s:', obs['state'])
        # Draw environment frame just for simple visualization
        plt.imshow(obs['image'])
        path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames))
        plt.savefig(path)
        plt.clf()

        if done:
            break

    print('Undiscounted return: %.2f' % undisc_return)
    env.close()
Example #4
def main(unused_argv):
  tf.random.set_seed(FLAGS.seed)
  init_timer = timer.Timer()
  init_timer.Start()

  if FLAGS.mode == 'hessian':
    # Load default values from the original experiment.
    FLAGS.preload_gin_config = os.path.join(FLAGS.logdir,
                                            'operative_config.gin')

  # Maybe preload a gin config.
  if FLAGS.preload_gin_config:
    config_path = FLAGS.preload_gin_config
    gin.parse_config_file(config_path)
    logging.info('Gin configuration pre-loaded from: %s', config_path)

  gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_bindings)
  ds_train, ds_test, info = utils.get_dataset()
  input_shape = info.features['image'].shape
  num_classes = info.features['label'].num_classes
  logging.info('Input Shape: %s', input_shape)
  logging.info('train samples: %s', info.splits['train'].num_examples)
  logging.info('test samples: %s', info.splits['test'].num_examples)

  pruning_params = utils.get_pruning_params()
  model = utils.get_network(pruning_params, input_shape, num_classes)
  model.summary(print_fn=logging.info)
  if FLAGS.mode == 'train_eval':
    train_model(model, ds_train, ds_test, FLAGS.logdir)
  elif FLAGS.mode == 'hessian':
    test_model(model, ds_test)
    hessian(model, ds_train, FLAGS.logdir)
  logging.info('Total runtime: %.3f s', init_timer.GetDuration())

  logconfigfile_path = os.path.join(
      FLAGS.logdir,
      ('hessian_' if FLAGS.mode == 'hessian' else '') + 'operative_config.gin')
  with tf.io.gfile.GFile(logconfigfile_path, 'w') as f:
    f.write('# Gin-Config:\n %s' % gin.config.operative_config_str())
Example #5
def get_inference_model(ckpt):
  """Restore model from checkpoint using global FLAGS.

  Use --gin_param for any custom kwargs for model constructors.

  Args:
    ckpt: Path to the checkpoint.

  Returns:
    Inference model, built and restored from checkpoint.
  """
  # Parse model kwargs from --gin_param.
  print('Parsing --gin_param flags:', FLAGS.gin_param)
  with gin.unlock_config():
    gin.parse_config_files_and_bindings(None, FLAGS.gin_param)

  models = {
      'autoencoder': inference.AutoencoderInference,
      'vst_extract_features': inference.VSTExtractFeatures,
      'vst_predict_controls': inference.VSTPredictControls,
      'vst_synthesize': inference.VSTSynthesize,
  }
  return models[FLAGS.inference_model](ckpt)
Example #6
def parse_gin(model_dir):
    """Parse gin config from --gin_file, --gin_param, and the model directory."""
    # Add user folders to the gin search path.
    for gin_search_path in [GIN_PATH] + FLAGS.gin_search_path:
        gin.add_config_file_search_path(gin_search_path)

    # Parse gin configs, later calls override earlier ones.
    with gin.unlock_config():
        # Optimization defaults.
        use_tpu = bool(FLAGS.tpu)
        opt_default = 'base.gin' if not use_tpu else 'base_tpu.gin'
        gin.parse_config_file(os.path.join('optimization', opt_default))

        # Load operative_config if it exists (model has already trained).
        operative_config = os.path.join(model_dir, 'operative_config-0.gin')
        if tf.io.gfile.exists(operative_config):
            gin.parse_config_file(operative_config, skip_unknown=True)

        # User gin config and user hyperparameters from flags.
        gin.parse_config_files_and_bindings(FLAGS.gin_file,
                                            FLAGS.gin_param,
                                            skip_unknown=True)
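
Example #6 leans on two gin behaviors worth making explicit: later parse calls override earlier ones, and skip_unknown=True tolerates bindings for configurables that are not registered in the current process. A minimal sketch under those assumptions (model is a hypothetical configurable):

import gin

@gin.configurable
def model(width=64):
    return width

gin.parse_config_files_and_bindings(None, ['model.width = 128'],
                                    finalize_config=False)
gin.parse_config_files_and_bindings(None,
                                    ['model.width = 256',
                                     'not_imported.param = 1'],
                                    skip_unknown=True)
assert model() == 256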
Example #7
def main(unused_argv):
  logging.info("Gin config: %s\nGin bindings: %s",
               FLAGS.gin_config, FLAGS.gin_bindings)
  gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_bindings)


  if FLAGS.use_tpu is None:
    FLAGS.use_tpu = bool(os.environ.get("TPU_NAME", ""))
    if FLAGS.use_tpu:
      logging.info("Found TPU %s.", os.environ["TPU_NAME"])
  run_config = _get_run_config()
  task_manager = _get_task_manager()
  options = runner_lib.get_options_dict()
  runner_lib.run_with_schedule(
      schedule=FLAGS.schedule,
      run_config=run_config,
      task_manager=task_manager,
      options=options,
      use_tpu=FLAGS.use_tpu,
      num_eval_averaging_runs=FLAGS.num_eval_averaging_runs,
      eval_every_steps=FLAGS.eval_every_steps)
  logging.info("I\"m done with my work, ciao!")
Example #8
def evaluate(save_path, exp_name):
    print(save_path, exp_name)
    exp_path = os.path.join(save_path, exp_name)
    # Load config
    config = parse_gin_config(os.path.join(save_path, "config.gin"))
    gin.parse_config_files_and_bindings(
        [os.path.join(os.path.join(save_path, "config.gin"))], bindings=[""])

    if not os.path.exists(exp_path):
        logger.info("Creating folder " + exp_path)
        os.makedirs(exp_path)
    else:
        raise FileExistsError("There already exists a folder with this name")

    weights_init = torch.load(os.path.join(save_path, "initial.pth"))
    weights_val = torch.load(os.path.join(save_path, "best_valid_acc81.pth"))
    weights_final = torch.load(os.path.join(save_path, "final103.pth"))

    results_val = combine_weights(weights_init, weights_val, config)
    save_to_csv(results_val, exp_path, "interpolation_val.csv")
    results_train = combine_weights(weights_init, weights_final, config)
    save_to_csv(results_train, exp_path, "interpolation_final.csv")
Example #9
def main(ctx, log_dir, debug, config, binding):
    """
    Parses the GIN config file given as the sole argument to main
    and commences training.

    Creates a log directory if the option --log-dir was unspecified.
    """
    ctx.ensure_object(dict)
    logging.basicConfig(level=logging.INFO)

    # Parse config
    gin.parse_config_files_and_bindings(config, binding)

    slug = "#%s" % project_revision()
    if log_dir is None:
        # Generate new directory if not resuming training
        host = platform.node()

        version = 0
        prefix = os.path.join("logdir", "%s-%s" % (host, slug))
        if len(binding) > 0:
            prefix += "-%s" % "_".join(binding).strip(" ").replace("/", "\\")
        directory = "%s-%d" % (prefix, version)
        while tf.io.gfile.isdir(directory):
            version += 1
            directory = "%s-%d" % (prefix, version)
        log_dir = directory

    if not tf.io.gfile.isdir(log_dir):
        tf.io.gfile.makedirs(log_dir)
    atexit.register(gin_log_config, log_dir, slug)

    if debug:
        logging.info("Debug enabled, disabling graph compilation")

    ctx.obj["log_dir"] = log_dir
    ctx.obj["debug"] = debug

    return ctx
Example #10
def main(argv):
    del argv
    logging.info('Starting RL training.')

    gin_configs = FLAGS.config or []
    gin.parse_config_files_and_bindings(FLAGS.config_file, gin_configs)

    logging.info('Gin config:')
    logging.info(gin_configs)

    train_rl(
        output_dir=FLAGS.output_dir,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        trajectory_dump_dir=(FLAGS.trajectory_dump_dir or None),
    )

    # TODO(afrozm): This is for debugging.
    logging.info('Dumping stack traces of all stacks.')
    faulthandler.dump_traceback(all_threads=True)

    logging.info('Training is done, should exit.')
Example #11
  def test_weight_copying(self, distribution):
    with distribution.scope():
      prog_masked_lm = progressive_masked_lm.ProgressiveMaskedLM(
          self.task_config)
      old_model = prog_masked_lm.get_model(stage_id=0)
      for w in old_model.trainable_weights:
        w.assign(tf.zeros_like(w) + 0.12345)
      new_model = prog_masked_lm.get_model(stage_id=1, old_model=old_model)
      for w in new_model.trainable_weights:
        self.assertAllClose(w, tf.zeros_like(w) + 0.12345)

    gin.parse_config_files_and_bindings(
        None, "encoders.build_encoder.encoder_cls = @EncoderScaffold")
    with distribution.scope():
      prog_masked_lm = progressive_masked_lm.ProgressiveMaskedLM(
          self.task_config)
      old_model = prog_masked_lm.get_model(stage_id=0)
      for w in old_model.trainable_weights:
        w.assign(tf.zeros_like(w) + 0.12345)
      new_model = prog_masked_lm.get_model(stage_id=1, old_model=old_model)
      for w in new_model.trainable_weights:
        self.assertAllClose(w, tf.zeros_like(w) + 0.12345)
Example #12
def main(_):
    logging.set_verbosity(logging.INFO)
    gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
    if FLAGS.sub_dir == "auto":
        sub_dir = utils.get_datetime()
    else:
        sub_dir = FLAGS.sub_dir
    log_dir = os.path.join(
        FLAGS.root_dir,
        FLAGS.env_name,
        FLAGS.agent_name,
        sub_dir,
    )
    utils.maybe_makedirs(log_dir)
    train_eval_online.train_eval_online(
        log_dir=log_dir,
        agent_module=agents.AGENT_MODULES_DICT[FLAGS.agent_name],
        env_name=FLAGS.env_name,
        total_train_steps=FLAGS.total_train_steps,
        n_eval_episodes=FLAGS.n_eval_episodes,
        eval_target=FLAGS.eval_target,
    )
Example #13
def main(_):
    gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
    params = train_utils.parse_configuration(FLAGS)
    model_dir = FLAGS.model_dir
    if 'train' in FLAGS.mode:
        # Pure eval modes do not output yaml files. Otherwise continuous eval job
        # may race against the train job for writing the same file.
        train_utils.serialize_config(params, model_dir)

    # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
    # can have significant impact on model speeds by utilizing float16 in case of
    # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
    # dtype is float16
    if params.runtime.mixed_precision_dtype:
        performance.set_mixed_precision_policy(
            params.runtime.mixed_precision_dtype)
    distribution_strategy = distribute_utils.get_distribution_strategy(
        distribution_strategy=params.runtime.distribution_strategy,
        all_reduce_alg=params.runtime.all_reduce_alg,
        num_gpus=params.runtime.num_gpus,
        tpu_address=params.runtime.tpu,
        **params.runtime.model_parallelism())
    with distribution_strategy.scope():
        task = task_factory.get_task(params.task,
                                     label_spec=('relevance',
                                                 tf.io.FixedLenFeature(
                                                     shape=[
                                                         1,
                                                     ],
                                                     dtype=tf.int64,
                                                     default_value=-1)),
                                     logging_dir=model_dir)

    train_lib.run_experiment(distribution_strategy=distribution_strategy,
                             task=task,
                             mode=FLAGS.mode,
                             params=params,
                             model_dir=model_dir)
Example #14
def main(_):
    gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
    params = train_utils.parse_configuration(FLAGS)
    model_dir = FLAGS.model_dir
    if 'train' in FLAGS.mode:
        # Pure eval modes do not output yaml files. Otherwise continuous eval job
        # may race against the train job for writing the same file.
        train_utils.serialize_config(params, model_dir)

    if 'train_and_eval' in FLAGS.mode:
        assert (
            params.task.train_data.feature_shape ==
            params.task.validation_data.feature_shape), (
                f'train {params.task.train_data.feature_shape} != validate '
                f'{params.task.validation_data.feature_shape}')

    # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
    # can have significant impact on model speeds by utilizing float16 in case of
    # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
    # dtype is float16
    if params.runtime.mixed_precision_dtype:
        performance.set_mixed_precision_policy(
            params.runtime.mixed_precision_dtype)
    distribution_strategy = distribute_utils.get_distribution_strategy(
        distribution_strategy=params.runtime.distribution_strategy,
        all_reduce_alg=params.runtime.all_reduce_alg,
        num_gpus=params.runtime.num_gpus,
        tpu_address=params.runtime.tpu)
    with distribution_strategy.scope():
        task = task_factory.get_task(params.task, logging_dir=model_dir)

    train_lib.run_experiment(distribution_strategy=distribution_strategy,
                             task=task,
                             mode=FLAGS.mode,
                             params=params,
                             model_dir=model_dir)

    train_utils.save_gin_config(FLAGS.mode, model_dir)
Example #15
File: play.py Project: soychanq/alf
def main(_):
    seed = common.set_random_seed(FLAGS.random_seed)
    gin_file = common.get_gin_file()
    gin.parse_config_files_and_bindings(gin_file, FLAGS.gin_param)
    algorithm_ctor = gin.query_parameter(
        'TrainerConfig.algorithm_ctor').scoped_configurable_fn
    env = create_environment(nonparallel=True, seed=seed)
    env.reset()
    common.set_global_env(env)
    config = policy_trainer.TrainerConfig(root_dir="")
    data_transformer = create_data_transformer(config.data_transformer_ctor,
                                               env.observation_spec())
    config.data_transformer = data_transformer
    observation_spec = data_transformer.transformed_observation_spec
    common.set_transformed_observation_spec(observation_spec)
    algorithm = algorithm_ctor(
        observation_spec=observation_spec,
        action_spec=env.action_spec(),
        config=config)
    try:
        policy_trainer.play(
            FLAGS.root_dir,
            env,
            algorithm,
            checkpoint_step=FLAGS.checkpoint_step or "latest",
            epsilon_greedy=FLAGS.epsilon_greedy,
            num_episodes=FLAGS.num_episodes,
            max_episode_length=FLAGS.max_episode_length,
            sleep_time_per_step=FLAGS.sleep_time_per_step,
            record_file=FLAGS.record_file,
            future_steps=FLAGS.future_steps,
            append_blank_frames=FLAGS.append_blank_frames,
            render=FLAGS.render,
            render_prediction=FLAGS.render_prediction,
            ignored_parameter_prefixes=FLAGS.ignored_parameter_prefixes.split(
                ",") if FLAGS.ignored_parameter_prefixes else [])
    finally:
        env.close()
Example #16
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset_name', type=str, default='tmp')
    parser.add_argument('-r', '--is_ray_process', action='store_true')
    parser.add_argument(
        '-g',
        '--gin_config',
        nargs='+',
        default=['render_single'],
        help='Set of config files for gin (separated by spaces) '
        'e.g. --gin_config f1 f2 (exclude .gin from path)')
    parser.add_argument(
        '-p',
        '--gin_param',
        nargs='+',
        default=[],
        help='Parameter settings that override config defaults '
        'e.g. --gin_param \'module_1.a = 2\' \'module_2.b = 3\'')

    argv = sys.argv[sys.argv.index("--") + 1:]
    flags = parser.parse_args(argv)

    if flags.is_ray_process:
        print("Starting ray process:")
        print(', '.join(flags.gin_param))
        sys.stdout.flush()
        util.suppress_output()

    # Parse config file
    gin_files = [f'{paths.CONFIG_DIR}/{g}.gin' for g in flags.gin_config]
    gin.parse_config_files_and_bindings(gin_files, flags.gin_param)
    with gin.unlock_config():
        gin.bind_parameter('sample.out_dir', flags.dataset_name)

    # Start rendering
    initialize_color_ref()
    render_batch()
Example #17
def main(unused_argv):
  if FLAGS.no_tf_function:
    tf.config.experimental_run_functions_eagerly(True)
    print('TFFUNCTION DISABLED')

  gin.parse_config_files_and_bindings(FLAGS.config_file, FLAGS.gin_bindings)

  # Make directories if they do not exist yet.
  if FLAGS.checkpoint_dir and not tf.io.gfile.exists(FLAGS.checkpoint_dir):
    logging.info('Making new checkpoint directory %s', FLAGS.checkpoint_dir)
    tf.io.gfile.makedirs(FLAGS.checkpoint_dir)
  if FLAGS.plot_dir and not tf.io.gfile.exists(FLAGS.plot_dir):
    logging.info('Making new plot directory %s', FLAGS.plot_dir)
    tf.io.gfile.makedirs(FLAGS.plot_dir)

  if FLAGS.eval_on:
    evaluate()
  else:
    raise ValueError('evaluation needs --eval_on <dataset>.')
Example #18
def main(_):
  logging.set_verbosity(logging.INFO)
  if FLAGS.debug:
    logging.set_verbosity(logging.DEBUG)
  if os.environ.get('CONFIG_DIR'):
    gin.add_config_file_search_path(os.environ.get('CONFIG_DIR'))
  config = wandb.config
  if not wandb.run.resumed:  # do not make changes
    root_path = []
    if os.environ.get('EXP_DIR'):
      root_path.append(os.environ.get('EXP_DIR'))
    root_path.append(config.root_dir)
    root_path.append(str(os.environ.get('WANDB_RUN_ID', 0)))
    config.update(dict(root_dir=osp.join(*root_path)), allow_val_change=True)
  else:
    config.update(dict(num_steps=FLAGS.num_steps), allow_val_change=True)
  gin_files = config.gin_files
  gin_bindings = gin_bindings_from_config(config)
  gin.parse_config_files_and_bindings(gin_files, gin_bindings)
  # tf.config.threading.set_inter_op_parallelism_threads(12)
  trainer.train_eval(config.root_dir, batch_size=config.batch_size, seed=FLAGS.seed,
                     train_metrics_callback=wandb.log, eager_debug=FLAGS.eager_debug,
                     monitor=FLAGS.monitor)
Example #19
def main(_):
  logging.set_verbosity(logging.INFO)
  tf.enable_v2_behavior()

  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)

  # Wait for the collect policy to become available, then load it.
  collect_policy_dir = os.path.join(FLAGS.root_dir,
                                    learner.POLICY_SAVED_MODEL_DIR,
                                    learner.COLLECT_POLICY_SAVED_MODEL_DIR)
  collect_policy = train_utils.wait_for_policy(
      collect_policy_dir, load_specs_from_pbtxt=True)

  # Prepare summary directory.
  summary_dir = os.path.join(FLAGS.root_dir, learner.TRAIN_DIR, str(FLAGS.task))

  # Perform collection.
  collect(
      summary_dir=summary_dir,
      environment_name=gin.REQUIRED,
      collect_policy=collect_policy,
      replay_buffer_server_address=FLAGS.replay_buffer_server_address,
      variable_container_server_address=FLAGS.variable_container_server_address)
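
Example #19 passes environment_name=gin.REQUIRED, which makes the call fail unless a binding supplies the value. A minimal sketch of that pattern (collect here is a stand-in, not the real function):

import gin

@gin.configurable
def collect(environment_name=gin.REQUIRED):
    return environment_name

gin.parse_config_files_and_bindings(
    None, ["collect.environment_name = 'CartPole-v0'"])
assert collect() == 'CartPole-v0'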
Example #20
def main(_):
  logging.set_verbosity(logging.INFO)
  tf.compat.v1.enable_resource_variables()
  tf.compat.v2.enable_v2_behavior()

  gin_file = FLAGS.gin_file
  if FLAGS.add_root_dir_gin_file:
    gin_file.append(os.path.join(FLAGS.root_dir, "train/operative_config-0.gin"))

  gin.parse_config_files_and_bindings(gin_file, FLAGS.gin_bindings,
                                      skip_unknown=True)

  root_dir = FLAGS.root_dir

  score_acc = evaluator.WindowedScoreAccumulator()

  with gin.unlock_config():
    gin.bind_parameter('%ROOT_DIR', root_dir)

  agent_evaluator = evaluator.Evaluator(
      root_dir,
      eval_metrics_callback=None)
  agent_evaluator.watch_until()
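
Example #20 binds the gin macro %ROOT_DIR programmatically. A sketch of how a macro rebind propagates to configurables that reference it, assuming macros are resolved when the configurable is called (logdir is a hypothetical configurable):

import gin

@gin.configurable
def logdir(path=gin.REQUIRED):
    return path

gin.parse_config("""
ROOT_DIR = '/tmp/default'
logdir.path = %ROOT_DIR
""")
gin.bind_parameter('%ROOT_DIR', '/tmp/run42')
assert logdir() == '/tmp/run42'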
Example #21
def main(argv):

    # generate folder structures
    run_paths = utils_params.gen_run_folder()
    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings([
        r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'
    ], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # training including fine tuning
    if FLAGS.train:
        # model
        model = DenseNet121(IMG_SIZE=256)
        model.summary()

        # training and fine tuning
        trainer = Trainer(model=model,
                          ds_train=train_ds,
                          ds_val=valid_ds,
                          run_paths=run_paths)
        for _ in trainer.train():
            continue

    else:
        # evaluation
        # model dir should be replaced by saved model dir
        model_dir = r"\diabetic_retinopathy\logs\20201221-225335\saved_model_ft"
        model = tf.keras.models.load_model(model_dir)
        evaluate(model, valid_ds)
Example #22
def evaluate_enc_on_targets(enc, dset, s_dim, original_file, original_bindings,
                            target_metrics):
    # Disentanglement Lib Metrics
    evals = {}
    eval_bindings_list = get_eval_bindings_list()
    metrics = ("factor", "mig", "beta", "dci", "modularity", "sap")

    for metric, eval_bindings in zip(metrics, eval_bindings_list):
        if metric in target_metrics:
            gin.parse_config_files_and_bindings([],
                                                eval_bindings,
                                                finalize_config=False)
            evaluation_fn = get_evaluation()
            tf.logging.info("Reset eval func to {}".format(
                evaluation_fn.__name__))
            result = evaluation_fn(dset, enc, np.random.RandomState(0))
            ut.log(result)

            if metric == "factor":
                evals[metric] = result["eval_accuracy"]
            elif metric == "mig":
                evals[metric] = result["discrete_mig"]
            elif metric == "beta":
                evals[metric] = result["eval_accuracy"]
            elif metric == "dci":
                evals[metric] = result["disentanglement"]
            elif metric == "modularity":
                evals[metric] = result["modularity_score"]
            elif metric == "sap":
                evals[metric] = result["SAP_score"]

    # Clean up: resetting gin configs to original bindings
    gin.parse_config_files_and_bindings([original_file],
                                        original_bindings,
                                        finalize_config=False)

    return evals
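
Example #22 re-parses configs repeatedly inside one process; that only works because finalize_config=False leaves the gin config unlocked. A minimal sketch of this behavior (scale is a hypothetical configurable):

import gin

@gin.configurable
def scale(factor=1.0):
    return factor

gin.parse_config_files_and_bindings([], ['scale.factor = 2.0'],
                                    finalize_config=False)
assert scale() == 2.0
gin.parse_config_files_and_bindings([], ['scale.factor = 3.0'],
                                    finalize_config=False)
assert scale() == 3.0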
Example #23
def evaluate_metrics_on_permuted_runs():
    """Evaluates metrics on permuted runs, for across-run metrics only."""
    gin_bindings = [
        ('eval_metrics.Evaluator.metrics = '
         '[@IqrAcrossRuns/singleton(), @LowerCVaROnAcross/singleton()]')
    ]
    n_permutations_per_worker = int(p.n_random_samples / p.n_worker)

    # Parse gin config.
    gin.parse_config_files_and_bindings([p.gin_file], gin_bindings)

    for algo1 in p.algos:
        for algo2 in p.algos:
            for task in p.tasks:
                for i_worker in range(p.n_worker):
                    # Get the subdirectories corresponding to each run.
                    summary_path_1 = os.path.join(p.data_dir, algo1, task)
                    summary_path_2 = os.path.join(p.data_dir, algo2, task)
                    run_dirs_1 = eval_metrics.get_run_dirs(
                        summary_path_1, 'train', p.runs)
                    run_dirs_2 = eval_metrics.get_run_dirs(
                        summary_path_2, 'train', p.runs)

                    # Evaluate the metrics.
                    outfile_prefix = os.path.join(p.metric_values_dir_permuted,
                                                  '%s_%s' %
                                                  (algo1, algo2), task) + '/'
                    evaluator = eval_metrics.Evaluator(metrics=gin.REQUIRED)
                    evaluator.write_metric_params(outfile_prefix)
                    evaluator.evaluate_with_permutations(
                        run_dirs_1=run_dirs_1,
                        run_dirs_2=run_dirs_2,
                        outfile_prefix=outfile_prefix,
                        n_permutations=n_permutations_per_worker,
                        permutation_start_idx=(n_permutations_per_worker *
                                               i_worker),
                        random_seed=i_worker)
Example #24
def main(argv):
    gin.parse_config_files_and_bindings(FLAGS.gin_file,
                                        FLAGS.gin_param,
                                        skip_unknown=True)
    op_config_str = gin.config._CONFIG

    use_neptune = "NEPTUNE_API_TOKEN" in os.environ
    if use_neptune:

        params = utils.get_gin_params_as_dict(gin.config._CONFIG)
        neptune.init(project_qualified_name="csadrian/global-autoencoders")

        exp = neptune.create_experiment(params=params, name="exp")
        #ONLY WORKS FOR ONE GIN-CONFIG FILE
        with open(FLAGS.gin_file[0]) as ginf:
            param = ginf.readline()
            while param:
                param = param.replace('.', '-').replace('=', '-').replace(
                    ' ', '').replace('\'', '').replace('\n',
                                                       '').replace('@', '')
                #neptune.append_tag(param)
                param = ginf.readline()
        #for tag in opts['tags'].split(','):
        #  neptune.append_tag(tag)
    else:
        neptune.init('shared/onboarding',
                     api_token='ANONYMOUS',
                     backend=neptune.OfflineBackend())

    er = ExperimentRunner(prefix=exp.id)
    er.train()

    params = utils.get_gin_params_as_dict(gin.config._OPERATIVE_CONFIG)
    for k, v in params.items():
        neptune.set_property(k, v)
    neptune.stop()
    print('fin')
Example #25
def main(_):
  logging.set_verbosity(logging.INFO)
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
  # Setup data file path.
  data_dir = os.path.join(
      FLAGS.data_root_dir,
      FLAGS.env_name,
      FLAGS.data_name,
      FLAGS.data_sub_dir,
      )
  data_file = os.path.join(
      data_dir, FLAGS.data_file_name)

  # Setup log dir.
  if FLAGS.sub_dir == 'auto':
    sub_dir = utils.get_datetime()
  else:
    sub_dir = FLAGS.sub_dir
  log_dir = os.path.join(
      FLAGS.root_dir,
      FLAGS.env_name,
      FLAGS.data_name,
      'n'+str(FLAGS.n_train),
      FLAGS.agent_name,
      sub_dir,
      str(FLAGS.seed),
      )
  utils.maybe_makedirs(log_dir)
  train_eval_offline.train_eval_offline(
      log_dir=log_dir,
      data_file=data_file,
      agent_module=agents.AGENT_MODULES_DICT[FLAGS.agent_name],
      env_name=FLAGS.env_name,
      n_train=FLAGS.n_train,
      total_train_steps=FLAGS.total_train_steps,
      n_eval_episodes=FLAGS.n_eval_episodes,
      )
Example #26
def main(_):
    gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
    print(FLAGS.experiment)
    params = train_utils.parse_configuration(FLAGS)

    model_dir = FLAGS.model_dir
    if 'train' in FLAGS.mode:
        # Pure eval modes do not output yaml files. Otherwise continuous eval job
        # may race against the train job for writing the same file.
        train_utils.serialize_config(params, model_dir)

    # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
    # can have significant impact on model speeds by utilizing float16 in case of
    # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
    # dtype is float16
    if params.runtime.mixed_precision_dtype:
        performance.set_mixed_precision_policy(
            params.runtime.mixed_precision_dtype, params.runtime.loss_scale)
    if params.runtime.worker_hosts != '' and params.runtime.worker_hosts is not None:
        num_workers = distribute_utils.configure_cluster(
            worker_hosts=params.runtime.worker_hosts,
            task_index=params.runtime.task_index)
        print(num_workers)
    distribution_strategy = distribute_utils.get_distribution_strategy(
        distribution_strategy=params.runtime.distribution_strategy,
        all_reduce_alg=params.runtime.all_reduce_alg,
        num_gpus=params.runtime.num_gpus,
        tpu_address=params.runtime.tpu)

    with distribution_strategy.scope():
        task = task_factory.get_task(params.task, logging_dir=model_dir)

    train_lib.run_experiment(distribution_strategy=distribution_strategy,
                             task=task,
                             mode=FLAGS.mode,
                             params=params,
                             model_dir=model_dir)
Example #27
def main(argv):
    del argv
    logging.set_verbosity(logging.INFO)
    if FLAGS.seed is not None:
        set_random_seed(FLAGS.seed)
        logging.info('Random seed %d', FLAGS.seed)
        trial_suffix = f'{FLAGS.trial_id}/seed_{FLAGS.seed}'
    else:
        trial_suffix = str(FLAGS.trial_id)

    expanded_root_dir = os.path.join(FLAGS.root_dir, FLAGS.env_name,
                                     trial_suffix)
    if FLAGS.load_pretrained and (FLAGS.pretrained_model_dir is not None):
        pretrained_model_dir = os.path.join(FLAGS.pretrained_model_dir,
                                            FLAGS.env_name, trial_suffix)
    else:
        pretrained_model_dir = None
    if FLAGS.debugging:
        tf.debugging.set_log_device_placement(True)
        tf.config.experimental_run_functions_eagerly(True)

    gin.parse_config_files_and_bindings(FLAGS.gin_files, FLAGS.gin_bindings)

    pse_drq_train_eval.train_eval(
        expanded_root_dir,
        FLAGS.env_name,
        num_train_steps=FLAGS.num_train_steps,
        policy_save_interval=FLAGS.policy_save_interval,
        checkpoint_interval=FLAGS.checkpoint_interval,
        load_pretrained=FLAGS.load_pretrained,
        pretrained_model_dir=pretrained_model_dir,
        contrastive_loss_weight=FLAGS.contrastive_loss_weight,
        contrastive_loss_temperature=FLAGS.contrastive_loss_temperature,
        image_encoder_representation=FLAGS.image_encoder_representation,
        reverb_port=FLAGS.reverb_port,
        eval_interval=FLAGS.eval_interval)
Example #28
def main(unused_argv):
  if not FLAGS.plot_dir:
    raise ValueError('apply_smurf needs plot directory.')
  if not tf.io.gfile.exists(FLAGS.plot_dir):
    print('Making new plot directory', FLAGS.plot_dir)
    tf.io.gfile.makedirs(FLAGS.plot_dir)
  gin.parse_config_files_and_bindings(FLAGS.config_file, FLAGS.gin_bindings)
  smurf = smurf_evaluator.build_network(batch_size=1)
  smurf.update_checkpoint_dir(FLAGS.checkpoint_dir)
  smurf.restore()
  for i, (image1, image2) in enumerate(get_image_iterator()):
    sys.stdout.write(':')
    sys.stdout.flush()
    flow_forward, occlusion, flow_backward = smurf.infer(
        image1, image2, input_height=FLAGS.height, input_width=FLAGS.width,
        infer_occlusion=True, infer_bw=True)
    occlusion = 1. - occlusion
    smurf_plotting.complete_paper_plot(plot_dir=FLAGS.plot_dir, index=i,
                                       image1=image1, image2=image2,
                                       flow_uv=flow_forward,
                                       ground_truth_flow_uv=None,
                                       flow_valid_occ=None,
                                       predicted_occlusion=occlusion,
                                       ground_truth_occlusion=None)
Example #29
def parse_config(config_dir=DEFAULT,
                 configs=DEFAULT,
                 bindings=DEFAULT,
                 finalize_config=True):
    """Parse config from flags."""
    import gin
    FLAGS = flags.FLAGS
    if config_dir == DEFAULT:
        config_dir = get_config_dir()
    if configs == DEFAULT:
        configs = getattr(FLAGS, 'configs', [])
    elif isinstance(configs, six.string_types):
        configs = [configs]
    configs = np.concatenate([c.split(',') for c in configs])
    configs = [c if c.endswith('.gin') else '{}.gin'.format(c) for c in configs]
    if bindings == DEFAULT:
        bindings = getattr(FLAGS, 'bindings', [])
    elif isinstance(bindings, six.string_types):
        bindings = [bindings]

    # log
    log_strs = ['Parsing config', 'config_dir: {}'.format(config_dir)]
    if configs:
        log_strs.append('Files:')
        log_strs.extend(('  ' + c for c in configs))
    if bindings:
        log_strs.append('Bindings:')
        log_strs.extend(('  ' + b for b in bindings))
    logging.info('\n'.join(log_strs))

    context = nullcontext() if config_dir is None else change_dir_context(
        config_dir)
    with context:
        gin.parse_config_files_and_bindings(configs,
                                            bindings,
                                            finalize_config=finalize_config)
Example #30
def load_config(args) -> Tuple[ExperimentConfig, Dict[str, Tuple[str, str]]]:
    path = os.path.abspath(os.path.normpath(args.experiment_base))
    sys.path.insert(0, os.path.dirname(path))
    importlib.invalidate_caches()
    module_path = ".{}".format(args.experiment)

    importlib.import_module(os.path.basename(path))
    module = importlib.import_module(module_path,
                                     package=os.path.basename(path))

    experiments = [
        m[1] for m in inspect.getmembers(module, inspect.isclass)
        if m[1].__module__ == module.__name__
        and issubclass(m[1], ExperimentConfig)
    ]
    assert (
        len(experiments) == 1
    ), "Too many or two few experiments defined in {}".format(module_path)

    gin.parse_config_files_and_bindings(None, args.gp)

    config = experiments[0]()
    sources = _config_source(args)
    return config, sources
Example #31
def main(_):
    gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params)
    print('********')
    print(FLAGS.mode)
    print(FLAGS.gin_config)
    print(FLAGS.gin_params)

    env = active_vision_dataset_env.ActiveVisionDatasetEnv(modality_types=[
        task_env.ModalityTypes.IMAGE,
        task_env.ModalityTypes.SEMANTIC_SEGMENTATION,
        task_env.ModalityTypes.OBJECT_DETECTION,
        task_env.ModalityTypes.DEPTH,
        task_env.ModalityTypes.DISTANCE,
    ])

    if FLAGS.mode == BENCHMARK_MODE:
        benchmark(env, env.possible_targets)
    elif FLAGS.mode == GRAPH_MODE:
        for loc in env.worlds:
            env.check_scene_graph(loc, 'fridge')
    elif FLAGS.mode == HUMAN_MODE:
        human(env, env.possible_targets)
    elif FLAGS.mode == VIS_MODE:
        visualize_random_step_sequence(env)
    elif FLAGS.mode == EVAL_MODE:
        evaluate_folder(env, FLAGS.eval_folder)
Example #32
    def test_compress_image(self):
        if not common.has_eager_been_enabled():
            self.skipTest("Image compression only supported in TF2.x")

        gin.parse_config_files_and_bindings([], """
    _get_feature_encoder.compress_image=True
    _get_feature_parser.compress_image=True
    """)
        spec = {
            "image": array_spec.ArraySpec((128, 128, 3), np.uint8),
            "mask": array_spec.ArraySpec((128, 128, 1), np.uint8)
        }
        serializer = example_encoding.get_example_serializer(spec)
        decoder = example_encoding.get_example_decoder(spec)

        sample = {
            "image": 128 * np.ones([128, 128, 3], dtype=np.uint8),
            "mask": 128 * np.ones([128, 128, 1], dtype=np.uint8)
        }
        example_proto = serializer(sample)

        recovered = self.evaluate(decoder(example_proto))
        tf.nest.map_structure(np.testing.assert_almost_equal, sample,
                              recovered)
Example #33
def main(_):
  gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params)
  if FLAGS.mode == 'train':
    train()
  else:
    test()