Example #1
def testVAE(self):
    gin.clear_config()
    gin.bind_parameter("dense_encoder.hidden_layer_sizes", [2, 2])
    gin.bind_parameter("ais.num_steps", 1)
    dataset = utils.FakeMNISTDataset()
    model_fn = lambda: vae.VAE(
        z_dims=2, bijector_type="shift_scale", dataset=dataset)
    with tf.Graph().as_default():
        self.evaluate(tf.global_variables_initializer())
        vae.Train(model_fn(),
                  dataset=dataset,
                  train_dir=self.temp_dir,
                  master=None,
                  epochs=1)
    with tf.Graph().as_default():
        vae.Eval(model_fn(),
                 dataset=dataset,
                 train_dir=self.temp_dir,
                 eval_dir=self.temp_dir,
                 master=None,
                 max_number_of_evaluations=1)
    with tf.Graph().as_default():
        writer = tf.summary.FileWriter(self.temp_dir)
        vae.AISEvalShard(shard=0,
                         master=None,
                         num_workers=1,
                         num_chains=1,
                         dataset=dataset,
                         use_polyak_averaging=False,
                         writer=writer,
                         train_dir=self.temp_dir,
                         model_fn=model_fn,
                         batch=250)
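The clear-then-bind pattern above is what most of these examples share: gin.clear_config() wipes whatever an earlier test bound, then gin.bind_parameter overrides individual defaults. A minimal, self-contained sketch of those semantics (the greet function is hypothetical, not part of the example above):

import gin

@gin.configurable
def greet(name="world"):  # hypothetical configurable
    return "hello " + name

gin.bind_parameter("greet.name", "gin")
assert greet() == "hello gin"

gin.clear_config()  # drops all bindings; defaults apply again
assert greet() == "hello world"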
Example #2
def setUp(self):
  super(EvalGanLibTest, self).setUp()
  gin.clear_config()
  FLAGS.data_fake_dataset = True
  self.mock_get_graph = mock.patch.object(
      eval_utils, "get_inception_graph_def").start()
  self.mock_get_graph.return_value = create_fake_inception_graph()
Example #3
def setUp(self):
  super().setUp()
  gin.clear_config()
  gin.parse_config(GIN_CONFIG)
  self.addCleanup(mock.patch.stopall)
  self.mock_load = mock.patch.object(
      loaders.TFDSLoader, 'load', autospec=True).start()
Example #4
  def test_autoregressive_sample_reformer2_lsh_attn_quality(self):
    gin.add_config_file_search_path(_CONFIG_DIR)
    max_len = 32  # 32 is the max length we trained the checkpoint for.
    test_lengths = [8, 16, 32]
    vocab_size = 13
    # The checkpoint is correct on ~90% of sequences; set a random seed to deflake.
    np.random.seed(0)
    for test_len in test_lengths:
      gin.clear_config()
      gin.parse_config_file('reformer2_copy.gin')
      gin.bind_parameter('LSHSelfAttention.predict_mem_len', 2 * max_len)
      gin.bind_parameter('LSHSelfAttention.predict_drop_len', 2 * max_len)

      pred_model = models.Reformer2(mode='predict')

      shape11 = shapes.ShapeDtype((1, 1), dtype=np.int32)
      shape1l = shapes.ShapeDtype((1, max_len), dtype=np.int32)

      model_path = os.path.join(_TESTDATA, 'reformer2_copy_lsh_attn.pkl.gz')
      pred_model.init_from_file(model_path, weights_only=True,
                                input_signature=(shape1l, shape11))
      initial_state = pred_model.state

      for _ in range(2):  # Set low to make the test run reasonably fast.
        # Pick a length in [1, test_len] at random.
        inp_len = np.random.randint(low=1, high=test_len + 1)
        inputs = np.random.randint(low=1, high=vocab_size-1, size=(1, inp_len))
        inputs = np.pad(inputs, [(0, 0), (0, max_len - inp_len)],
                        mode='constant', constant_values=0)
        s = decoding.autoregressive_sample(
            pred_model, inputs=inputs, eos_id=-1, max_length=inp_len,
            temperature=0.0)
        np.testing.assert_equal(s[0], inputs[0, :inp_len])
        pred_model.state = initial_state
    gin.clear_config()  # Make sure to not affect other tests.
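gin.add_config_file_search_path (used at the top of the test above) is what lets the bare file name 'reformer2_copy.gin' resolve; a minimal sketch, with a hypothetical directory:

import gin

gin.add_config_file_search_path('/path/to/configs')  # hypothetical location
gin.parse_config_file('reformer2_copy.gin')  # now resolved via the search path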
Example #5
def parse_gin_config(
    gin_config: Optional[str] = None,
    gin_config_dir: Union[Path, str] = '$CONFIG',
) -> None:
    """Parse a gin config file by path.

    Args:
        gin_config (str, optional): Name of gin config.
        gin_config_dir (Union[Path, str], optional): Directory with gin configs.

    Raises:
        zpy.requests.InvalidRequest: Cannot find gin config at path.
    """
    if gin_config is None:
        log.info('No gin file to parse.')
    else:
        if not gin_config.endswith('.gin'):
            gin_config = gin_config + '.gin'
        gin_config_filename = Path(gin_config)
        gin_config_dir = zpy.files.verify_path(gin_config_dir, check_dir=True)
        gin_config_path = gin_config_dir / gin_config_filename
        log.info(f'Parsing gin config at {gin_config_path}')
        if not gin_config_path.exists():
            raise zpy.requests.InvalidRequest(
                f'Could not find gin config at {gin_config_path}')
        gin.clear_config()
        gin.parse_config_file(str(gin_config_path))
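A usage sketch for parse_gin_config, under the assumption that $CONFIG contains a file named sim.gin (the name is hypothetical):

# The '.gin' suffix is appended automatically, the path is verified, and
# gin.clear_config() ensures the file replaces any previously parsed config.
parse_gin_config(gin_config='sim')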
Example #6
def setUp(self):
    super(TestCase, self).setUp()
    tf.compat.v1.enable_resource_variables()
    # Guard against tests calling gin.parse_config() without calling
    # gin.clear_config(), which can cause nasty bugs that show up in a
    # completely different test. See b/139088071 for example.
    gin.clear_config()
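A minimal sketch of the leak the comment warns about, using a hypothetical configurable f:

import gin

@gin.configurable
def f(x=0):  # hypothetical configurable
    return x

gin.parse_config('f.x = 1')  # test A binds a parameter and never clears it
# Without gin.clear_config() in setUp, an unrelated test B now sees f() == 1
# instead of the default 0, even though it never touched gin itself.
assert f() == 1
gin.clear_config()
assert f() == 0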
Example #7
    def test_dry_run(self, config):
        """Dry-runs all gin configs."""
        gin.clear_config(clear_constants=True)
        gin.parse_config_file(config)

        def run_config():
            try:
                rl_trainer.train_rl(
                    output_dir=self.create_tempdir().full_path,
                    # Don't run any actual training, just initialize all classes.
                    n_epochs=0,
                    train_batch_size=1,
                    eval_batch_size=1,
                )
            except Exception as e:
                raise AssertionError('Error in gin config {}.'.format(
                    os.path.basename(config))) from e

        # Some tests, e.g. the DM suite, can't be run in OSS - so skip them.
        should_skip = False
        try:
            should_skip = should_skip or gin.query_parameter('RLTask.dm_suite')
        except ValueError:
            pass
        try:
            env_name = gin.query_parameter('RLTask.env')
            should_skip = (should_skip or env_name.startswith('DM-')
                           or env_name.startswith('LunarLander'))
        except ValueError:
            pass

        if not should_skip:
            run_config()
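The try/except blocks above work because gin.query_parameter raises ValueError when a parameter has no binding in the parsed config; a minimal sketch with a hypothetical configurable f:

import gin

@gin.configurable
def f(x=0, y=0):  # hypothetical configurable
    return x + y

gin.parse_config('f.x = 1')
assert gin.query_parameter('f.x') == 1
try:
    gin.query_parameter('f.y')  # no binding for f.y in the config
except ValueError:
    pass  # treated above as "not configured"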
Example #8
def test_abstract_learner_create():
    f = os.path.join(os.path.dirname(abstract_learner.__file__), '..',
                     'configs', 'base_learner.gin')
    load_config_files([f])
    l = EmptyLearner(config.Config())
    l.train()
    gin.clear_config()
Example #9
  def test_build_layer(self, kwarg_modules):
    """Tests if layer builds properly and produces outputs of correct shape."""
    gin_config = (self.gin_config_kwarg_modules if kwarg_modules else
                  self.gin_config_dag_modules)
    with gin.unlock_config():
      gin.clear_config()
      gin.parse_config(gin_config)

    dag_layer = ConfigurableDAGLayer()
    outputs = dag_layer(self.inputs)
    self.assertIsInstance(outputs, dict)

    z = outputs['bottleneck']['z_bottleneck']
    x_rec = outputs['decoder']['reconstruction']
    x_rec2 = outputs['out']['reconstruction']

    # Confirm that layer generates correctly sized tensors.
    self.assertEqual(outputs['test_data'].shape, self.x.shape)
    self.assertEqual(outputs['inputs']['test_data'].shape, self.x.shape)
    self.assertEqual(x_rec.shape, self.x.shape)
    self.assertEqual(z.shape[-1], self.z_dims)
    self.assertAllClose(x_rec, x_rec2)

    # Confirm that variables are inherited by DAGLayer.
    self.assertLen(dag_layer.trainable_variables, 6)  # 3 weights, 3 biases.
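The with gin.unlock_config(): block above matters when an earlier test has locked the config (e.g. via gin.finalize()); inside the block, modifying the config is permitted again. A minimal sketch with a hypothetical configurable g:

import gin

@gin.configurable
def g(x=0):  # hypothetical configurable
    return x

gin.parse_config('g.x = 1')
gin.finalize()  # locks the config against further modification

with gin.unlock_config():
    gin.bind_parameter('g.x', 2)  # allowed only inside the unlock block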
Example #10
def load_sim_config(render=True):
  """Builds the environment for the quadruped robot.

  Args:
    render: Enable/disable rendering.
  """
  gin.clear_config(clear_constants=False)
  config_file = CONFIG_FILE
  gin.parse_config_file(config_file)

  # Sim bindings
  # Overwrite a few parameters.

  action_repeat = 4
  gin.bind_parameter("SimulationParameters.num_action_repeat", action_repeat)
  gin.bind_parameter("laikago_v2.Laikago.action_repeat", action_repeat)

  # Control latency is NOT modeled properly for inverse kinematics and
  # jacobians, as we are directly calling the pybullet API. We will try to
  # fix this by loading a separate pybullet instance, setting the pose and
  # joint angles (which have latency in them), and then running the
  # jacobian/IK.
  gin.bind_parameter("laikago_v2.Laikago.motor_control_mode",
                     robot_config.MotorControlMode.HYBRID)
  # Slightly bump up the adduction/abduction motor d gain for better tracking.
  gin.bind_parameter("hybrid_motor_model.HybridMotorModel.kd", _MOTOR_KD)
  gin.bind_parameter("SimulationParameters.enable_rendering", render)
  gin.bind_parameter("env_loader.load.wrapper_classes", [])
Example #11
def test_gin_files(self):
    for root, dirs, files in os.walk(BASE_DIR):
        for file in files:
            path = os.path.join(root, file)
            if file.endswith('.gin') and os.path.isfile(path):
                load_gin_config(path)
                gin.clear_config()
Example #12
def main(_):
    logging.set_verbosity(logging.INFO)

    for env_type, model, training_id, lin_thresh in itertools.product(
            ENVS, MODELS, TRAINING_IDS, [True, False]):
        if lin_thresh and "threshold" not in env_type:
            continue  # Can't evaluate a non threshold agent on threshold utilities.
        if (not lin_thresh) and ("linear_threshold" in env_type
                                 or "linear_dual_threshold" in env_type):
            continue  # Can't sample out of distribution for a linear (dual) threshold agent.

        experiment_dir = os.path.join(FLAGS.root_dir,
                                      "-".join([model, env_type, training_id]))
        model_path = os.path.join(experiment_dir, 'model', 'dqn_model.h5')
        results_path = generate_results_path(Path(FLAGS.results_dir),
                                             env=env_type,
                                             model=model,
                                             training_id=training_id,
                                             lin_thresh=lin_thresh)

        if not check_lock(results_path=results_path):
            continue

        acquire_lock(results_path=results_path)

        print(f"\n\n{results_path}\n")

        # Loading appropriate gin configs for the environment and this experiment
        gin.clear_config()
        qnet_gin, env_gin = experiment_dir.split("/")[-1].split("-")[:2]
        gin_files = [
            Path("tunable-agents-MORL/configs/envs/fixed_env.gin"),
            Path("tunable-agents-MORL/configs/qnets/" + qnet_gin + ".gin")
        ]
        utility.load_gin_configs(gin_files, [])

        # Loading trained agent model
        env_kwargs = copy.copy(ENV_KWARGS[env_gin])
        if lin_thresh and ("linear" not in env_kwargs["utility_type"]):
            env_kwargs["utility_type"] = "linear_" + env_kwargs["utility_type"]
        elif (not lin_thresh) and ("linear" in env_kwargs["utility_type"]
                                   ) and ("threshold" in env_type):
            env_kwargs["utility_type"] = env_kwargs["utility_type"][7:]
        env = utility.create_environment(**env_kwargs)
        tf_agent = agent.DQNAgent(epsilon=0, obs_spec=env.observation_spec())
        tf_agent.load_model(model_path)

        # Selecting the utilities to run on
        utilities = utility_list(env_kwargs["utility_type"])

        # Evaluating the agent on the fixed environment
        results = fixed_env_eval(tf_agent, utilities, **env_kwargs)

        # Save results
        if not results_path.parent.exists():
            results_path.parent.mkdir(parents=True)
        np.save(results_path, results)

        release_lock(results_path=results_path)
Example #13
def test_train_regions(tmp_path, data_dir, config_gin, dataspec_bias, regions):
    K.clear_session()
    gin.clear_config()
    bpnet_train(dataspec=str(dataspec_bias),
                output_dir=str(tmp_path),
                premade='bpnet9',
                config=str(config_gin),
                override=f'bpnet_data.intervals_file="{regions}"',
                num_workers=2)
Example #14
def test_trained_model_bed6(tmp_path, data_dir, config_gin, dataspec_bed6):
    K.clear_session()
    gin.clear_config()
    bpnet_train(dataspec=str(dataspec_bed6),
                output_dir=str(tmp_path),
                premade='bpnet9',
                config=str(config_gin),
                override='seq_width=100;train.batch_size=8',
                num_workers=2)
Example #15
def test_trained_model_vmtouch(tmp_path, data_dir, config_gin, dataspec_bias):
    K.clear_session()
    gin.clear_config()
    bpnet_train(dataspec=str(dataspec_bias),
                output_dir=str(tmp_path),
                premade='bpnet9',
                config=str(config_gin),
                vmtouch=True,
                num_workers=1)
Example #16
def test_trained_model_premade_pyspec(tmp_path, data_dir, config_gin,
                                      dataspec_bias):
    K.clear_session()
    gin.clear_config()
    bpnet_train(dataspec=str(dataspec_bias),
                output_dir=str(tmp_path),
                premade='bpnet9-pyspec',
                config=str(config_gin),
                num_workers=2)
Example #17
def test_load_file(f_conf_123):
    with tempfile.NamedTemporaryFile(mode='w+') as tmp_config_file:
        tmp_config = tmp_config_file.name
        tmp_config_file.write(f_conf_123)
        tmp_config_file.flush()

        gin.parse_config_file(tmp_config)
    assert f() == 123
    gin.clear_config()
Example #18
    def wrapped_run_func(bind_uri: str) -> None:
        # This is the main entrypoint for request based communication
        log.info('Configuring zmq socket...')
        context = zmq.Context()
        socket = context.socket(zmq.REP)
        socket.bind(bind_uri)
        signal.signal(signal.SIGTERM, handle_signal)
        global abort, waiting, reply
        abort = False
        while not abort:
            log.info('Waiting for requests...')
            waiting = True
            request = json.loads(socket.recv_json())
            zpy.logging.linebreaker_log('new request')
            log.info(f'New request: {pformat(request)}')
            waiting = False

            # Reply will include duration of request
            start_time = time.time()
            try:
                # Request can set a log level
                log_level = request.get('log_level', None)
                if log_level is not None:
                    zpy.logging.set_log_levels(level=log_level)

                # Default reply will include a message and an error code
                reply = {
                    'request': request,
                    'code': 200,
                }

                # Reset any gin configs
                try:
                    gin.enter_interactive_mode()
                    gin.clear_config()
                except Exception as e:
                    log.warning(
                        f'Could not reset gin configs before request: {e}')

                # Call the function that was given
                run_func(request)

            except Exception as e:
                reply['exception'] = str(e)
                reply['code'] = 400

            # Duration of request is logged and sent in reply
            duration = time.time() - start_time
            reply['duration'] = duration

            # Send reply message back through the socket
            zpy.logging.linebreaker_log('reply')
            log.info(f'{pformat(reply)}')
            socket.send_json(json.dumps(reply))

        log.info('Exiting launcher.')
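The gin.enter_interactive_mode() / gin.clear_config() pair above resets gin between requests in this long-lived worker: interactive mode permits configurables to be re-registered (e.g. when a request re-imports a module), and clear_config drops the previous request's bindings. A minimal sketch of that reset step (the helper name is hypothetical):

import gin

def reset_gin() -> None:  # hypothetical helper
    """Give each incoming request a clean gin state."""
    gin.enter_interactive_mode()  # allow re-registration of configurables
    gin.clear_config()            # drop all bindings from the last request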
Example #19
def setUp(self):
    super(ModularGANTest, self).setUp()
    FLAGS.data_fake_dataset = True
    gin.clear_config()
    self.model_dir = os.path.join(FLAGS.test_tmpdir, "model_dir")
    if tf.gfile.Exists(self.model_dir):
        tf.gfile.DeleteRecursively(self.model_dir)
    self.run_config = tf.contrib.tpu.RunConfig(
        model_dir=self.model_dir,
        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
Example #20
def setUp(self):
    super(S3GANTest, self).setUp()
    FLAGS.data_fake_dataset = True
    gin.clear_config()
    unused_sub_dir = str(datetime.datetime.now().microsecond)
    self.model_dir = os.path.join(FLAGS.test_tmpdir, unused_sub_dir)
    assert not tf.gfile.Exists(self.model_dir)
    self.run_config = tf.contrib.tpu.RunConfig(
        model_dir=self.model_dir,
        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
Example #21
def test_load_file_via_fcn(f_conf_123):
    with tempfile.NamedTemporaryFile(mode='w+',
                                     delete=False) as tmp_config_file:
        tmp_config = tmp_config_file.name
        tmp_config_file.write(f_conf_123)

    load_config_files([tmp_config])
    assert f() == 123
    os.unlink(tmp_config)
    gin.clear_config()
Example #22
    def testTargetSpec(self, target_name):
        gin.clear_config()
        gin.bind_parameter("cloud.path", self.test_cloud)
        gin.bind_parameter("german.path", self.test_german)

        target, spec = neutra.GetTargetSpec(target_name,
                                            num_dims=5,
                                            regression_dataset="german",
                                            regression_type="gamma_scales2")
        lp = self.evaluate(target.log_prob(tf.ones([2, spec.num_dims])))
        self.assertAllEqual([2], lp.shape)
Example #23
def test_modisco_run(tmp_path, contrib_score_grad, modisco_config_gin,
                     expected_modisco_files):
    gin.clear_config()
    bpnet_modisco_run(contrib_file=str(contrib_score_grad),
                      output_dir=tmp_path,
                      config=str(modisco_config_gin),
                      )
    output_files = os.listdir(tmp_path)
    for f in expected_modisco_files:
        assert f in output_files
    assert np.all(np.load(tmp_path / 'modisco-run.subset-contrib-file.npy') == 1)
Example #24
  def testNeuTraExperiment(self):
    gin.clear_config()
    gin.bind_parameter("target_spec.name", "ill_conditioned_gaussian")
    gin.bind_parameter("chain_stats.compute_stats_over_time", True)
    exp = neutra.NeuTraExperiment(bijector="affine", log_dir=self.temp_dir)

    exp.Train(4, batch_size=2)
    exp.Eval(batch_size=2)
    exp.Benchmark(test_num_steps=100, test_batch_size=2, batch_size=2)
    exp.TuneObjective(
        1, 0.1, batch_size=2, test_num_steps=600, f_name="first_moment_mean")
Example #25
    def test_dummyrun_featureless_learner(self):
        gin.bind_parameter(
            "FeaturelessMedianPreferenceAverageRegularizationAggregator.epochs",
            10)
        learner_obj = DatabasePreferenceLearnerFeatureless(load=False)
        learner_obj.fit()
        learner_obj.update_features()
        gin.clear_config()

        # now should have video ratings for the aggregated model
        self._check_output()
Example #26
def trained_model(data_dir, dataspec_task1, config_gin):
    K.clear_session()
    gin.clear_config()
    bpnet_train(dataspec=dataspec_task1,
                output_dir=data_dir,
                run_id='trained_model',
                premade='bpnet9',
                config=str(config_gin),
                num_workers=1,
                overwrite=True
                )
    return data_dir / 'trained_model'
Example #27
def setUp(self):
    super(CompareGanTestCase, self).setUp()
    # Use fake datasets instead of reading real files.
    FLAGS.data_fake_dataset = True
    # Clear the gin configuration.
    gin.clear_config()
    # Mock the inception graph.
    fake_inception_graph = create_fake_inception_graph()
    self.inception_graph_def_mock = mock.patch.object(
        eval_utils,
        "get_inception_graph_def",
        return_value=fake_inception_graph).start()
Example #28
def trained_model_w_bias(config_gin, data_dir, dataspec_bias):
    K.clear_session()
    gin.clear_config()
    bpnet_train(dataspec=str(dataspec_bias),
                output_dir=str(data_dir),
                run_id='trained_model_w_bias',
                premade='bpnet9',
                config=str(config_gin),
                num_workers=1,
                overwrite=True,
                )
    return data_dir / 'trained_model_w_bias'
Example #29
def test_learn_vectorincrement():
    ve_config_path = os.path.join(os.path.dirname(vectorincrement.__file__),
                                  'config', 've5.gin')
    learner_config_path = os.path.join(os.path.dirname(learner.__file__), '..',
                                       'configs', 'test.gin')
    print(ve_config_path, learner_config_path)

    load_config_files([ve_config_path, learner_config_path])

    l = learner.CausalModelLearnerRL(config.Config())
    l.train()
    gin.clear_config()
Example #30
  def setUp(self):
    super().setUp()
    gin.clear_config()
    gin.parse_config(GIN_CONFIG_MULTI)
    self.addCleanup(mock.patch.stopall)
    self.mock_load1 = mock.patch.object(
        loaders.TFDSLoader, 'load', autospec=True).start()
    self.mock_load2 = mock.patch.object(
        loaders.CSVLoader, 'load', autospec=True).start()

    self.batch = (32, 16)
    self.lengths = (1024, 512)  # from GIN_CONFIG_MULTI.