def main(unused_argv):
    del unused_argv  # Unused
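    # Pulls a few batches from the pretraining input pipeline and prints every
    # feature, decoding token-id fields back to text for inspection.
    # get_tokenizer, input_func_builder, and FLAGS are assumed to be provided
    # by the surrounding project (not shown here).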

    tokenizer = get_tokenizer()

    input_fn = input_func_builder.get_input_fn(
        doc_dir=FLAGS.doc_dir,
        semi_dir=FLAGS.semi_dir,
        sent_dir=FLAGS.sent_dir,
        split=FLAGS.split,
        uncased=FLAGS.uncased,
        seq_len=FLAGS.seq_len,
        bsz_per_host=FLAGS.bsz_per_host,
        num_hosts=1,
        num_core_per_host=FLAGS.num_core_per_host,
    )

    bsz_per_core = FLAGS.bsz_per_host // FLAGS.num_core_per_host
    params = {"batch_size": bsz_per_core}

    dataset = input_fn(params)
    example = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
    for k, v in example.items():
        print(k, v.shape)

    with tf.Session() as sess:
        for _ in range(FLAGS.num_example):
            example_np = sess.run(example)
            print("=" * 160)
            for k, v in example_np.items():
                if v.ndim == 2:
                    for i in range(v.shape[0]):
                        if k in [
                                "gen_inp", "gen_tgt", "dec_inp", "dec_tgt",
                                "inputs", "dec_masked_tgt"
                        ]:
                            print(k, v[i].shape,
                                  tokenizer.convert_ids_to_text(v[i].tolist()))
                        else:
                            print(k, v[i].shape,
                                  " ".join([str(j) for j in v[i].tolist()]))
                elif v.ndim == 1:
                    if k in [
                            "gen_inp", "gen_tgt", "dec_inp", "dec_tgt",
                            "inputs", "dec_masked_tgt"
                    ]:
                        print(k, v.shape,
                              tokenizer.convert_ids_to_text(v.tolist()))
                    else:
                        print(k, v.shape,
                              " ".join([str(j) for j in v.tolist()]))
                elif v.ndim >= 3:
                    for i in range(v.shape[0]):
                        print(k, v.shape, v[i])
Example #2
def Read(record_file):
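    # Reads one serialized example from `record_file` with the TF1
    # queue-runner pipeline and displays the two encoded PNG views.
    # `publish.image` is assumed to be a display helper from the surrounding
    # project (not shown here).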
    keys_to_features = {
        'view1/image/encoded':
        tf.FixedLenFeature((), dtype=tf.string, default_value=''),
        'view1/image/format':
        tf.FixedLenFeature([], dtype=tf.string, default_value='png'),
        'view1/image/height':
        tf.FixedLenFeature([1], dtype=tf.int64, default_value=64),
        'view1/image/width':
        tf.FixedLenFeature([1], dtype=tf.int64, default_value=64),
        'view2/image/encoded':
        tf.FixedLenFeature((), dtype=tf.string, default_value=''),
        'view2/image/format':
        tf.FixedLenFeature([], dtype=tf.string, default_value='png'),
        'view2/image/height':
        tf.FixedLenFeature([1], dtype=tf.int64, default_value=64),
        'view2/image/width':
        tf.FixedLenFeature([1], dtype=tf.int64, default_value=64),
        'image/encoded':
        tf.FixedLenFeature([2], dtype=tf.string, default_value=['', '']),
        'same_object':
        tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
        'relative_pos':
        tf.FixedLenFeature([3], dtype=tf.float32),
    }
    with tf.Graph().as_default():
        filename_queue = tf.train.string_input_producer([record_file],
                                                        capacity=10)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        example = tf.parse_single_example(serialized_example, keys_to_features)
        #png1 = example['view1/image/encoded']
        #png2 = example['view2/image/encoded']
        png = example['image/encoded']
        coord = tf.train.Coordinator()
        print('Reading images:')
        with tf.Session() as sess:
            queue_threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            #image1, image2 = sess.run([png1, png2])
            image1, image2 = sess.run([png[0], png[1]])
            publish.image(encoded_image=image1, width=20)
            publish.image(encoded_image=image2, width=20)
        coord.request_stop()
        coord.join(queue_threads)
Example #3
def main(unused_argv):
    del unused_argv  # Unused
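    # Restores a checkpoint and runs a single forward pass over a fixed
    # token-id sequence to extract hidden states. model_func_builder,
    # get_tokenizer, and FLAGS are assumed to come from the surrounding
    # project.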

    tf.logging.set_verbosity(tf.logging.INFO)

    #### Tokenizer
    tokenizer = get_tokenizer()

    #### Get corpus info
    n_token = tokenizer.get_vocab_size()
    tf.logging.info("n_token %d", n_token)

    # test data
    inputs_np = [
        3933, 7752, 15179, 893, 24249, 703, 19119, 4, 2919, 335, 8511, 1094,
        43, 1661, 669, 5481, 1106, 7029, 891, 891
    ]
    type_id_np = [0] * len(inputs_np)
    inputs_np = np.array(inputs_np)[None]
    type_id_np = np.array(type_id_np)[None]

    # tensorflow graph
    inputs = tf.placeholder(tf.int64, [1, None])
    type_id = tf.placeholder(tf.int64, [1, None])
    hiddens = model_func_builder.extract_hiddens(inputs,
                                                 type_id,
                                                 n_token,
                                                 is_training=False)

    # run session
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=False)) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, FLAGS.init_checkpoint)

        feed_dict = {
            inputs: inputs_np,
            type_id: type_id_np,
        }

        hiddens_np = sess.run(hiddens, feed_dict=feed_dict)
        tf.logging.info("len(hiddens_np) = %d", len(hiddens_np))
Example #4
def main(unused_argv):
    del unused_argv  # Unused
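    # Same idea as the first example, but for a TFRecord-backed seq2seq
    # dataset: iterate a few batches and decode source/target ids to tokens.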

    tokenizer = get_tokenizer()

    input_fn, _ = input_func_builder.get_input_fn(
        tfrecord_dir=FLAGS.record_dir,
        split=FLAGS.split,
        max_length=FLAGS.max_length,
        num_hosts=1,
        uncased=FLAGS.uncased,
        num_threads=FLAGS.num_threads,
    )

    bsz_per_core = FLAGS.bsz_per_host // FLAGS.num_core_per_host
    params = {"batch_size": bsz_per_core}

    dataset = input_fn(params)
    example = dataset.make_one_shot_iterator().get_next()

    with tf.Session() as sess:
        for _ in range(FLAGS.num_example):
            example_np = sess.run(example)
            print("=" * 160)
            for k, v in example_np.items():
                print(k, v.shape)
                if v.ndim == 2:
                    for i in range(v.shape[0]):
                        if k in ["source", "target", "inputs", "targets"]:
                            print(
                                tokenizer.convert_ids_to_tokens(v[i].tolist()))
                        else:
                            print(v[i].tolist())
                elif v.ndim == 1:
                    if k in ["source", "target", "inputs", "targets"]:
                        print(tokenizer.convert_ids_to_tokens(v.tolist()))
                    else:
                        print(v.tolist())
Example #5
def main(_):
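    # Train/eval loop for a dynamic RNN speech model: loads batched MFCC/label
    # data, builds the graph, restores or initializes variables, and logs CER
    # (character level) or PER (phoneme level) per batch. DRNN,
    # load_batched_data, and the other helpers are assumed to come from the
    # surrounding project.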
    train_mfcc_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TRAIN',
                                  'mfcc')
    train_label_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TRAIN',
                                   'label')
    test_mfcc_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TEST',
                                 'mfcc')
    test_label_dir = os.path.join(FLAGS.input_data_dir, FLAGS.level, 'TEST',
                                  'label')

    savedir = os.path.join(FLAGS.exp_dir, FLAGS.level, 'save')
    resultdir = os.path.join(FLAGS.exp_dir, FLAGS.level, 'result')

    if FLAGS.is_training:
        batched_data, max_time_steps, total_n = load_batched_data(
            train_mfcc_dir, train_label_dir, FLAGS.batch_size, FLAGS.level)
    else:
        batched_data, max_time_steps, total_n = load_batched_data(
            test_mfcc_dir, test_label_dir, FLAGS.batch_size, FLAGS.level)

    hparams = {}
    hparams['level'] = FLAGS.level
    hparams['batch_size'] = FLAGS.batch_size
    hparams['partition_size'] = FLAGS.partition_size
    hparams['num_hidden'] = FLAGS.num_hidden
    hparams['feature_length'] = FLAGS.feature_length
    hparams['num_classes'] = FLAGS.num_classes
    hparams['num_proj'] = FLAGS.num_proj
    hparams['learning_rate'] = FLAGS.learning_rate
    hparams['keep_prob'] = FLAGS.keep_prob
    hparams['clip_gradient_norm'] = FLAGS.clip_gradient_norm
    hparams['use_peepholes'] = FLAGS.use_peepholes
    if FLAGS.activation == 'tanh':
        hparams['activation'] = tf.tanh
    elif FLAGS.activation == 'relu':
        hparams['activation'] = tf.nn.relu
    hparams['max_time_steps'] = max_time_steps
    with tf.Graph().as_default():
        model = DRNN(FLAGS.cell, hparams, FLAGS.is_training)
        train_writer = tf.summary.FileWriter(resultdir + '/train')
        test_writer = tf.summary.FileWriter(resultdir + '/test')
        with tf.Session(FLAGS.master) as sess:
            # restore from stored model
            if FLAGS.restore:
                ckpt = tf.train.get_checkpoint_state(savedir)
                if ckpt and ckpt.model_checkpoint_path:
                    model.saver.restore(sess, ckpt.model_checkpoint_path)
                    print('Model restored from: ' + ckpt.model_checkpoint_path)
            else:
                print('Initializing')
                sess.run(model.initial_op)
            train_writer.add_graph(sess.graph)
            for epoch in range(FLAGS.num_epochs):
                ## training
                start = time.time()
                if FLAGS.is_training:
                    print('Epoch', epoch + 1, '...')
                batch_errors = np.zeros(len(batched_data))
                batched_random_idx = np.random.permutation(len(batched_data))

                for batch, batch_original_idx in enumerate(batched_random_idx):
                    batch_inputs, batch_target_sparse, batch_seq_length = batched_data[
                        batch_original_idx]
                    batch_tgt_idx, batch_tgt_vals, batch_tgt_shape = batch_target_sparse
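                    # The targets arrive as the components of a sparse tensor
                    # (indices, values, dense shape), which the model appears
                    # to consume through separate placeholders.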
                    feeddict = {
                        model.x: batch_inputs,
                        model.tgt_idx: batch_tgt_idx,
                        model.tgt_vals: batch_tgt_vals,
                        model.tgt_shape: batch_tgt_shape,
                        model.seq_length: batch_seq_length
                    }

                    if FLAGS.is_training and (
                        (epoch * len(batched_random_idx) + batch + 1) % 20 == 0
                            or (epoch == FLAGS.num_epochs - 1
                                and batch == len(batched_random_idx) - 1)):
                        checkpoint_path = os.path.join(savedir, 'model.ckpt')
                        model.saver.save(sess,
                                         checkpoint_path,
                                         global_step=model.global_step)
                        print('Model has been saved in {}'.format(savedir))

                    if FLAGS.level == 'cha':
                        if FLAGS.is_training:
                            _, l, pre, y, er, global_step = sess.run(
                                [
                                    model.train_op, model.loss,
                                    model.predictions, model.y,
                                    model.error_rate, model.global_step
                                ],
                                feed_dict=feeddict)
                            batch_errors[batch] = er
                            if global_step % 10 == 0:
                                log_scalar(train_writer, 'CER',
                                           er / FLAGS.batch_size, global_step)
                                print(
                                    '{} mode, global_step: {}, lr: {}, total: {}, '
                                    'batch: {}/{}, epoch: {}/{}, train loss = {:.3f}, '
                                    'mean train CER = {:.3f}'.format(
                                        FLAGS.level, global_step,
                                        FLAGS.learning_rate,
                                        total_n, batch + 1,
                                        len(batched_random_idx), epoch + 1,
                                        FLAGS.num_epochs, l,
                                        er / FLAGS.batch_size))

                        elif not FLAGS.is_training:
                            l, pre, y, er, global_step = sess.run(
                                [
                                    model.loss, model.predictions, model.y,
                                    model.error_rate, model.global_step
                                ],
                                feed_dict=feeddict)
                            batch_errors[batch] = er
                            log_scalar(test_writer, 'CER',
                                       er / FLAGS.batch_size, global_step)
                            print(
                                '{} mode, global_step: {}, total: {}, batch: {}/{}, '
                                'test loss = {:.3f}, mean test CER = {:.3f}'.format(
                                    FLAGS.level, global_step, total_n,
                                    batch + 1, len(batched_random_idx), l,
                                    er / FLAGS.batch_size))

                    elif FLAGS.level == 'phn':
                        if FLAGS.is_training:
                            _, l, pre, y, global_step = sess.run(
                                [
                                    model.train_op, model.loss,
                                    model.predictions, model.y,
                                    model.global_step
                                ],
                                feed_dict=feeddict)
                            er = get_edit_distance([pre.values], [y.values],
                                                   True, FLAGS.level)
                            if global_step % 10 == 0:
                                log_scalar(train_writer, 'PER', er,
                                           global_step)
                                print(
                                    '{} mode, global_step: {}, lr: {}, total: {}, '
                                    'batch: {}/{}, epoch: {}/{}, train loss = {:.3f}, '
                                    'mean train PER = {:.3f}'.format(
                                        FLAGS.level, global_step,
                                        FLAGS.learning_rate,
                                        total_n, batch + 1,
                                        len(batched_random_idx), epoch + 1,
                                        FLAGS.num_epochs, l, er))
                            batch_errors[batch] = er * len(batch_seq_length)
                        elif not FLAGS.is_training:
                            l, pre, y, global_step = sess.run(
                                [
                                    model.loss, model.predictions, model.y,
                                    model.global_step
                                ],
                                feed_dict=feeddict)
                            er = get_edit_distance([pre.values], [y.values],
                                                   True, FLAGS.level)
                            log_scalar(test_writer, 'PER', er, global_step)
                            print(
                                '{} mode, global_step: {}, total: {}, batch: {}/{}, '
                                'test loss = {:.3f}, mean test PER = {:.3f}'.format(
                                    FLAGS.level, global_step, total_n,
                                    batch + 1, len(batched_random_idx), l, er))
                            batch_errors[batch] = er * len(batch_seq_length)

                    # NOTE: stop the batch loop early if the error rate for this batch hits 100%.
                    if er / FLAGS.batch_size == 1.0:
                        break

                    if batch % 100 == 0:
                        print('Truth:\n' +
                              output_to_sequence(y, level=FLAGS.level))
                        print('Output:\n' +
                              output_to_sequence(pre, level=FLAGS.level))

                end = time.time()
                delta_time = end - start
                print('Epoch ' + str(epoch + 1) + ' took ' +
                      str(delta_time) + ' s')

                if FLAGS.is_training:
                    if (epoch + 1) % 1 == 0:  # save a checkpoint every epoch
                        checkpoint_path = os.path.join(savedir, 'model.ckpt')
                        model.saver.save(sess,
                                         checkpoint_path,
                                         global_step=model.global_step)
                        print('Model has been saved in {}'.format(savedir))
                    epoch_er = batch_errors.sum() / total_n
                    print('Epoch', epoch + 1, 'mean train error rate:',
                          epoch_er)

                if not FLAGS.is_training:
                    with tf.gfile.GFile(
                            os.path.join(resultdir,
                                         FLAGS.level + '_result.txt'),
                            'a') as result:
                        result.write(
                            output_to_sequence(y, level=FLAGS.level) + '\n')
                        result.write(
                            output_to_sequence(pre, level=FLAGS.level) + '\n')
                        result.write('\n')
                    epoch_er = batch_errors.sum() / total_n
                    print(' test error rate:', epoch_er)
Example #6
def _reset_for_test():
  # Resets the default graph and yields a fresh session (presumably used as a
  # test fixture or context manager by the caller).
  tf.reset_default_graph()
  yield tf.Session('')
Example #7
def run_experiment(study_hparams=None, trial_handle=None, tuner=None):
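    # Meta-Q-learning on cartpole: collect per-task data, adapt a Q-function
    # in the inner loop, accumulate meta-gradients across tasks, and apply one
    # meta-update per outer step. MetaQ, QPolicy, MultiTaskReplayBuffer,
    # cartpole, and the flag definitions are assumed to come from the
    # surrounding project.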

    FLAGS = deepcopy(tf.app.flags.FLAGS)

    if FLAGS.use_vizier:
        for key, val in study_hparams.values().items():
            setattr(FLAGS, key, val)

    tf.reset_default_graph()
    np.random.seed(FLAGS.random_seed)
    tf.set_random_seed(FLAGS.random_seed)

    # Initialize env

    env_kwargs = {
        'goal_x': FLAGS.goal_x,
        'min_goal_x': FLAGS.min_goal_x,
        'max_goal_x': FLAGS.max_goal_x,
        'x_threshold': FLAGS.x_threshold,
        'max_reward_for_dist': FLAGS.max_reward_for_dist,
        'reward_per_time_step': FLAGS.reward_per_time_step,
        'fixed_initial_state': FLAGS.fixed_initial_state,
        'reweight_rewards': FLAGS.reweight_rewards
    }
    env = cartpole.make_env(env_kwargs)
    eval_env = cartpole.make_env(env_kwargs)

    if not FLAGS.fixed_env:
        env.env.randomize()

    if trial_handle:
        tensorboard_path = os.path.join(FLAGS.output_dir, trial_handle)
    else:
        tensorboard_path = FLAGS.output_dir
    tf.gfile.MakeDirs(tensorboard_path)

    kwargs = dict(observation_shape=[None] + list(env.observation_space.shape),
                  action_dim=1)
    default_hps = MetaQ.get_default_config().values()

    for key in flags_def:
        if key in default_hps:
            kwargs[key] = getattr(FLAGS, key)

    hps = tf.contrib.training.HParams(**kwargs)

    meta_q = MetaQ(hps, fully_connected_net(FLAGS.nn_arch, FLAGS.activation))
    meta_q.build_graph()

    init_op = tf.global_variables_initializer()

    logger = TensorBoardLogger(tensorboard_path)

    with tf.Session() as sess:
        sess.run(init_op)
        meta_q.init_session(sess)

        inner_loop_buffer = MultiTaskReplayBuffer(len(env.env.goal_positions),
                                                  200000, FLAGS.random_seed)
        outer_loop_buffer = MultiTaskReplayBuffer(len(env.env.goal_positions),
                                                  200000, FLAGS.random_seed)

        pre_update_rewards = []
        post_update_rewards = []
        post_update_greedy_rewards = []
        post_update_q_func = None
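        # Outer loop: periodically refresh the replay buffers with freshly
        # collected data, then for each sampled task run an inner-loop
        # adaptation and accumulate the corresponding meta-gradient.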
        for outer_step in range(FLAGS.outer_loop_steps):
            print('State is ', env.env.state)
            if outer_step % FLAGS.on_policy_steps == 0:
                if FLAGS.fixed_env:
                    goal_positions = [env.env.goal_x]
                else:
                    goal_positions = env.env.goal_positions
                # NOTE: Approximately ~30 to 60 states per trajectory
                inner_loop_buffer = collect_off_policy_data(
                    env, goal_positions, meta_q, post_update_q_func,
                    inner_loop_buffer, FLAGS.inner_loop_n_trajs,
                    FLAGS.inner_loop_data_collection,
                    FLAGS.inner_loop_greedy_epsilon,
                    FLAGS.inner_loop_bolzmann_temp)
                outer_loop_buffer = collect_off_policy_data(
                    env, goal_positions, meta_q, post_update_q_func,
                    outer_loop_buffer, FLAGS.outer_loop_n_trajs,
                    FLAGS.outer_loop_data_collection,
                    FLAGS.outer_loop_greedy_epsilon,
                    FLAGS.outer_loop_bolzmann_temp)

            post_update_greedy_rewards = []

            finetuned_policy = None
            for task_id in range(FLAGS.n_meta_tasks):
                # print('Task: {}'.format(task_id))

                if not FLAGS.fixed_env:
                    env.env.randomize()

                (inner_observations, inner_actions, inner_rewards,
                 inner_next_observations,
                 inner_dones) = inner_loop_buffer.sample(
                     env.env.task_id, FLAGS.inner_loop_n_states)
                # Evaluating true rewards
                post_update_q_func = meta_q.get_post_update_q_function(
                    inner_observations, inner_actions, inner_rewards,
                    inner_next_observations, inner_dones)

                policy = QPolicy(post_update_q_func, epsilon=0.0)

                if outer_step % FLAGS.report_steps == 0 or outer_step >= (
                        FLAGS.outer_loop_steps - 1):
                    _, _, greedy_rewards, _, _ = cartpole_utils.collect_data(
                        env,
                        n_trajs=FLAGS.outer_loop_greedy_eval_n_trajs,
                        policy=policy)
                    post_update_greedy_rewards.append(
                        np.sum(greedy_rewards) /
                        FLAGS.outer_loop_greedy_eval_n_trajs)

                finetuned_policy = policy

                (outer_observations, outer_actions, outer_rewards,
                 outer_next_observations,
                 outer_dones) = outer_loop_buffer.sample(
                     env.env.task_id, FLAGS.outer_loop_n_states)
                meta_q.accumulate_gradient(
                    inner_observations,
                    inner_actions,
                    inner_rewards,
                    inner_next_observations,
                    inner_dones,
                    outer_observations,
                    outer_actions,
                    outer_rewards,
                    outer_next_observations,
                    outer_dones,
                )

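            # One meta-update over the per-task gradients accumulated above.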
            pre_update_loss, post_update_loss = meta_q.run_train_step()

            if not FLAGS.outer_loop_online_target and outer_step % FLAGS.target_update_freq == 0:
                print("updating target network")
                meta_q.update_target_network()

            log_data = dict(
                pre_update_loss=pre_update_loss,
                post_update_loss=post_update_loss,
                goal_x=env.env.goal_x,
            )

            #TODO(hkannan): uncomment this later!!!
            if outer_step % FLAGS.report_steps == 0 or outer_step >= (
                    FLAGS.outer_loop_steps - 1):
                # reward_across_20_tasks = evaluate(
                #     policy, eval_env, meta_q,
                #     inner_loop_n_trajs=FLAGS.inner_loop_n_trajs,
                #     outer_loop_n_trajs=FLAGS.outer_loop_n_trajs, n=21,
                #     weight_rewards=FLAGS.weight_rewards)
                # log_data['reward_mean'] = np.mean(reward_across_20_tasks)
                # log_data['reward_variance'] = np.var(reward_across_20_tasks)
                log_data['post_update_greedy_reward'] = np.mean(
                    post_update_greedy_rewards)
                log_data['post_update_greedy_reward_variance'] = np.var(
                    post_update_greedy_rewards)

            print('Outer step: {}, '.format(outer_step), log_data)
            logger.log_dict(outer_step, log_data)
            # if outer_step % FLAGS.video_report_steps == 0 or outer_step >= (FLAGS.outer_loop_steps - 1):
            #   video_data = {
            #       'env_kwargs': env_kwargs,
            #       'inner_loop_data_collection': FLAGS.inner_loop_data_collection,
            #       'inner_loop_greedy_epsilon': FLAGS.inner_loop_greedy_epsilon,
            #       'inner_loop_bolzmann_temp': FLAGS.inner_loop_bolzmann_temp,
            #       'inner_loop_n_trajs': FLAGS.inner_loop_n_trajs,
            #       'meta_q_kwargs': kwargs,
            #       'weights': meta_q.get_current_weights(),
            #       'tensorboard_path': tensorboard_path,
            #       'filename': 'random_task'
            #   }
            #   reward_across_20_tasks = evaluate(
            #       policy, eval_env, meta_q,
            #       inner_loop_n_trajs=FLAGS.inner_loop_n_trajs,
            #       outer_loop_n_trajs=FLAGS.outer_loop_n_trajs, n=21,
            #       weight_rewards=FLAGS.weight_rewards, video_data=video_data)
            #   log_data['reward_mean'] = np.mean(reward_across_20_tasks)
            #   log_data['reward_variance'] = np.var(reward_across_20_tasks)
            #   logger.log_dict(outer_step, log_data)

            if outer_step >= (FLAGS.outer_loop_steps - 1):
                greedy_reward_path = os.path.join(tensorboard_path, 'reward')
                with gfile.Open(greedy_reward_path, mode='wb') as f:
                    f.write(pickle.dumps(
                        log_data['post_update_greedy_reward']))
            if FLAGS.use_vizier:
                for v in log_data.values():
                    if not np.isfinite(v):
                        tuner.report_done(
                            infeasible=True,
                            infeasible_reason='Nan or inf encountered')
                        return

                if outer_step % FLAGS.report_steps == 0 or outer_step >= (
                        FLAGS.outer_loop_steps - 1):
                    if FLAGS.vizier_objective == 'greedy_reward':
                        objective_value = log_data['post_update_greedy_reward']
                    elif FLAGS.vizier_objective == 'loss':
                        objective_value = post_update_loss
                    elif FLAGS.vizier_objective == 'reward':
                        objective_value = log_data['reward_mean']
                    else:
                        raise ValueError('Unsupported vizier objective!')
                    tuner.report_measure(objective_value=objective_value,
                                         global_step=outer_step,
                                         metrics=log_data)

    if FLAGS.use_vizier:
        tuner.report_done()
Example #8
def EncodeImage(image_tensor):
    with tf.Session():
        image_encoded = tf.image.encode_png(tf.constant(image_tensor)).eval()
    return image_encoded
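
# A minimal usage sketch (assumptions: numpy is imported as np and the input
# is a uint8 HxWxC array; the names below are illustrative only):
#
#   image_np = np.zeros((64, 64, 3), dtype=np.uint8)
#   png_bytes = EncodeImage(image_np)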