Example #1
def mode_8(sess, graph, save_path):
    """ to find high-openshot-penalty data in 1000 real data
    """
    real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
    print('real_data.shape', real_data.shape)
    data_factory = DataFactory(real_data)
    train_data, valid_data = data_factory.fetch_data()
    # placeholder tensor
    real_data_t = graph.get_tensor_by_name('real_data:0')
    matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
    # result tensor
    heuristic_penalty_pframe = graph.get_tensor_by_name(
        'Critic/C_inference/heuristic_penalty/Min:0')
    # alternative tensor: 'Generator/G_loss/C_inference/linear_result/Reshape:0'

    if not os.path.exists(save_path):
        os.makedirs(save_path)
    real_hp_pframe_all = []
    for batch_id in range(train_data['A'].shape[0] // FLAGS.batch_size):
        index_id = batch_id * FLAGS.batch_size
        real_data = train_data['B'][index_id:index_id + FLAGS.batch_size]
        cond_data = train_data['A'][index_id:index_id + FLAGS.batch_size]
        # real
        feed_dict = {real_data_t: real_data, matched_cond_t: cond_data}
        real_hp_pframe = sess.run(heuristic_penalty_pframe,
                                  feed_dict=feed_dict)
        real_hp_pframe_all.append(real_hp_pframe)
    real_hp_pframe_all = np.concatenate(real_hp_pframe_all, axis=0)
    print(real_hp_pframe_all.shape)
    real_hp_pdata = np.mean(real_hp_pframe_all, axis=1)
    mean_ = np.mean(real_hp_pdata)
    std_ = np.std(real_hp_pdata)
    print(mean_)
    print(std_)

    concat_AB = np.concatenate([train_data['A'], train_data['B']], axis=-1)
    recovered = data_factory.recover_data(concat_AB)
    for i, v in enumerate(real_hp_pdata):
        if v > (mean_ + 2 * std_):
            print('bad', i, v)
            game_visualizer.plot_data(recovered[i],
                                      recovered.shape[1],
                                      file_path=save_path + 'bad_' + str(i) +
                                      '_' + str(v) + '.mp4',
                                      if_save=True)
        if v < 0.0025:
            print('good', i, v)
            game_visualizer.plot_data(recovered[i],
                                      recovered.shape[1],
                                      file_path=save_path + 'good_' + str(i) +
                                      '_' + str(v) + '.mp4',
                                      if_save=True)

    print('!!Completely Saved!!')
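
mode_8 expects a session whose graph has already been restored from a checkpoint. As a minimal sketch (not from the original project), a driver could look like the following, where CHECKPOINT_PATH and SAVE_PATH are illustrative names:

import tensorflow as tf

CHECKPOINT_PATH = 'checkpoints/model.ckpt'  # illustrative checkpoint prefix (assumption)
SAVE_PATH = 'results/mode_8/'               # illustrative output directory (assumption)

with tf.Session() as sess:
    # rebuild the graph structure from the .meta file, then restore the weights
    saver = tf.train.import_meta_graph(CHECKPOINT_PATH + '.meta')
    saver.restore(sess, CHECKPOINT_PATH)
    mode_8(sess, tf.get_default_graph(), SAVE_PATH)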
Example #2
def main(_):
    with tf.get_default_graph().as_default() as graph:
        real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
        print('real_data.shape', real_data.shape)
        # normalize
        data_factory = DataFactory(real_data)
        train_data, valid_data = data_factory.fetch_data()
        print(train_data['A'].shape)
        print(valid_data['A'].shape)
        # config setting
        config = TrainingConfig()
        config.show()
        # train
        training(train_data, valid_data, data_factory, config, graph)
def main(_):
    real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
    print('real_data.shape', real_data.shape)
    # normalize
    data_factory = DataFactory(real_data)
    train_data, valid_data = data_factory.fetch_data()
    print(train_data['A'].shape)
    print(valid_data['A'].shape)
    # config setting
    config = TrainingConfig()

    baseline_graph = None
    if FLAGS.baseline_checkpoint is not None:
        baseline_graph = tf.Graph()
    default_graph = tf.Graph()

    training(train_data, valid_data, data_factory, config, default_graph,
             baseline_graph)
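
All of these snippets read from a shared FLAGS object defined elsewhere. The sketch below shows how such flags are typically declared with TF1's tf.app.flags; the default values are inferred from the shapes quoted in the docstrings below (seq_length=100, batch_size=128, n_latents=100, n_conditions=128*9) and are assumptions, not the project's actual defaults.

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_path', 'data/real_data.npy',
                           'path to the real-data .npy file (assumed name)')
tf.app.flags.DEFINE_integer('seq_length', 100, 'frames per sequence')
tf.app.flags.DEFINE_integer('batch_size', 128, 'mini-batch size')
tf.app.flags.DEFINE_integer('n_latents', 100, 'latent samples per condition')
tf.app.flags.DEFINE_integer('n_conditions', 128 * 9, 'conditions to evaluate')
tf.app.flags.DEFINE_boolean('is_valid', False, 'use the validation split')
tf.app.flags.DEFINE_string('baseline_checkpoint', None, 'optional baseline checkpoint')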
def mode_1(sess, graph, save_path, is_valid=FLAGS.is_valid):
    """ to collect results 
    Saved Result
    ------------
    results_A_fake_B : float, numpy ndarray, shape=[n_latents=100, n_conditions=128*9, length=100, features=23]
        Real A + Fake B
    results_A_real_B : float, numpy ndarray, shape=[n_conditions=128*9, length=100, features=23]
        Real A + Real B
    results_critic_scores : float, numpy ndarray, shape=[n_latents=100, n_conditions=128*9]
        critic scores for each input data
    """
    # placeholder tensor
    latent_input_t = graph.get_tensor_by_name('latent_input:0')
    team_a_t = graph.get_tensor_by_name('team_a:0')
    G_samples_t = graph.get_tensor_by_name('G_samples:0')
    matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
    # result tensor
    result_t = graph.get_tensor_by_name(
        'Generator/G_inference/conv_result/conv1d/Maximum:0')
    # critic_scores_t = graph.get_tensor_by_name(
    #     'Critic/C_inference_1/conv_output/Reshape:0')
    critic_scores_t = graph.get_tensor_by_name(
        'Critic/C_inference_1/linear_result/BiasAdd:0')

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
    print('real_data.shape', real_data.shape)
    # normalize
    data_factory = DataFactory(real_data)
    # result collector
    results_A_fake_B = []
    results_A_real_B = []
    results_critic_scores = []

    # shuffle the data
    train_data, valid_data = data_factory.fetch_data()
    if is_valid:
        target_data = valid_data
    else:
        target_data = train_data
    # NOTE: this hard-coded file overrides the train/valid split chosen above
    target_data = np.load('ADD-100.npy')
    team_AB = np.concatenate(
        [
            # ball
            target_data[:, :, 0, :3].reshape(
                [target_data.shape[0], target_data.shape[1], 1 * 3]),
            # team A players
            target_data[:, :, 1:6, :2].reshape(
                [target_data.shape[0], target_data.shape[1], 5 * 2]),
            # team B players
            target_data[:, :, 6:11, :2].reshape(
                [target_data.shape[0], target_data.shape[1], 5 * 2])
        ], axis=-1
    )
    team_AB = data_factory.normalize(team_AB)
    print(team_AB.shape)
    # pad with 98 zero dummies so the batch holds 128 sequences (only the
    # first 30, the real ones, are saved below)
    dummy_AB = np.zeros(shape=[98, 100, 23])
    team_AB = np.concatenate([team_AB, dummy_AB], axis=0)
    team_A = team_AB[:, :, :13]
    team_B = team_AB[:, :, 13:]

    # for idx in range(0, FLAGS.batch_size, FLAGS.batch_size):
    real_samples = team_B
    real_conds = team_A
    # generate result
    temp_critic_scores = []
    temp_A_fake_B = []
    for i in range(FLAGS.n_latents):
        latents = z_samples(FLAGS.batch_size)
        feed_dict = {
            latent_input_t: latents,
            team_a_t: real_conds
        }
        result = sess.run(
            result_t, feed_dict=feed_dict)
        feed_dict = {
            G_samples_t: result,
            matched_cond_t: real_conds
        }
        critic_scores = sess.run(
            critic_scores_t, feed_dict=feed_dict)
        temp_A_fake_B.append(data_factory.recover_data(
            np.concatenate([real_conds, result], axis=-1)))
        temp_critic_scores.append(critic_scores)
    results_A_fake_B.append(temp_A_fake_B)
    results_critic_scores.append(temp_critic_scores)
    # concatenate along the conditions dimension (axis=1)
    results_A_fake_B = np.concatenate(results_A_fake_B, axis=1)
    results_critic_scores = np.concatenate(results_critic_scores, axis=1)
    results_A = data_factory.recover_BALL_and_A(
        real_conds)
    results_real_B = data_factory.recover_B(
        real_samples)
    results_A_real_B = np.concatenate([results_A, results_real_B], axis=-1)
    # save as numpy arrays
    print(np.array(results_A_fake_B).shape)
    print(np.array(results_A_real_B).shape)
    print(np.array(results_critic_scores).shape)
    np.save(save_path + 'results_A_fake_B.npy',
            np.array(results_A_fake_B)[:, :30].astype(np.float32).reshape([FLAGS.n_latents, 30, FLAGS.seq_length, 23]))
    np.save(save_path + 'results_A_real_B.npy',
            np.array(results_A_real_B)[:30].astype(np.float32).reshape([30, FLAGS.seq_length, 23]))
    np.save(save_path + 'results_critic_scores.npy',
            np.array(results_critic_scores)[:, :30].astype(np.float32).reshape([FLAGS.n_latents, 30]))
    print('!!Completely Saved!!')
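
Both mode_1 and mode_4 draw latent vectors through z_samples, which is not shown on this page. A plausible minimal implementation, assuming a standard-normal latent prior and a hypothetical LATENT_DIMS constant, is:

import numpy as np

LATENT_DIMS = 100  # hypothetical; the actual latent dimensionality is not shown here

def z_samples(batch_size):
    # one standard-normal latent vector per batch entry, matching the
    # [batch_size, latent_dims] shape fed to 'latent_input:0'
    return np.random.normal(
        0., 1., size=[batch_size, LATENT_DIMS]).astype(np.float32)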
def mode_4(sess, graph, save_path, is_valid=FLAGS.is_valid):
    """ to analize code, only change first dimension for comparison
    Saved Result
    ------------
    results_A_fake_B : float, numpy ndarray, shape=[n_latents=11, n_conditions=128*9, length=100, features=23]
        Real A + Fake B
    results_A_real_B : float, numpy ndarray, shape=[n_latents=11, n_conditions=128*9, length=100, features=23]
        Real A + Real B
    results_critic_scores : float, numpy ndarray, shape=[n_latents=11, n_conditions=128*9]
        critic scores for each input data
    """
    target_dims = 0
    n_latents = 11

    # placeholder tensor
    latent_input_t = graph.get_tensor_by_name('latent_input:0')
    team_a_t = graph.get_tensor_by_name('team_a:0')
    G_samples_t = graph.get_tensor_by_name('G_samples:0')
    matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
    # result tensor
    result_t = graph.get_tensor_by_name(
        'Generator/G_inference/conv_result/conv1d/Maximum:0')
    critic_scores_t = graph.get_tensor_by_name(
        'Critic/C_inference_1/linear_result/BiasAdd:0')
    # alternative tensor: 'Generator/G_loss/C_inference/linear_result/Reshape:0'

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
    print('real_data.shape', real_data.shape)
    # normalize
    data_factory = DataFactory(real_data)
    # result collector
    results_A_fake_B = []
    results_A_real_B = []
    results_critic_scores = []

    # shuffle the data
    train_data, valid_data = data_factory.fetch_data()
    if is_valid:
        target_data = valid_data
    else:
        target_data = train_data
    latents = z_samples(FLAGS.batch_size)
    for idx in range(0, FLAGS.n_conditions, FLAGS.batch_size):
        real_samples = target_data['B'][idx:idx + FLAGS.batch_size]
        real_conds = target_data['A'][idx:idx + FLAGS.batch_size]
        # generate result
        temp_critic_scores = []
        temp_A_fake_B = []
        for i in range(n_latents):
            # sweep the target dimension over [-2.5, 2.5] in steps of 0.5
            latents[:, target_dims] = -2.5 + 0.5 * i
            feed_dict = {
                latent_input_t: latents,
                team_a_t: real_conds
            }
            result = sess.run(
                result_t, feed_dict=feed_dict)
            feed_dict = {
                G_samples_t: result,
                matched_cond_t: real_conds
            }
            critic_scores = sess.run(
                critic_scores_t, feed_dict=feed_dict)
            temp_A_fake_B.append(data_factory.recover_data(
                np.concatenate([real_conds, result], axis=-1)))
            temp_critic_scores.append(critic_scores)
        results_A_fake_B.append(temp_A_fake_B)
        results_critic_scores.append(temp_critic_scores)
    # concatenate along the conditions dimension (axis=1)
    results_A_fake_B = np.concatenate(results_A_fake_B, axis=1)
    results_critic_scores = np.concatenate(results_critic_scores, axis=1)
    results_A = data_factory.recover_BALL_and_A(
        target_data['A'][:FLAGS.n_conditions])
    results_real_B = data_factory.recover_B(
        target_data['B'][:FLAGS.n_conditions])
    results_A_real_B = np.concatenate([results_A, results_real_B], axis=-1)
    # save as numpy arrays
    print(np.array(results_A_fake_B).shape)
    print(np.array(results_A_real_B).shape)
    print(np.array(results_critic_scores).shape)
    np.save(save_path + 'results_A_fake_B.npy',
            np.array(results_A_fake_B).astype(np.float32).reshape([n_latents, FLAGS.n_conditions, FLAGS.seq_length, 23]))
    np.save(save_path + 'results_A_real_B.npy',
            np.array(results_A_real_B).astype(np.float32).reshape([FLAGS.n_conditions, FLAGS.seq_length, 23]))
    np.save(save_path + 'results_critic_scores.npy',
            np.array(results_critic_scores).astype(np.float32).reshape([n_latents, FLAGS.n_conditions]))
    print('!!Completely Saved!!')
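
To consume the arrays written by mode_4, it is enough to load them back with numpy; a short sanity check (with an illustrative save_path) might look like:

import numpy as np

save_path = 'results/mode_4/'  # illustrative path (assumption)
A_fake_B = np.load(save_path + 'results_A_fake_B.npy')            # [n_latents, n_conditions, seq_length, 23]
A_real_B = np.load(save_path + 'results_A_real_B.npy')            # [n_conditions, seq_length, 23]
critic_scores = np.load(save_path + 'results_critic_scores.npy')  # [n_latents, n_conditions]
print(A_fake_B.shape, A_real_B.shape, critic_scores.shape)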