# Example #1
def main():
    """Run training, evaluation and/or prediction for the LSTM multi-step regressor.

    The phases executed are selected by the command-line flags in ``ARGS``
    (``--train``, ``--eval``, ``--test``); any combination may run in one call.
    """
    # Model hyperparameters come from a JSON config file.
    with open(ARGS.config) as f:
        model_params = json.load(f)

    # The number of future steps to predict is a runtime flag, not a config key.
    model_params['pred_steps'] = ARGS.pred_steps

    regressor = tf.estimator.Estimator(model_fn=model_fn,
                                       params=model_params,
                                       model_dir=ARGS.log_dir)

    # Training phase.
    if ARGS.train:
        train_data = load_data(ARGS.data_dir, ARGS.data_transpose,
                               edge=False, prefix='train')

        # num_epochs=None repeats the data indefinitely; `steps` bounds training.
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=train_data,
            batch_size=ARGS.batch_size,
            num_epochs=None,
            shuffle=True)

        regressor.train(input_fn=train_input_fn, steps=ARGS.train_steps)

    # Evaluation phase: one pass over the validation split.
    if ARGS.eval:
        valid_data = load_data(ARGS.data_dir, ARGS.data_transpose,
                               edge=False, prefix='valid')

        eval_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=valid_data,
            batch_size=ARGS.batch_size,
            num_epochs=1,
            shuffle=False)
        eval_results = regressor.evaluate(input_fn=eval_input_fn)

        # NOTE(review): results are printed only when --verbose is OFF;
        # presumably verbose mode already logs them elsewhere — confirm.
        if not ARGS.verbose:
            print('Evaluation results: {}'.format(eval_results))

    # Prediction phase: dump predicted trajectories into the log directory.
    if ARGS.test:
        test_data = load_data(ARGS.data_dir, ARGS.data_transpose,
                              edge=False, prefix='test')

        predict_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=test_data, batch_size=ARGS.batch_size, shuffle=False)

        prediction = regressor.predict(input_fn=predict_input_fn)
        prediction = np.array([pred['next_steps'] for pred in prediction])
        np.save(
            os.path.join(ARGS.log_dir,
                         'prediction_{}.npy'.format(ARGS.pred_steps)),
            prediction)
# Example #2
def main():
    """Run training, evaluation and/or prediction for the seq2seq regressor.

    Phases are selected by ``ARGS.train`` / ``ARGS.eval`` / ``ARGS.test``.
    """
    with open(ARGS.config) as f:
        model_params = json.load(f)

    # Segment length is fixed by the config; pred_steps is a runtime flag.
    seg_len = model_params['seg_len']
    model_params['pred_steps'] = ARGS.pred_steps

    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       params=model_params,
                                       model_dir=ARGS.log_dir)

    # Training
    if ARGS.train:
        train_data = load_data(ARGS.data_dir, ARGS.data_transpose,
                               edge=False, prefix='train')
        train_input_fn = input_fn({'time_series': train_data}, seg_len,
                                  ARGS.pred_steps, ARGS.batch_size, 'train')
        estimator.train(input_fn=train_input_fn, steps=ARGS.train_steps)

    # Evaluation
    if ARGS.eval:
        valid_data = load_data(ARGS.data_dir, ARGS.data_transpose,
                               edge=False, prefix='valid')
        eval_input_fn = input_fn({'time_series': valid_data}, seg_len,
                                 ARGS.pred_steps, ARGS.batch_size, 'eval')
        eval_results = estimator.evaluate(input_fn=eval_input_fn)

        # NOTE(review): printed only when --verbose is off — confirm intent.
        if not ARGS.verbose:
            print('Evaluation results: {}'.format(eval_results))

    # Prediction
    if ARGS.test:
        test_data = load_data(ARGS.data_dir, ARGS.data_transpose,
                              edge=False, prefix='test')
        predict_input_fn = input_fn({'time_series': test_data}, seg_len,
                                    ARGS.pred_steps, ARGS.batch_size, 'test')

        prediction = estimator.predict(input_fn=predict_input_fn)
        prediction = np.array([pred['next_steps'] for pred in prediction])
        np.save(
            os.path.join(ARGS.log_dir,
                         'prediction_{}.npy'.format(ARGS.pred_steps)),
            prediction)
# Example #3
def main():
    """Train/evaluate/predict with the MLP edge-type encoder classifier."""
    with open(ARGS.config) as f:
        model_params = json.load(f)

    classifier = tf.estimator.Estimator(model_fn=encoder_model_fn,
                                        params=model_params,
                                        model_dir=ARGS.log_dir)

    # Training: edge types serve as the classification labels.
    if ARGS.train:
        train_data, train_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                           edge=True, prefix='train')

        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=train_data,
            y=train_edge,
            batch_size=ARGS.batch_size,
            num_epochs=None,
            shuffle=True)

        classifier.train(input_fn=train_input_fn, steps=ARGS.train_steps)

    # Evaluation
    if ARGS.eval:
        valid_data, valid_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                           edge=True, prefix='valid')

        eval_input_fn = tf.estimator.inputs.numpy_input_fn(x=valid_data,
                                                           y=valid_edge,
                                                           num_epochs=1,
                                                           shuffle=False)
        eval_results = classifier.evaluate(input_fn=eval_input_fn)

        if not ARGS.verbose:
            print('Evaluation results: {}'.format(eval_results))

    # Prediction: labels are loaded but unused; only features feed predict().
    if ARGS.test:
        test_data, test_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                         edge=True, prefix='test')

        pred_input_fn = tf.estimator.inputs.numpy_input_fn(x=test_data,
                                                           shuffle=False)
        prediction = classifier.predict(input_fn=pred_input_fn)
        predicted_edge_type = [pred['classes'] for pred in prediction]
        np.save(os.path.join(ARGS.log_dir, 'prediction.npy'),
                predicted_edge_type)
# Example #4
def main():
    """Dump the output of one named layer of a trained GNN, for debugging."""
    with open(ARGS.config) as f:
        model_params = json.load(f)

    # model_params['pred_steps'] = ARGS.pred_steps
    # Segment length follows from the number of CNN filter layers.
    seg_len = 2 * len(model_params['cnn']['filters']) + 1

    prefix = 'test'

    model_params['edge_type'] = model_params.get('edge_type', 1)
    # data contains edge_types if `edge=True`.
    data = load_data(ARGS.data_dir, ARGS.data_transpose,
                     edge=model_params['edge_type'] > 1, prefix=prefix)
    print(f"\nData from {ARGS.data_dir} loaded.")

    # input_data is [time_segs, edge_types] when edge_type > 1, else [time_segs].
    input_data, expected_time_segs = preprocess_data(
        data, seg_len, ARGS.pred_steps, edge_type=model_params['edge_type'])
    print(f"Data processed.\n")
    nagents, ndims = expected_time_segs.shape[-2:]

    # Node count and state dimensionality are inferred from the target tensor.
    model_params.update({'nagents': nagents, 'ndims': ndims,
                         'pred_steps': ARGS.pred_steps, 'time_seg_len': seg_len})
    model, inputs = gnn.build_model(model_params, return_inputs=True)

    print("Original model summary:")
    model.summary()
    print('\n')

    gnn.load_model(model, ARGS.log_dir)

    # Build a truncated model that ends at the requested layer, run it, and
    # save that layer's activations next to the checkpoints.
    outlayer_name = ARGS.layer_name
    debug_model = keras.Model(
        inputs=inputs, outputs=model.get_layer(outlayer_name).output)
    print(f"\nOutput up to {outlayer_name}\n")
    debug_model.summary()
    layer_output = debug_model.predict(input_data)
    np.save(os.path.join(ARGS.log_dir, f'{outlayer_name}_output'), layer_output)
    print(f"Layer {outlayer_name} output saved.")
# Example #5
def main():
    """Train/evaluate/predict with the (optionally supervised) MLP GNN regressor."""
    with open(ARGS.config) as f:
        model_params = json.load(f)

    # Runtime flags override/extend the JSON config.
    model_params['pred_steps'] = ARGS.pred_steps
    model_params['supervised'] = ARGS.supervised

    regressor = tf.estimator.Estimator(model_fn=model_fn,
                                       params=model_params,
                                       model_dir=ARGS.log_dir)

    # Training
    if ARGS.train:
        train_data, train_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                           edge=True, prefix='train')

        # Repeat indefinitely; `steps` bounds the training run.
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=train_data,
            y=train_edge,
            batch_size=ARGS.batch_size,
            num_epochs=None,
            shuffle=True)

        regressor.train(input_fn=train_input_fn, steps=ARGS.train_steps)

    # Evaluation
    if ARGS.eval:
        valid_data, valid_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                           edge=True, prefix='valid')

        eval_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=valid_data,
            y=valid_edge,
            num_epochs=1,
            batch_size=ARGS.batch_size,
            shuffle=False)
        eval_results = regressor.evaluate(input_fn=eval_input_fn)

        if not ARGS.verbose:
            print('Evaluation results: {}'.format(eval_results))

    # Prediction: save next-step states and inferred edge-type probabilities.
    if ARGS.test:
        test_data, test_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                         edge=True, prefix='test')

        predict_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=test_data, batch_size=ARGS.batch_size, shuffle=False)

        prediction = regressor.predict(input_fn=predict_input_fn)
        pairs = [(pred['state_next_step'], pred['edge_type_prob'])
                 for pred in prediction]
        state_next_step, edge_type_prob = zip(*pairs)
        np.save(os.path.join(ARGS.log_dir, 'prediction.npy'), state_next_step)
        # NOTE: the file name keeps the historical spelling "infered" so that
        # downstream consumers of this artifact keep working.
        np.save(os.path.join(ARGS.log_dir, 'infered_edge_type.npy'),
                edge_type_prob)
# Example #6
def main():
    """Train/evaluate/predict with the CNN multi-step regressor.

    When the config declares more than one edge type, edge labels are loaded
    alongside the time series and fed to the model as one-hot vectors.
    """
    with open(ARGS.config) as f:
        model_params = json.load(f)

    model_params['pred_steps'] = ARGS.pred_steps
    # Segment length follows from the number of CNN filter layers.
    seg_len = 2 * len(model_params['cnn']['filters']) + 1

    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        params=model_params,
        model_dir=ARGS.log_dir)

    multi_edge = model_params.get('edge_types', 0) > 1

    def _features(prefix, size=None):
        # Load one split and package it as the feature dict input_fn expects.
        # `size`, when truthy, truncates the split (used for training only).
        if multi_edge:
            series, edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                     edge=True, prefix=prefix)
            if size:
                series, edge = series[:size], edge[:size]
            edge = gnn.utils.one_hot(edge, model_params['edge_types'],
                                     np.float32)
            return {'time_series': series, 'edge_type': edge}
        series = load_data(ARGS.data_dir, ARGS.data_transpose,
                           edge=False, prefix=prefix)
        if size:
            series = series[:size]
        return {'time_series': series}

    # Training
    if ARGS.train:
        train_input_fn = input_fn(_features('train', ARGS.data_size), seg_len,
                                  ARGS.pred_steps, ARGS.batch_size, 'train')
        estimator.train(input_fn=train_input_fn, steps=ARGS.train_steps)

    # Evaluation
    if ARGS.eval:
        eval_input_fn = input_fn(_features('valid'), seg_len, ARGS.pred_steps,
                                 ARGS.batch_size, 'eval')
        eval_results = estimator.evaluate(input_fn=eval_input_fn)

        if not ARGS.verbose:
            print('Evaluation results: {}'.format(eval_results))

    # Prediction
    if ARGS.test:
        predict_input_fn = input_fn(_features('test'), seg_len,
                                    ARGS.pred_steps, ARGS.batch_size, 'test')

        prediction = estimator.predict(input_fn=predict_input_fn)
        prediction = np.array([pred['next_steps'] for pred in prediction])
        np.save(os.path.join(ARGS.log_dir, 'prediction_{}.npy'.format(
            ARGS.pred_steps)), prediction)
# Example #7
def main():
    """Train, evaluate or test an MPNN/MFGNN model selected by ``ARGS.model``.

    Exactly one of ``--train`` / ``--eval`` / ``--test`` must be supplied; it
    selects both the execution phase and the data split that is loaded.

    Raises:
        ValueError: if none of the phase flags is set.
    """
    with open(ARGS.config) as f:
        model_params = json.load(f)

    # model_params['pred_steps'] = ARGS.pred_steps
    # Segment length follows from the number of CNN filter layers.
    seg_len = 2 * len(model_params['cnn']['filters']) + 1

    if ARGS.train:
        prefix = 'train'
    elif ARGS.eval:
        prefix = 'valid'
    elif ARGS.test:
        prefix = 'test'
    else:
        # Fail fast: previously `prefix` stayed unbound and the load_data()
        # call below crashed with an UnboundLocalError.
        raise ValueError('one of --train, --eval or --test must be given')

    data = load_data(ARGS.data_dir,
                     ARGS.data_transpose,
                     prefix=prefix,
                     size=ARGS.data_size,
                     padding=ARGS.max_padding)

    input_data, expected_time_segs = preprocess_data(
        data, seg_len, ARGS.pred_steps, edge_type=model_params['edge_type'])
    print(f"\nData from {ARGS.data_dir} processed.\n")

    # Node count and state dimensionality are inferred from the target tensor.
    nagents, ndims = expected_time_segs.shape[-2:]

    model_params.update({
        'num_nodes': nagents,
        'ndims': ndims,
        'pred_steps': ARGS.pred_steps,
        'time_seg_len': seg_len
    })

    models = {'MPNN': gnn.MPNN, 'MFGNN': gnn.MFGNN}
    model = models[ARGS.model].build_model(model_params)
    # model.summary()

    gnn.utils.load_model(model, ARGS.log_dir)

    if ARGS.train:
        # The checkpoint callback persists weights during fit().
        checkpoint = gnn.utils.save_model(model, ARGS.log_dir)
        # tb_callback = tf.keras.callbacks.TensorBoard(log_dir=ARGS.log_dir, histogram_freq=1)

        history = model.fit(input_data,
                            expected_time_segs,
                            epochs=ARGS.epochs,
                            batch_size=ARGS.batch_size,
                            callbacks=[checkpoint])
        # print(history.history)

    elif ARGS.eval:
        result = model.evaluate(input_data,
                                expected_time_segs,
                                batch_size=ARGS.batch_size)
        # result = MSE
        print("Evaluating baseline...")
        baseline = eval_base_line(data)
        print('Baseline:', baseline, '\t| MSE / Baseline:', result / baseline)

    elif ARGS.test:
        prediction = model.predict(input_data)
        np.save(
            os.path.join(ARGS.log_dir, f'prediction_{ARGS.pred_steps}.npy'),
            prediction)
# Example #8
def main():
    """Train, evaluate or test the CNN-GNN model, with optional edge types.

    Exactly one of ``--train`` / ``--eval`` / ``--test`` must be supplied; it
    selects both the execution phase and the data split that is loaded.

    Raises:
        ValueError: if none of the phase flags is set.
    """
    with open(ARGS.config) as f:
        model_params = json.load(f)

    # model_params['pred_steps'] = ARGS.pred_steps
    # Segment length follows from the number of CNN filter layers.
    seg_len = 2 * len(model_params['cnn']['filters']) + 1

    if ARGS.train:
        prefix = 'train'
    elif ARGS.eval:
        prefix = 'valid'
    elif ARGS.test:
        prefix = 'test'
    else:
        # Fail fast: previously `prefix` stayed unbound and the load_data()
        # call below crashed with an UnboundLocalError.
        raise ValueError('one of --train, --eval or --test must be given')

    model_params['edge_type'] = model_params.get('edge_type', 1)
    # data contains edge_types if `edge=True`.
    data = load_data(ARGS.data_dir,
                     ARGS.data_transpose,
                     edge=model_params['edge_type'] > 1,
                     prefix=prefix,
                     size=ARGS.data_size)

    # input_data: a list which is [time_segs, edge_types] if `edge_type` > 1, else [time_segs]
    input_data, expected_time_segs = preprocess_data(
        data, seg_len, ARGS.pred_steps, edge_type=model_params['edge_type'])
    print(f"\nData from {ARGS.data_dir} processed.\n")

    # Node count and state dimensionality are inferred from the target tensor.
    nagents, ndims = expected_time_segs.shape[-2:]

    model_params.update({
        'nagents': nagents,
        'ndims': ndims,
        'pred_steps': ARGS.pred_steps,
        'time_seg_len': seg_len
    })
    model = gnn.build_model(model_params)
    # model.summary()

    gnn.load_model(model, ARGS.log_dir)

    if ARGS.train:
        # The checkpoint callback persists weights during fit().
        checkpoint = gnn.save_model(model, ARGS.log_dir)

        # Freeze some of the layers according to train mode.
        if ARGS.train_mode > 0:
            model.conv1d.trainable = False
            if model_params['edge_type'] > 1:
                for edge_encoder in model.edge_encoders:
                    edge_encoder.trainable = False
            else:
                model.edge_encoder.trainable = False

        if ARGS.train_mode > 1:
            model.node_encoder.trainable = False

        history = model.fit(input_data,
                            expected_time_segs,
                            epochs=ARGS.epochs,
                            batch_size=ARGS.batch_size,
                            callbacks=[checkpoint])
        # print(history.history)

    elif ARGS.eval:
        result = model.evaluate(input_data,
                                expected_time_segs,
                                batch_size=ARGS.batch_size)
        # result = MSE
        # NOTE(review): the sibling script passes `data` (not `input_data`)
        # to eval_base_line — confirm which argument is intended here.
        baseline = eval_base_line(input_data)
        print('Baseline:', baseline, '\t| MSE / Baseline:', result / baseline)

    elif ARGS.test:
        prediction = model.predict(input_data)
        np.save(
            os.path.join(ARGS.log_dir, f'prediction_{ARGS.pred_steps}.npy'),
            prediction)
def main():
    """Replay a test trajectory on the Robotarium simulator, driving the agents
    with velocities predicted by a trained CNN multi-step regressor.

    With ``--dynamic_update`` the GNN is re-queried at every step on the latest
    8 logged states (closed loop); otherwise a single batched prediction is
    made up front and indexed step by step (open loop).
    """
    with open(ARGS.config) as f:
        model_params = json.load(f)

    model_params['pred_steps'] = ARGS.pred_steps
    # Segment length follows from the number of CNN filter layers.
    seg_len = 2 * len(model_params['cnn']['filters']) + 1

    cnn_multistep_regressor = tf.estimator.Estimator(model_fn=model_fn,
                                                     params=model_params,
                                                     model_dir=ARGS.log_dir)

    # Prediction
    if ARGS.test:
        if model_params.get('edge_types', 0) > 1:
            # Edge labels are available; feed them as one-hot vectors.
            test_data, test_edge = load_data(ARGS.data_dir,
                                             ARGS.data_transpose,
                                             edge=True,
                                             prefix='test')
            test_edge = gnn.utils.one_hot(test_edge,
                                          model_params['edge_types'],
                                          np.float32)

            features = {'time_series': test_data, 'edge_type': test_edge}
        else:
            test_data = load_data(ARGS.data_dir,
                                  ARGS.data_transpose,
                                  edge=False,
                                  prefix='test')
            features = {'time_series': test_data}

        if not ARGS.dynamic_update:
            # Open-loop mode: predict the whole trajectory in advance.

            curr_time = time.time()

            predict_input_fn = input_fn(features, seg_len, ARGS.pred_steps,
                                        ARGS.batch_size, 'test')
            prediction = cnn_multistep_regressor.predict(
                input_fn=predict_input_fn)
            prediction = np.array([pred['next_steps'] for pred in prediction])

            # Swap the first two axes so steps can be indexed in the loop below.
            prediction = np.swapaxes(prediction, 0, 1)

            print('GNN execution time = %f' % (-curr_time + time.time()))

        #print(prediction.shape)

        print("===========================================================")
        print("===========================================================")

        # Instantiate Robotarium object
        # NOTE(review): assumes axis 2 of test_data indexes agents — confirm.
        N = test_data.shape[2]
        r = robotarium.Robotarium(number_of_agents=N,
                                  show_figure=False,
                                  save_data=False,
                                  update_time=0.3)

        # Create barrier certificates to avoid collision
        si_barrier_cert = create_single_integrator_barrier_certificate(N)

        # ------------------------- initalizing initial positions ------------------------

        # initialize the the agents according to the simulation
        initial_goal_points = np.squeeze(
            np.swapaxes(test_data, 2, 3)[:, 0, :2, :])
        # NOTE(review): the orientation row is hard-coded for 20 agents and
        # will not match N when N != 20 — confirm the dataset size.
        initial_orientations = np.zeros((1, 20))

        initial_goal_states = np.concatenate(
            [initial_goal_points, initial_orientations], axis=0)

        # Move the robots to the trajectory's first frame before replay starts.
        move_to_goal(r, N, si_barrier_cert, initial_goal_states)
        r.call_at_scripts_end()

        time.sleep(0.03)

        print('Step 1')
        print('error = %f' %
              (np.linalg.norm(r.get_poses()[:2, :] - initial_goal_points)))

        current_position = r.get_poses()[:2, :]
        current_velocities = unicycle_to_single_integrator(
            r.velocities, r.get_poses())

        current_pos_vel = np.concatenate(
            [current_position, current_velocities], axis=0)

        # Running state log shaped (1, steps, N, 4): x, y, vx, vy per agent.
        pos_vel_log = np.swapaxes(
            np.expand_dims(np.expand_dims(current_pos_vel, axis=0), axis=0), 2,
            3)

        # Warm-up: steps 2..8 follow the ground-truth velocities so the GNN
        # has a full 8-step history before it starts predicting.
        for i in range(1, 8):
            print('Step %d' % (i + 1))
            goal_points = np.squeeze(np.swapaxes(test_data, 2, 3)[:, i, :2, :])
            goal_velocities = np.squeeze(
                np.swapaxes(test_data, 2, 3)[:, i, 2:, :])

            apply_control(r, N, goal_velocities)

            current_position = r.get_poses()[:2, :]
            current_velocities = unicycle_to_single_integrator(
                r.velocities, r.get_poses())

            current_pos_vel = np.concatenate(
                [current_position, current_velocities], axis=0)

            # Append the observed state to the log along the step axis.
            pos_vel_log = np.concatenate([
                pos_vel_log,
                np.swapaxes(
                    np.expand_dims(np.expand_dims(current_pos_vel, axis=0),
                                   axis=0), 2, 3)
            ],
                                         axis=1)

            print('error = %f' %
                  (np.linalg.norm(r.get_poses()[:2, :] - goal_points)))

            # Always call this function at the end of your scripts!  It will accelerate the
            # execution of your experiment
            # r.call_at_scripts_end()

        #------------- intialization end ----------------------

        # -------------------- Prediction from Graph neural network -------------------------

        pos_vel_log = pos_vel_log.astype(dtype=np.float32)

        for i in range(8, test_data.shape[1]):

            if ARGS.dynamic_update:
                # Closed loop: re-run the GNN on the last 8 logged states.

                curr_time = time.time()

                features = {'time_series': pos_vel_log[:, i - 8:i, :, :]}

                predict_input_fn = input_fn(features, seg_len, ARGS.pred_steps,
                                            ARGS.batch_size, 'test')
                prediction = cnn_multistep_regressor.predict(
                    input_fn=predict_input_fn)

                prediction = np.array(
                    [pred['next_steps'] for pred in prediction])

                print(
                    "------------------------------------------------------------------"
                )

                print('GNN execution time = %f' % (-curr_time + time.time()))

                goal_velocities = np.squeeze(
                    np.swapaxes(prediction, 2, 3)[:, :, 2:, :])

            else:
                # Open loop: index into the trajectory predicted up front.
                print(prediction.shape)
                goal_velocities = np.squeeze(
                    np.swapaxes(prediction, 2, 3)[:, i - 8, 2:, :])

            print('Step %d' % (i + 1))
            goal_points = np.squeeze(np.swapaxes(test_data, 2, 3)[:, i, :2, :])
            #goal_velocities = np.squeeze(np.swapaxes(prediction,2,3)[:,:,2:,:])

            apply_control(r, N, goal_velocities)

            current_position = r.get_poses()[:2, :]
            current_velocities = unicycle_to_single_integrator(
                r.velocities, r.get_poses())

            current_pos_vel = np.concatenate(
                [current_position, current_velocities], axis=0)

            # Append the newly observed state to the log.
            pos_vel_log = np.concatenate([
                pos_vel_log,
                np.swapaxes(
                    np.expand_dims(np.expand_dims(current_pos_vel, axis=0),
                                   axis=0), 2, 3)
            ],
                                         axis=1)

            pos_vel_log = pos_vel_log.astype(dtype=np.float32)

            print('error = %f' %
                  (np.linalg.norm(r.get_poses()[:2, :] - goal_points)))

            # Always call this function at the end of your scripts!  It will accelerate the
            # execution of your experiment
        r.call_at_scripts_end()
# Example #10
def main():
    """Train/evaluate/predict with the MLP decoder regressor (known edge types)."""
    with open(ARGS.config) as f:
        model_params = json.load(f)

    model_params['pred_steps'] = ARGS.pred_steps

    regressor = tf.estimator.Estimator(model_fn=decoder_model_fn,
                                       params=model_params,
                                       model_dir=ARGS.log_dir)

    def _one_hot_edges(edge):
        # The decoder consumes edge types as one-hot float32 vectors.
        return gnn.utils.one_hot(edge, model_params['edge_types'], np.float32)

    # Training
    if ARGS.train:
        train_data, train_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                           edge=True, prefix='train')

        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={
                'time_series': train_data,
                'edge_type': _one_hot_edges(train_edge)
            },
            batch_size=ARGS.batch_size,
            num_epochs=None,
            shuffle=True)

        regressor.train(input_fn=train_input_fn, steps=ARGS.train_steps)

    # Evaluation
    if ARGS.eval:
        valid_data, valid_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                           edge=True, prefix='valid')

        eval_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={
                'time_series': valid_data,
                'edge_type': _one_hot_edges(valid_edge)
            },
            batch_size=ARGS.batch_size,
            num_epochs=1,
            shuffle=False)
        eval_results = regressor.evaluate(input_fn=eval_input_fn)

        if not ARGS.verbose:
            print('Evaluation results: {}'.format(eval_results))

    # Prediction
    if ARGS.test:
        test_data, test_edge = load_data(ARGS.data_dir, ARGS.data_transpose,
                                         edge=True, prefix='test')

        predict_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={
                'time_series': test_data,
                'edge_type': _one_hot_edges(test_edge)
            },
            batch_size=ARGS.batch_size,
            shuffle=False)

        prediction = regressor.predict(input_fn=predict_input_fn)
        prediction = np.array(
            [pred['state_next_steps'] for pred in prediction])
        np.save(
            os.path.join(ARGS.log_dir,
                         'prediction_{}.npy'.format(ARGS.pred_steps)),
            prediction)