Example #1
0
def run_dcrnn(args):
    """Restore a trained DCRNN model and save its evaluation outputs.

    Args:
        args: parsed CLI namespace; uses ``gpu_instance``, ``config_filename``,
            ``use_cpu_only`` and ``output_filename``.
    """
    # Pin the process to a single GPU. Activate tensorflow_gpu conda env.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_instance

    with open(args.config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1
        # and raises TypeError in PyYAML >= 6.
        config = yaml.load(f, Loader=yaml.FullLoader)
    tf_config = tf.ConfigProto()
    if args.use_cpu_only:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    # Grow GPU memory on demand instead of grabbing it all up front.
    tf_config.gpu_options.allow_growth = True

    ### From the yaml file get the adjacency matrix
    graph_pkl_filename = config['data']['graph_pkl_filename']
    _, _, adj_mx = load_graph_data(graph_pkl_filename)
    with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)

        ### Load the current trained model, access filename from yaml file
        supervisor.load(sess, config['train']['model_filename'])

        ### Evaluate and persist the predictions.
        outputs = supervisor.evaluate(sess)
        np.savez_compressed(args.output_filename, **outputs)
        print('Predictions saved as {}.'.format(args.output_filename))
Example #2
0
def main(args):
    """Train a DCRNN for ``args.city``, then reload and evaluate the model.

    Args:
        args: parsed CLI namespace; uses ``city``, ``use_cpu_only`` and
            ``cuda`` (GPU index when running on GPU).
    """
    config_filename = './data/model/dcrnn_{}.yaml'.format(args.city)
    with open(config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)

        graph_pkl_filename = supervisor_config['data'].get(
            'graph_pkl_filename')
        adj_mx = load_graph_data1(graph_pkl_filename)

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            # NOTE(review): device_count values are documented as >= 0;
            # -1 is kept for backward compatibility, but 0 is the documented
            # way to hide all GPUs -- confirm intent.
            tf_config = tf.ConfigProto(device_count={'GPU': -1})
        else:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)
            print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']))

        # Grow GPU memory on demand instead of grabbing it all up front.
        tf_config.gpu_options.allow_growth = True

        # Train in one session ...
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
            supervisor.train(sess=sess)

        # ... then restore the saved checkpoint in a fresh session and evaluate.
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
            supervisor.load(sess, supervisor_config['train']['model_filename'])
            outputs = supervisor.evaluate(sess)
Example #3
0
def main(args):
    """Restore a trained DCRNN and optionally save/plot its predictions.

    Args:
        args: parsed CLI namespace; uses ``config_filename``,
            ``test_dataset``, ``use_cpu_only``, ``save_predictions`` and
            ``output_name``.
    """
    with open(args.config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)

        SC_mx = load_graph_data(
            supervisor_config)  # Load structural connectivity matrix.

        if args.test_dataset:  # For evaluating the model on a different dataset.
            supervisor_config['data']['dataset_dir'] = args.test_dataset

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        # Grow GPU memory on demand instead of grabbing it all up front.
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=SC_mx, **supervisor_config)
            supervisor.load(
                sess,
                supervisor_config['train']['model_filename'])  # Restore model.

            if args.save_predictions:
                outputs, _ = supervisor.evaluate(sess=sess)

                print('Save outputs in: ', supervisor._log_dir)
                np.savez(supervisor._log_dir + '/' + args.output_name,
                         predictions=outputs['predictions'],
                         groundtruth=outputs['groundtruth'])

                plot_predictions(
                    log_dir=supervisor._log_dir,
                    output_name=args.output_name,
                    dataset_dir=supervisor_config['data']['dataset_dir'])
Example #4
0
def main(args):
    """Train a DCRNN (optionally resuming the latest run), then evaluate it.

    Args:
        args: parsed CLI namespace; uses ``config_filename``,
            ``use_cpu_only`` and, when present, ``restart``.
    """
    with open(args.config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)

        # Fixed seeds for reproducibility across python/numpy/tensorflow.
        SEED = 1234
        random.seed(SEED)
        np.random.seed(SEED)
        tf.set_random_seed(SEED)

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        bytes_in_use = BytesInUse()
        maxbytes_in_use = tf.contrib.memory_stats.MaxBytesInUse()

        # BUGFIX: `restart` was an undefined global (NameError at runtime);
        # read it from args when available, defaulting to a fresh run.
        restart = getattr(args, 'restart', False)
        with tf.Session(config=tf_config) as sess:

            # Train
            data_tag = supervisor_config.get('base_dir')
            if restart:
                # Resume from the most recently written config/checkpoint.
                yaml_files = glob.glob('%s/*/*.yaml' % data_tag,
                                       recursive=True)
                yaml_files.sort(key=os.path.getmtime)
                config_filename = yaml_files[-1]  #'config_%d.yaml' % config_id
                print(config_filename)
                with open(config_filename) as f:
                    config = yaml.load(f, Loader=yaml.FullLoader)
                supervisor = DCRNNSupervisor(config)
                supervisor.load(sess, config['train']['model_filename'])
            else:
                supervisor = DCRNNSupervisor(supervisor_config)
                folder = data_tag
                if not os.path.exists(folder):
                    os.makedirs(folder)
            supervisor.train(sess=sess)

            # Test: pick up the newest config written during training.
            supervisor = DCRNNSupervisor(supervisor_config)
            yaml_files = glob.glob('%s/*/*.yaml' % data_tag, recursive=True)
            yaml_files.sort(key=os.path.getmtime)
            config_filename = yaml_files[-1]  #'config_%d.yaml' % config_id

            with open(config_filename) as f:
                config = yaml.load(f, Loader=yaml.FullLoader)
            # Load model and evaluate
            supervisor.load(sess, config['train']['model_filename'])
            y_preds = supervisor.evaluate(sess)

            # Persist predictions as an HDF5 table.
            folder = 'data/results/'
            if not os.path.exists(folder):
                os.makedirs(folder)
            df_sp = pd.DataFrame(y_preds)
            filename = os.path.join(folder, 'dcrnn_speed_bay.h5')
            df_sp.to_hdf(filename, 'results')
Example #5
0
def run_dcrnn(args):
    """Restore a trained DCRNN and write compressed prediction outputs.

    Args:
        args: parsed CLI namespace; uses ``config_filename``,
            ``use_cpu_only`` and ``output_filename``.
    """
    # Adjacency matrix location is hard-coded for this script.
    graph_pkl_filename = 'data/sensor_graph/adj_mx.pkl'
    with open(args.config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1.
        config = yaml.load(f, Loader=yaml.FullLoader)
    tf_config = tf.ConfigProto()
    if args.use_cpu_only:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    # Grow GPU memory on demand instead of grabbing it all up front.
    tf_config.gpu_options.allow_growth = True
    _, _, adj_mx = load_graph_data(graph_pkl_filename)
    with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
        supervisor.load(sess, config['train']['model_filename'])
        outputs = supervisor.evaluate(sess)
        np.savez_compressed(args.output_filename, **outputs)
        print('Predictions saved as {}.'.format(args.output_filename))
Example #6
0
def run_dcrnn(args):
    """Restore a per-city DCRNN and write submission files with averaging.

    Args:
        args: parsed CLI namespace; uses ``city``, ``use_cpu_only`` and
            ``model_file``.
    """
    config_filename = './data/model/dcrnn_{}.yaml'.format(args.city)
    with open(config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1.
        config = yaml.load(f, Loader=yaml.FullLoader)
        graph_pkl_filename = config['data'].get('graph_pkl_filename')
        adj_mx = load_graph_data1(graph_pkl_filename)
        node_pos_pkl_filename = config['data'].get('node_pos_pkl_filename')
        node_pos = np.load(node_pos_pkl_filename)
        # Pick the time-zone offsets for the submission time slots.
        indicies = utcPlus3
        if args.city == 'Berlin':
            indicies = utcPlus2

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            # NOTE(review): device_count values are documented as >= 0;
            # -1 is kept for backward compatibility -- confirm intent.
            tf_config = tf.ConfigProto(device_count={'GPU': -1})
        tf_config.gpu_options.allow_growth = True

        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
            supervisor.load(sess, args.model_file)
            supervisor.pred_write_submission_files_with_avg(
                sess, args, node_pos, indicies)
Example #7
0
def run_dcrnn(args):
    """Restore a trained DCRNN and export it as a TF SavedModel for serving.

    Args:
        args: parsed CLI namespace; uses ``config_filename`` and
            ``use_cpu_only``.
    """
    graph_pkl_filename = 'data/sensor_graph/adj_mx.pkl'
    with open(args.config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1.
        config = yaml.load(f, Loader=yaml.FullLoader)
    tf_config = tf.ConfigProto()
    if args.use_cpu_only:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    tf_config.gpu_options.allow_growth = True
    _, _, adj_mx = load_graph_data(graph_pkl_filename)
    with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
        supervisor.load(
            sess, config['train']
            ['model_filename'])  # self._saver.restore(sess, model_filename)

        # supervisor._test_model
        # Save the variables to disk.
        # save_path = supervisor._test_model.save(sess, "/tmp/test_model.ckpt")
        save_path = 'data/model/pretrained/'
        # supervisor._saver.save(sess, save_path+"model.ckpt") #tf.train.Saver()
        print("Test_Model saved in path: %s" % save_path)

        ## Restore the Model
        saver = supervisor._saver  #tf.train.import_meta_graph(save_path+'model.ckpt.meta', clear_devices=True)
        # sess = tf.Session()
        saver.restore(sess, save_path + "model.ckpt")

        # tf.train.write_graph(sess.graph_def, save_path, 'model-temp.pb', as_text=True)
        graph = tf.get_default_graph()

        input_graph_def = graph.as_graph_def()
        # output_node_names = "outputs"
        # BUGFIX: converted the remaining Python 2 print statements to
        # print() calls so this file parses under Python 3 (the surrounding
        # examples already use print functions).
        print("node Names ########################### ")
        # for v in sess.graph.get_operations():
        #         print(v.name)
        print(len(sess.graph.get_operations()))

        # Build a serving signature from the test model's inputs/outputs.
        x = supervisor._test_model.inputs
        y = supervisor._test_model.outputs
        print("tf.SignatureDef....")
        print(x, y)
        tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
        tensor_info_y = tf.saved_model.utils.build_tensor_info(y)
        print("tensor_info_... ")
        print(tensor_info_x)
        print(tensor_info_y)
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_x},
                outputs={'scores': tensor_info_y},
                method_name="tensorflow/serving/predict"))
        saved_model_dir = save_path + 'pb_model'
        builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)
        builder.add_meta_graph_and_variables(
            sess, ['serve'],
            signature_def_map={
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                prediction_signature
            })
        builder.save()
Example #8
0
def evaluate_dcrnn(adj_mx, config):
    """Restore the DCRNN named in ``config`` and run its evaluation pass.

    NOTE(review): relies on a module-level ``session`` defined elsewhere in
    the file -- confirm it is created before this is called.
    """
    # with tf.device('/device:GPU:{}'.format(config['gpu'])):
    supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
    checkpoint = config['train']['model_filename']
    supervisor.load(session, checkpoint)
    supervisor.evaluate(sess=session)
Example #9
0
def test_dcrnn(adj_mx, config):
    """Restore the DCRNN named in ``config``, run its test pass and plot.

    NOTE(review): relies on a module-level ``session`` defined elsewhere in
    the file -- confirm it is created before this is called.
    """
    # with tf.device('/device:GPU:{}'.format(config['gpu'])):
    supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
    checkpoint = config['train']['model_filename']
    supervisor.load(session, checkpoint)
    supervisor.test(sess=session)
    supervisor.plot_series()
def main(args):
    """End-to-end DCRNN pipeline: preprocess data, train, evaluate, export.

    Args:
        args: parsed CLI namespace; uses ``config_filename`` and
            ``use_cpu_only``.
    """
    with open(args.config_filename) as f:
        # Explicit Loader: bare yaml.load() is deprecated since PyYAML 5.1.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)

        graph_pkl_filename = supervisor_config['data'].get(
            'graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(
            graph_pkl_filename)
        supervisor_config['model']['num_nodes'] = num_nodes = len(sensor_ids)

        # Data preprocessing: split the traffic dataframe and build
        # seq2seq batches for train/val/test.
        traffic_df_filename = supervisor_config['data']['hdf_filename']
        df_data = pd.read_hdf(traffic_df_filename)
        #df_data = df_data.iloc[int(df_data.shape[0]/3):,:]
        validation_ratio = supervisor_config.get('data').get(
            'validation_ratio')
        test_ratio = supervisor_config.get('data').get('test_ratio')
        df_train, df_val, df_test = train_val_test_split(
            df_data, val_ratio=validation_ratio, test_ratio=test_ratio)

        batch_size = supervisor_config.get('data').get('batch_size')
        val_batch_size = supervisor_config.get('data').get('val_batch_size')
        test_batch_size = supervisor_config.get('data').get('test_batch_size')
        horizon = supervisor_config.get('model').get('horizon')
        seq_len = supervisor_config.get('model').get('seq_len')
        # Standardize with TRAIN statistics only, to avoid leakage.
        scaler = StandardScaler(mean=df_train.values.mean(),
                                std=df_train.values.std())

        data_train = generate_seq2seq_data(df_train, batch_size, seq_len,
                                           horizon, num_nodes, 'train', scaler)
        data_val = generate_seq2seq_data(df_val, val_batch_size, seq_len,
                                         horizon, num_nodes, 'val', scaler)
        data_train.update(data_val)
        #data_train['scaler'] = scaler

        data_test = generate_seq2seq_data(df_test, test_batch_size, seq_len,
                                          horizon, num_nodes, 'test', scaler)
        #data_test['scaler'] = scaler

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx, data_train, supervisor_config)

            data_tag = supervisor_config.get('data').get('dataset_dir')
            folder = data_tag + '/model/'
            if not os.path.exists(folder):
                os.makedirs(folder)
            # Train
            supervisor.train(sess=sess)

            # Test: pick up the newest config written during training.
            yaml_files = glob.glob('%s/model/*/*.yaml' % data_tag,
                                   recursive=True)
            yaml_files.sort(key=os.path.getmtime)
            config_filename = yaml_files[-1]  #'config_%d.yaml' % config_id

            with open(config_filename) as f:
                config = yaml.load(f, Loader=yaml.FullLoader)
            # Load model and evaluate
            supervisor.load(sess, config['train']['model_filename'])
            y_preds = supervisor.evaluate(sess, data_test)

            # Un-scale the predictions and save one HDF5 file per horizon,
            # aligned with the ground-truth index/columns of df_test.
            n_test_samples = data_test['y_test'].shape[0]
            folder = data_tag + '/results/'
            if not os.path.exists(folder):
                os.makedirs(folder)
            for horizon_i in range(data_test['y_test'].shape[1]):
                y_pred = scaler.inverse_transform(y_preds[:, horizon_i, :, 0])
                eval_dfs = df_test[seq_len + horizon_i:seq_len + horizon_i +
                                   n_test_samples]
                df = pd.DataFrame(y_pred,
                                  index=eval_dfs.index,
                                  columns=eval_dfs.columns)
                #df = pd.DataFrame(y_pred, columns=df_test.columns)
                filename = os.path.join(
                    '%s/results/' % data_tag,
                    'dcrnn_speed_prediction_%s.h5' % str(horizon_i + 1))
                df.to_hdf(filename, 'results')

            print(
                'Predictions saved as %s/results/dcrnn_prediction_[1-12].h5...'
                % data_tag)