Example #1
def main(args):
    
    # Pick the GPU to use; assumes the tensorflow_gpu conda env is active.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_instance
    
    ### Open model parameters file
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)
        
        ### Load adjacency matrix
        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        
        ### Load the graph; see lib/utils.py for this function.
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        
        ### Call the DCRNN supervisor class and start training
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)

            supervisor.train(sess=sess)
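These main(args) entry points are typically driven by a small argparse wrapper that is not shown. The sketch below is illustrative only and not part of the example above; the flag names mirror the attributes read in Example #1, and the default paths are placeholders.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Flag names mirror the attributes accessed above; defaults are placeholders.
    parser.add_argument('--config_filename', type=str, default='data/model/dcrnn_config.yaml',
                        help='YAML file with the supervisor configuration.')
    parser.add_argument('--gpu_instance', type=str, default='0',
                        help='Value assigned to CUDA_VISIBLE_DEVICES.')
    parser.add_argument('--use_cpu_only', action='store_true',
                        help='Run TensorFlow on CPU only.')
    main(parser.parse_args())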
Example #2
def run_dcrnn(traffic_reading_df):
    run_id = 'dcrnn_DR_2_h_12_64-64_lr_0.01_bs_64_d_0.00_sl_12_MAE_1207002222'

    log_dir = os.path.join('data/model', run_id)

    config_filename = 'config_100.json'
    graph_pkl_filename = 'data/sensor_graph/adj_mx.pkl'
    with open(os.path.join(log_dir, config_filename)) as f:
        config = json.load(f)
    tf_config = tf.ConfigProto()
    if FLAGS.use_cpu_only:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    tf_config.gpu_options.allow_growth = True
    _, _, adj_mx = load_graph_data(graph_pkl_filename)
    with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(traffic_reading_df,
                                     config=config,
                                     adj_mx=adj_mx)
        supervisor.restore(sess, config=config)
        df_preds = supervisor.test_and_write_result(sess,
                                                    config['global_step'])
        for horizon_i in df_preds:
            df_pred = df_preds[horizon_i]
            filename = os.path.join('data/results/',
                                    'dcrnn_prediction_%d.h5' % (horizon_i + 1))
            df_pred.to_hdf(filename, 'results')
        print(
            'Predictions saved as data/results/dcrnn_prediction_[1-12].h5...'
        )
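For reference, a minimal sketch of reading one of the per-horizon frames written above back into pandas; the path and the 'results' key mirror the to_hdf call in this example.

import pandas as pd

df_h1 = pd.read_hdf('data/results/dcrnn_prediction_1.h5', 'results')  # horizon 1
print(df_h1.shape)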
Example #3
def main(args):
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)

        SC_mx = load_graph_data(
            supervisor_config)  # Load structural connectivity matrix.

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=SC_mx, **supervisor_config)
            supervisor.train(sess=sess)

            if args.save_predictions:
                outputs, _ = supervisor.evaluate(sess=sess)

                print('Save outputs in: ', supervisor._log_dir)
                np.savez(supervisor._log_dir + '/outputs',
                         predictions=outputs['predictions'],
                         groundtruth=outputs['groundtruth'])

                plot_predictions(
                    log_dir=supervisor._log_dir,
                    dataset_dir=supervisor_config['data']['dataset_dir'])
Example #4
def run_dcrnn(args, dataloaders, adj_mx, node_ids):
    # logger = utils.get_logger(args.paths['model_dir'], __name__, level=args.get('log_level', 'INFO'))
    args = get_model_filename(args)
    model_filename = args.paths['model_filename']
    pred_df = None
    if model_filename:
        tf_config = setup_tf(args)
        with tf.Session(config=tf_config) as sess:
            supervisor = \
                DCRNNSupervisor(sess, adj_mx, dataloaders, args)
            outputs = supervisor.evaluate(sess)
        np.savez_compressed(args.paths['output_filename'], **outputs)

        pred_tensor = np.stack(outputs['predictions'])
        # pred_arr2d = pred_tensor[:, -1, :]
        pred_arr2d = pred_tensor[:, 0, :]  # Note: the indices are reversed
        np.savetxt(args.paths['pred_arr2d_filename'],
                   pred_arr2d,
                   delimiter=',')
        # print('Predictions saved as {}.'.format(args.paths['pred_arr2d_filename']))

        # pred_df = pd.read_csv(args.paths['pred_arr2d_filename'], index_col=False,
        #                       sep=',', header=None)
        pred_df = pd.DataFrame(pred_arr2d)
        pred_df.columns = node_ids
        pred_df['timestamp'] = \
            pd.date_range(start=args.datetime_future_start, periods=args.model['horizon'], freq=args.timestep_size_freq)
        pred_df = pred_df.set_index('timestamp')
        pred_df.to_csv(args.paths['pred_df_filename'])
    else:
        print('Pretrained model was not found in the directory: {}'.\
              format(args.paths['model_dir']))
    return args, pred_df
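The prediction frame written by pred_df.to_csv above keeps the node IDs as columns and the generated timestamps as the index, so it can be read back as in this brief sketch; the filename is a stand-in for args.paths['pred_df_filename'].

import pandas as pd

# Hypothetical path standing in for args.paths['pred_df_filename'].
pred_df = pd.read_csv('dcrnn_pred_df.csv', index_col='timestamp', parse_dates=True)
print(pred_df.shape)
print(pred_df.head())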
Example #5
def run_dcrnn(args):

    # Pick the GPU to use; assumes the tensorflow_gpu conda env is active.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_instance

    with open(args.config_filename) as f:
        config = yaml.load(f)
    tf_config = tf.ConfigProto()
    if args.use_cpu_only:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    tf_config.gpu_options.allow_growth = True

    ### Get the adjacency matrix path from the YAML config
    graph_pkl_filename = config['data']['graph_pkl_filename']
    _, _, adj_mx = load_graph_data(graph_pkl_filename)
    with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)

        ### Load the trained model; the checkpoint filename comes from the YAML config
        supervisor.load(sess, config['train']['model_filename'])

        ### Evaluate or perform prediction
        outputs = supervisor.evaluate(sess)
        np.savez_compressed(args.output_filename, **outputs)
        print('Predictions saved as {}.'.format(args.output_filename))
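A quick way to inspect the compressed archive written by np.savez_compressed above. The filename is a placeholder for args.output_filename, and the exact keys depend on what supervisor.evaluate returns (other examples in this listing use 'predictions' and 'groundtruth').

import numpy as np

outputs = np.load('dcrnn_predictions.npz')   # placeholder for args.output_filename
for key in outputs.files:
    print(key, outputs[key].shape)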
Example #6
def main(args):
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)

        SC_mx = load_graph_data(
            supervisor_config)  # Load structural connectivity matrix.

        if args.test_dataset:  # For evaluating the model on a different dataset.
            supervisor_config['data']['dataset_dir'] = args.test_dataset

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=SC_mx, **supervisor_config)
            supervisor.load(
                sess,
                supervisor_config['train']['model_filename'])  # Restore model.

            if args.save_predictions:
                outputs, _ = supervisor.evaluate(sess=sess)

                print('Save outputs in: ', supervisor._log_dir)
                np.savez(supervisor._log_dir + '/' + args.output_name,
                         predictions=outputs['predictions'],
                         groundtruth=outputs['groundtruth'])

                plot_predictions(
                    log_dir=supervisor._log_dir,
                    output_name=args.output_name,
                    dataset_dir=supervisor_config['data']['dataset_dir'])
Example #7
def main():
    # Reads graph data.
    with open(FLAGS.config_filename) as f:
        supervisor_config = json.load(f)
        logger = log_helper.get_logger(supervisor_config.get('base_dir'),
                                       'info.log')
        logger.info('Loading graph from: ' + FLAGS.graph_pkl_filename)
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(
            FLAGS.graph_pkl_filename)
        adj_mx[adj_mx < 0.1] = 0
        logger.info('Loading traffic data from: ' + FLAGS.traffic_df_filename)
        traffic_df_filename = FLAGS.traffic_df_filename
        traffic_reading_df = pd.read_csv(traffic_df_filename)
        # Modified by AG. Note: this `sensors_ids` list is defined here but not used below;
        # the column selection further down uses `sensor_ids` returned by load_graph_data.
        sensors_ids = [
            'time_stamp', 'aqi_W San Gabriel Vly', 'aqi_E San Fernando Vly',
            'aqi_SW Coastal LA', 'aqi_San Gabriel Mts',
            'aqi_SW San Bernardino', 'aqi_Southeast LA CO',
            'aqi_South Coastal LA', 'aqi_Central LA CO', 'aqi_NW Coastal LA',
            'aqi_Santa Clarita Vly', 'aqi_W San Fernando Vly',
            'aqi_E San Gabriel V-2'
        ]
        #test
        test = pd.read_hdf('data/df_highway_2012_4mon_sample.h5')
        test = test.ix[:, ]

        traffic_reading_df = traffic_reading_df.ix[:, sensor_ids]
        supervisor_config['use_cpu_only'] = FLAGS.use_cpu_only
        if FLAGS.log_dir:
            supervisor_config['log_dir'] = FLAGS.log_dir
        if FLAGS.use_curriculum_learning is not None:
            supervisor_config[
                'use_curriculum_learning'] = FLAGS.use_curriculum_learning
        if FLAGS.loss_func:
            supervisor_config['loss_func'] = FLAGS.loss_func
        if FLAGS.filter_type:
            supervisor_config['filter_type'] = FLAGS.filter_type
        # Overwrite config entries with any explicitly specified flag values.
        for name in [
                'batch_size', 'cl_decay_steps', 'epochs', 'horizon',
                'learning_rate', 'l1_decay', 'lr_decay', 'lr_decay_epoch',
                'lr_decay_interval', 'learning_rate', 'min_learning_rate',
                'patience', 'seq_len', 'test_every_n_epochs', 'verbose'
        ]:
            if getattr(FLAGS, name) >= 0:
                supervisor_config[name] = getattr(FLAGS, name)

        tf_config = tf.ConfigProto()
        if FLAGS.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(traffic_reading_df=traffic_reading_df,
                                         adj_mx=adj_mx,
                                         config=supervisor_config)

            supervisor.train(sess=sess)
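Examples #7 and #14 read their settings from TF1-style command-line FLAGS that are declared elsewhere. A minimal sketch of how such flags are usually defined with tf.app.flags, assuming TensorFlow 1.x; the names mirror the attributes read above, the defaults are illustrative, and the remaining numeric overrides in the loop would follow the same DEFINE_integer/DEFINE_float pattern.

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('config_filename', 'data/dcrnn_config.json', 'Supervisor configuration file.')
flags.DEFINE_string('graph_pkl_filename', 'data/sensor_graph/adj_mx.pkl', 'Pickled adjacency matrix.')
flags.DEFINE_string('traffic_df_filename', 'data/traffic_readings.csv', 'Traffic/AQI readings file.')
flags.DEFINE_bool('use_cpu_only', False, 'Run TensorFlow on CPU only.')
flags.DEFINE_string('log_dir', '', 'Overrides the log directory when set.')
flags.DEFINE_integer('batch_size', -1, 'Overrides the config value when >= 0.')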
Example #8
def train_dcrnn(args, dataloaders, adj_mx):
    if not args.test_only:
        args = get_model_filename(args)
        tf_config = setup_tf(args)
        with tf.Session(config=tf_config) as sess:
            supervisor = \
                DCRNNSupervisor(sess, adj_mx, dataloaders, args)
            supervisor.train(sess)
    return args
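Examples #4 and #8 delegate session configuration to a setup_tf(args) helper that is not shown. A plausible shape for it, mirroring the ConfigProto pattern used in the other examples in this listing; this is a hypothetical sketch, not the project's actual helper.

import tensorflow as tf

def setup_tf(args):
    # Mirrors the ConfigProto setup used elsewhere in this listing.
    tf_config = tf.ConfigProto()
    if getattr(args, 'use_cpu_only', False):
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    tf_config.gpu_options.allow_growth = True
    return tf_config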
Example #9
def main(args):

    config_filename = './data/model/dcrnn_{}.yaml'.format(args.city)
    with open(config_filename) as f:
        supervisor_config = yaml.load(f)

        graph_pkl_filename = supervisor_config['data'].get(
            'graph_pkl_filename')
        adj_mx = load_graph_data1(graph_pkl_filename)

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': -1})
        else:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)
            print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']))

        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
            supervisor.train(sess=sess)

        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
            supervisor.load(sess, supervisor_config['train']['model_filename'])
            outputs = supervisor.evaluate(sess)
Example #10
def main(args):

    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)

        tf_config = tf.ConfigProto()
        # if args.use_cpu_only:
        #     tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(**supervisor_config)

            supervisor.train(sess=sess)
Example #11
def main(args):
    with open(args.config_filename) as f:

        supervisor_config = yaml.load(f)
        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
            supervisor.train(sess=sess)
Example #12
def run_dcrnn(args):
    graph_pkl_filename = 'data/sensor_graph/adj_mx.pkl'
    with open(args.config_filename) as f:
        config = yaml.load(f)
    tf_config = tf.ConfigProto()
    if args.use_cpu_only:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    tf_config.gpu_options.allow_growth = True
    _, _, adj_mx = load_graph_data(graph_pkl_filename)
    with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
        supervisor.load(sess, config['train']['model_filename'])
        outputs = supervisor.evaluate(sess)
        np.savez_compressed(args.output_filename, **outputs)
        print('Predictions saved as {}.'.format(args.output_filename))
Example #13
def main(args):
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)

        graph_pkl_filename = supervisor_config['data'].get(
            'graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(
            graph_pkl_filename)

        import os
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)

            supervisor.train(sess=sess)
Example #14
def main():
    # Reads graph data.
    with open(FLAGS.config_filename) as f:
        supervisor_config = yaml.load(f)
        logger = log_helper.get_logger(supervisor_config.get('base_dir'),
                                       'info.log')
        logger.info('Loading graph from: ' + FLAGS.graph_pkl_filename)
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(
            FLAGS.graph_pkl_filename)
        adj_mx[adj_mx < 0.1] = 0
        logger.info('Loading traffic data from: ' + FLAGS.traffic_df_filename)
        traffic_df_filename = FLAGS.traffic_df_filename
        traffic_reading_df = pd.read_hdf(traffic_df_filename)
        traffic_reading_df = traffic_reading_df.ix[:, sensor_ids]
        supervisor_config['use_cpu_only'] = FLAGS.use_cpu_only
        if FLAGS.log_dir:
            supervisor_config['log_dir'] = FLAGS.log_dir
        if FLAGS.use_curriculum_learning is not None:
            supervisor_config[
                'use_curriculum_learning'] = FLAGS.use_curriculum_learning
        if FLAGS.loss_func:
            supervisor_config['loss_func'] = FLAGS.loss_func
        if FLAGS.filter_type:
            supervisor_config['filter_type'] = FLAGS.filter_type
        # Overwrite config entries with any explicitly specified flag values.
        for name in [
                'batch_size', 'cl_decay_steps', 'epochs', 'horizon',
                'learning_rate', 'l1_decay', 'lr_decay', 'lr_decay_epoch',
                'lr_decay_interval', 'learning_rate', 'min_learning_rate',
                'patience', 'seq_len', 'test_every_n_epochs', 'verbose'
        ]:
            if getattr(FLAGS, name) >= 0:
                supervisor_config[name] = getattr(FLAGS, name)

        tf_config = tf.ConfigProto()
        if FLAGS.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(traffic_reading_df=traffic_reading_df,
                                         adj_mx=adj_mx,
                                         config=supervisor_config)

            supervisor.train(sess=sess)
Example #15
def main(args):
    tf.reset_default_graph()
    with open(args.config_filename) as f:
        with tf.Graph().as_default() as g:
            supervisor_config = yaml.load(f)
            graph_pkl_filename = supervisor_config['data'].get(
                'graph_pkl_filename')
            if supervisor_config['data']['data_type'] == 'npz':
                sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(
                    graph_pkl_filename)
            elif supervisor_config['data']['data_type'] == 'csv':
                adj_mx = load_graph_data_from_csv(
                    supervisor_config['data'].get('dataset_dir'))
            tf_config = tf.ConfigProto()
            if args.use_cpu_only:
                tf_config = tf.ConfigProto(device_count={'GPU': 0})
            tf_config.gpu_options.allow_growth = True
            #tf_config.gpu_options.per_process_gpu_memory_fraction = 1
            with tf.Session(config=tf_config) as sess:
                supervisor = DCRNNSupervisor(args=args,
                                             adj_mx=adj_mx,
                                             **supervisor_config)

                supervisor.train(sess=sess)
Example #16
def run_dcrnn(args):
    config_filename = './data/model/dcrnn_{}.yaml'.format(args.city)
    with open(config_filename) as f:
        config = yaml.load(f)
        graph_pkl_filename = config['data'].get('graph_pkl_filename')
        adj_mx = load_graph_data1(graph_pkl_filename)
        node_pos_pkl_filename = config['data'].get('node_pos_pkl_filename')
        node_pos = np.load(node_pos_pkl_filename)
        indicies = utcPlus3
        if args.city == 'Berlin':
            indicies = utcPlus2

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': -1})
        tf_config.gpu_options.allow_growth = True

        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
            supervisor.load(sess, args.model_file)
            supervisor.pred_write_submission_files_with_avg(
                sess, args, node_pos, indicies)
Example #17
def run_dcrnn(args):
    graph_pkl_filename = 'data/sensor_graph/adj_mx.pkl'
    with open(args.config_filename) as f:
        config = yaml.load(f)
    tf_config = tf.ConfigProto()
    if args.use_cpu_only:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    tf_config.gpu_options.allow_growth = True
    _, _, adj_mx = load_graph_data(graph_pkl_filename)
    with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
        supervisor.load(
            sess, config['train']
            ['model_filename'])  # self._saver.restore(sess, model_filename)

        # supervisor._test_model
        # Save the variables to disk.
        # save_path = supervisor._test_model.save(sess, "/tmp/test_model.ckpt")
        save_path = 'data/model/pretrained/'
        # supervisor._saver.save(sess, save_path+"model.ckpt") #tf.train.Saver()
        print("Test_Model saved in path: %s" % save_path)

        ## Restore the Model
        saver = supervisor._saver  #tf.train.import_meta_graph(save_path+'model.ckpt.meta', clear_devices=True)
        # sess = tf.Session()
        saver.restore(sess, save_path + "model.ckpt")

        # tf.train.write_graph(sess.graph_def, save_path, 'model-temp.pb', as_text=True)
        graph = tf.get_default_graph()

        input_graph_def = graph.as_graph_def()
        # output_node_names = "outputs"
        # print "node2##### ", input_graph_def.node.name
        print "node Names ########################### "
        # for v in sess.graph.get_operations():
        #         print(v.name)
        print len(sess.graph.get_operations())

        x = supervisor._test_model.inputs
        y = supervisor._test_model.outputs
        print "tf.SignatureDef...."
        print x, y
        tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
        tensor_info_y = tf.saved_model.utils.build_tensor_info(y)
        print "tensor_info_... "
        print tensor_info_x
        print tensor_info_y
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_x},
                outputs={'scores': tensor_info_y},
                method_name="tensorflow/serving/predict"))
        saved_model_dir = save_path + 'pb_model'
        builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)
        builder.add_meta_graph_and_variables(
            sess, ['serve'],
            signature_def_map={
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                prediction_signature
            })
        builder.save()
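Once the SavedModel above has been written, it can be loaded back and its serving signature resolved with the TF1.x loader API. A sketch, assuming the 'pb_model' directory was produced successfully:

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, ['serve'],
                                            'data/model/pretrained/pb_model')
    signature = meta_graph.signature_def[
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    print(signature.inputs['images'].name)   # the DCRNN test-model inputs tensor
    print(signature.outputs['scores'].name)  # the DCRNN test-model outputs tensor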
Example #18
def evaluate_dcrnn(adj_mx, config):
    # with tf.device('/device:GPU:{}'.format(config['gpu'])):
    # `session` below is assumed to be a tf.Session created elsewhere in this project (it is not defined here).
    dcrnn_supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
    dcrnn_supervisor.load(session, config['train']['model_filename'])
    dcrnn_supervisor.evaluate(sess=session)
Example #19
def test_dcrnn(adj_mx, config):
    # with tf.device('/device:GPU:{}'.format(config['gpu'])):
    dcrnn_supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
    dcrnn_supervisor.load(session, config['train']['model_filename'])
    dcrnn_supervisor.test(sess=session)
    dcrnn_supervisor.plot_series()
Example #20
def train_dcrnn(adj_mx, config):
    # with tf.device('/device:GPU:{}'.format(config['gpu'])):
    dcrnn_supervisor = DCRNNSupervisor(adj_mx=adj_mx, **config)
    dcrnn_supervisor.train(sess=session)
Example #21
def main(args):
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)

        graph_pkl_filename = supervisor_config['data'].get(
            'graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(
            graph_pkl_filename)
        supervisor_config['model']['num_nodes'] = num_nodes = len(sensor_ids)

        # Data preprocessing
        traffic_df_filename = supervisor_config['data']['hdf_filename']
        df_data = pd.read_hdf(traffic_df_filename)
        #df_data = df_data.iloc[int(df_data.shape[0]/3):,:]
        validation_ratio = supervisor_config.get('data').get(
            'validation_ratio')
        test_ratio = supervisor_config.get('data').get('test_ratio')
        df_train, df_val, df_test = train_val_test_split(
            df_data, val_ratio=validation_ratio, test_ratio=test_ratio)

        batch_size = supervisor_config.get('data').get('batch_size')
        val_batch_size = supervisor_config.get('data').get('val_batch_size')
        test_batch_size = supervisor_config.get('data').get('test_batch_size')
        horizon = supervisor_config.get('model').get('horizon')
        seq_len = supervisor_config.get('model').get('seq_len')
        scaler = StandardScaler(mean=df_train.values.mean(),
                                std=df_train.values.std())

        data_train = generate_seq2seq_data(df_train, batch_size, seq_len,
                                           horizon, num_nodes, 'train', scaler)
        data_val = generate_seq2seq_data(df_val, val_batch_size, seq_len,
                                         horizon, num_nodes, 'val', scaler)
        data_train.update(data_val)
        #data_train['scaler'] = scaler

        data_test = generate_seq2seq_data(df_test, test_batch_size, seq_len,
                                          horizon, num_nodes, 'test', scaler)
        #data_test['scaler'] = scaler

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        with tf.Session(config=tf_config) as sess:
            supervisor = DCRNNSupervisor(adj_mx, data_train, supervisor_config)

            data_tag = supervisor_config.get('data').get('dataset_dir')
            folder = data_tag + '/model/'
            if not os.path.exists(folder):
                os.makedirs(folder)
            # Train
            supervisor.train(sess=sess)

            # Test
            yaml_files = glob.glob('%s/model/*/*.yaml' % data_tag,
                                   recursive=True)
            yaml_files.sort(key=os.path.getmtime)
            config_filename = yaml_files[-1]  #'config_%d.yaml' % config_id

            with open(config_filename) as f:
                config = yaml.load(f)
            # Load model and evaluate
            supervisor.load(sess, config['train']['model_filename'])
            y_preds = supervisor.evaluate(sess, data_test)

            n_test_samples = data_test['y_test'].shape[0]
            folder = data_tag + '/results/'
            if not os.path.exists(folder):
                os.makedirs(folder)
            for horizon_i in range(data_test['y_test'].shape[1]):
                y_pred = scaler.inverse_transform(y_preds[:, horizon_i, :, 0])
                eval_dfs = df_test[seq_len + horizon_i:seq_len + horizon_i +
                                   n_test_samples]
                df = pd.DataFrame(y_pred,
                                  index=eval_dfs.index,
                                  columns=eval_dfs.columns)
                #df = pd.DataFrame(y_pred, columns=df_test.columns)
                filename = os.path.join(
                    '%s/results/' % data_tag,
                    'dcrnn_speed_prediction_%s.h5' % str(horizon_i + 1))
                df.to_hdf(filename, 'results')

            print(
                'Predictions saved as %s/results/dcrnn_speed_prediction_[1-12].h5...'
                % data_tag)
Example #22
def main(args):
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)

        SEED = 1234
        random.seed(SEED)
        np.random.seed(SEED)
        tf.set_random_seed(SEED)

        tf_config = tf.ConfigProto()
        if args.use_cpu_only:
            tf_config = tf.ConfigProto(device_count={'GPU': 0})
        tf_config.gpu_options.allow_growth = True
        bytes_in_use = BytesInUse()
        maxbytes_in_use = tf.contrib.memory_stats.MaxBytesInUse()
        with tf.Session(config=tf_config) as sess:

            # Train
            data_tag = supervisor_config.get('base_dir')
            if restart:  # `restart` is assumed to be defined elsewhere (it is not set in this function).
                data_tag = supervisor_config.get('base_dir')
                yaml_files = glob.glob('%s/*/*.yaml' % data_tag,
                                       recursive=True)
                yaml_files.sort(key=os.path.getmtime)
                config_filename = yaml_files[-1]  #'config_%d.yaml' % config_id
                print(config_filename)
                with open(config_filename) as f:
                    config = yaml.load(f)
                supervisor = DCRNNSupervisor(config)
                supervisor.load(sess, config['train']['model_filename'])
            else:
                supervisor = DCRNNSupervisor(supervisor_config)
                folder = data_tag
                if not os.path.exists(folder):
                    os.makedirs(folder)
            supervisor.train(sess=sess)

            # Test
            supervisor = DCRNNSupervisor(supervisor_config)
            data_tag = supervisor_config.get('base_dir')
            yaml_files = glob.glob('%s/*/*.yaml' % data_tag, recursive=True)
            yaml_files.sort(key=os.path.getmtime)
            config_filename = yaml_files[-1]  #'config_%d.yaml' % config_id

            with open(config_filename) as f:
                config = yaml.load(f)
            # Load model and evaluate
            supervisor.load(sess, config['train']['model_filename'])
            y_preds = supervisor.evaluate(sess)

            folder = 'data/results/'
            if not os.path.exists(folder):
                os.makedirs(folder)
            df_sp = pd.DataFrame(y_preds)
            filename = os.path.join(folder, 'dcrnn_speed_bay.h5')
            df_sp.to_hdf(filename, 'results')