Example #1
    def __init__(self, is_training=False, **kwargs):
        super(GCONVRNN, self).__init__(**kwargs)

        self._train_batch_size = int(kwargs['data'].get('batch_size'))
        self._test_batch_size = int(kwargs['data'].get('test_batch_size'))

        self._epoch = int(kwargs['train'].get('epoch'))

        self._logstep = int(kwargs['train'].get('logstep'))

        self._checkpoint_secs = int(kwargs['train'].get('checkpoint_secs'))

        self._data = utils.load_dataset_dcrnn(
            seq_len=self._model_kwargs.get('seq_len'),
            horizon=self._model_kwargs.get('horizon'),
            input_dim=self._model_kwargs.get('input_dim'),
            mon_ratio=self._mon_ratio,
            scaler_type=self._kwargs.get('scaler'),
            is_training=is_training,
            **self._data_kwargs)
        for k, v in self._data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        # Build models.
        scaler = self._data['scaler']

        W = self._data['adj_mx']
        # Scale the adjacency matrix to [0, 1] and compute its largest
        # eigenvalue (lmax), which the graph convolution needs.
        laplacian = W / W.max()
        laplacian = scipy.sparse.csr_matrix(laplacian, dtype=np.float32)
        lmax = graph.lmax(laplacian)

        if is_training:
            self._train_model = Model(is_training=True,
                                      laplacian=laplacian,
                                      lmax=lmax,
                                      batch_size=self._train_batch_size,
                                      reuse=True,
                                      **self._model_kwargs)
        else:
            self._test_model = Model(is_training=False,
                                     laplacian=laplacian,
                                     lmax=lmax,
                                     batch_size=self._test_batch_size,
                                     reuse=False,
                                     **self._model_kwargs)

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)

        self.saver = tf.train.Saver(tf.global_variables(),
                                    max_to_keep=max_to_keep)
        self.summary_writer = tf.summary.FileWriter(self._log_dir)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
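If graph.lmax is unavailable, the same quantity can be approximated directly with scipy. The sketch below is illustrative only and assumes that lmax means the largest eigenvalue of a normalized graph Laplacian built from the adjacency matrix; approx_lmax is a hypothetical helper, not part of Example #1.

import numpy as np
import scipy.sparse
import scipy.sparse.linalg


def approx_lmax(adj):
    """Largest eigenvalue of L = I - D^-1/2 A D^-1/2 for a dense or sparse adjacency."""
    adj = scipy.sparse.csr_matrix(adj, dtype=np.float32)
    degrees = np.asarray(adj.sum(axis=1)).flatten()
    # Avoid division by zero for isolated nodes.
    d_inv_sqrt = np.zeros_like(degrees)
    nonzero = degrees > 0
    d_inv_sqrt[nonzero] = degrees[nonzero] ** -0.5
    d_mat = scipy.sparse.diags(d_inv_sqrt)
    laplacian = scipy.sparse.identity(adj.shape[0], dtype=np.float32) - d_mat.dot(adj).dot(d_mat)
    # eigsh with which='LM' returns the eigenvalue of largest magnitude.
    return float(scipy.sparse.linalg.eigsh(laplacian, k=1, which='LM',
                                           return_eigenvectors=False)[0])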
Example #2
    def __init__(self, is_training=False, **kwargs):
        self._kwargs = kwargs

        self._alg = kwargs.get('alg')
        self._data_kwargs = kwargs.get('data')
        self._train_kwargs = kwargs.get('train')
        self._test_kwargs = kwargs.get('test')
        self._model_kwargs = kwargs.get('model')
        self._base_dir = kwargs.get('base_dir')

        self._epochs = self._train_kwargs.get('epochs')

        # logging.
        self._log_dir = get_log_dir(kwargs)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir,
                                        __name__,
                                        'info.log',
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self._log_dir)
        self._logger.info(kwargs)

        self._mon_ratio = float(self._kwargs.get('mon_ratio'))

        # Model's args
        self._input_dim = int(self._model_kwargs.get('input_dim'))
        self._nodes = int(self._model_kwargs.get('num_nodes'))
        self._drop_out = float(self._train_kwargs.get('dropout'))
        self.batch_size = int(self._data_kwargs['batch_size'])
        # Test's args
        self._flow_selection = self._test_kwargs.get('flow_selection')
        self._run_times = self._test_kwargs.get('run_times')
        # Data preparation
        self._day_size = self._data_kwargs.get('day_size')

        self._data = utils.load_dataset_gatlstm(
            num_nodes=self._model_kwargs.get('num_nodes'),
            input_dim=self._model_kwargs.get('input_dim'),
            mon_ratio=self._mon_ratio,
            scaler_type=self._kwargs.get('scaler'),
            is_training=is_training,
            **self._data_kwargs)
        for k, v in self._data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        # Build models.
        scaler = self._data['scaler']
        if is_training:
            self.model = GATLSTMModel(scaler=scaler,
                                      batch_size=self.batch_size,
                                      **self._model_kwargs)
        else:
            self.model = GATLSTMModel(scaler=scaler,
                                      batch_size=1,
                                      **self._model_kwargs)

        # Checkpointing.
        max_to_keep = self._train_kwargs.get('max_to_keep', 100)

        self.saver = tf.train.Saver(tf.global_variables(),
                                    max_to_keep=max_to_keep)
        self.summary_writer = tf.summary.FileWriter(self._log_dir)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
Example #3
    def __init__(self, args, adj_mx, **kwargs):

        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir,
                                        __name__,
                                        kwargs['name'] + '_info.log',
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self._log_dir)
        self._logger.info(kwargs)

        # Data preparation
        if self._data_kwargs.get('data_type') == 'npz':
            self._data = utils.load_dataset(**self._data_kwargs)
        elif self._data_kwargs.get('data_type') == 'csv':
            self._data = utils.load_dataset_from_csv(**self._data_kwargs)
        for k, v in self._data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        # Build models.
        scaler = self._data['scaler']
        with tf.name_scope('Train'):
            with tf.variable_scope('DCRNN', reuse=False):
                self._train_model = DCRNNModel(
                    args=args,
                    is_training=True,
                    scaler=scaler,
                    batch_size=self._data_kwargs['batch_size'],
                    adj_mx=adj_mx,
                    **self._model_kwargs)

        with tf.name_scope('Test'):
            with tf.variable_scope('DCRNN', reuse=True):
                self._test_model = DCRNNModel(
                    args=args,
                    is_training=False,
                    scaler=scaler,
                    batch_size=self._data_kwargs['test_batch_size'],
                    adj_mx=adj_mx,
                    **self._model_kwargs)

        # Learning rate.
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(self._lr, epsilon=epsilon)
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )
        elif optimizer_name == 'amsgrad':
            optimizer = AMSGrad(self._lr, epsilon=epsilon)

        # Calculate loss
        output_dim = self._model_kwargs.get('output_dim')
        preds = self._train_model.outputs
        labels = self._train_model.labels[..., :output_dim]

        null_val = 0.
        self._loss_fn = masked_mae_loss(scaler, null_val)
        self._mape_fn = masked_mape(scaler, null_val)
        self._rmse_fn = masked_rmse_loss(scaler, null_val)
        self._train_loss = self._loss_fn(preds=preds, labels=labels)

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
        global_step = tf.train.get_or_create_global_step()
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=global_step,
                                                   name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._epoch = 0
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
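The learning-rate plumbing above (the non-trainable learning_rate variable, the new_learning_rate placeholder, and the lr_update assign op) is usually driven from the training loop roughly as sketched below; set_lr, sess, supervisor, base_lr, and epoch are illustrative names, not part of the example.

def set_lr(sess, supervisor, new_lr):
    # Feed the placeholder and run the assign op; every optimizer step built on
    # supervisor._lr picks up the new value afterwards.
    sess.run(supervisor._lr_update, feed_dict={supervisor._new_lr: new_lr})


# e.g. halve the learning rate every 20 epochs, starting from base_lr = 0.01:
# set_lr(sess, supervisor, base_lr * 0.5 ** (epoch // 20))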
Example #4
    def __init__(self, **kwargs):

        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')

        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir,
                                        __name__,
                                        'info.log',
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self._log_dir)
        self._logger.info(kwargs)

        # Data preparation
        # load data set
        self._data = utils.load_dataset(**self._data_kwargs)
        # Observed dataset shapes (seq_len=12, 207 nodes, 2 input features):
        #   x_train / y_train: (23974, 12, 207, 2)
        #   x_val   / y_val:   (3425, 12, 207, 2)
        #   x_test  / y_test:  (6850, 12, 207, 2)

        # TODO: load the node2vec embeddings and attach them here. To comply
        # with these dimensions, the same embedding could be repeated at every
        # time step; how to fit it into the feature dimension is still open.
        for k, v in self._data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        # Build models.
        scaler = self._data['scaler']
        # scaler holds the pre-computed mean and standard deviation used to
        # normalize and de-normalize the data.
        with tf.name_scope('Train'):
            # batch_size: number of training examples used in one iteration.
            with tf.variable_scope('DCRNN', reuse=False):
                self._train_model = DCRNNModel(
                    is_training=True,
                    scaler=scaler,
                    batch_size=self._data_kwargs['batch_size'],
                    adj_matrix_file=self._data_kwargs['graph_pkl_filename'],
                    **self._model_kwargs)

        with tf.name_scope('Test'):
            with tf.variable_scope('DCRNN', reuse=True):
                self._test_model = DCRNNModel(
                    is_training=False,
                    scaler=scaler,
                    batch_size=self._data_kwargs['test_batch_size'],
                    adj_matrix_file=self._data_kwargs['graph_pkl_filename'],
                    **self._model_kwargs)

        # Learning rate.
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(self._lr, epsilon=epsilon)
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )
        elif optimizer_name == 'amsgrad':
            optimizer = AMSGrad(self._lr, epsilon=epsilon)

        # Calculate loss
        output_dim = self._model_kwargs.get('output_dim')
        # preds is the stacked tensor of model outputs (not a feedable
        # placeholder; evaluating it requires feeding the model inputs).
        preds = self._train_model.outputs
        labels = self._train_model.labels[..., :output_dim]

        null_val = 0.
        self._loss_fn = masked_mae_loss(scaler, null_val)
        self._train_loss = self._loss_fn(preds=preds, labels=labels)

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
        global_step = tf.train.get_or_create_global_step()
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=global_step,
                                                   name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._epoch = 0
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
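Each "Configure optimizer" block first builds an Adam optimizer and then overwrites it when 'sgd' or 'amsgrad' is requested. An equivalent lookup-based variant that only constructs the optimizer actually asked for is sketched below; build_optimizer is an illustrative helper, not part of the original code, and AMSGrad refers to the same project class imported by these examples.

import tensorflow as tf


def build_optimizer(name, lr, epsilon):
    # Only the requested optimizer is constructed; unknown names fall back to
    # Adam, matching the behaviour of the original if/elif blocks.
    constructors = {
        'adam': lambda: tf.train.AdamOptimizer(lr, epsilon=epsilon),
        'sgd': lambda: tf.train.GradientDescentOptimizer(lr),
        'amsgrad': lambda: AMSGrad(lr, epsilon=epsilon),
    }
    return constructors.get(name.lower(), constructors['adam'])()


# optimizer = build_optimizer(self._train_kwargs.get('optimizer', 'adam'),
#                             self._lr, epsilon)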
Example #5
    def __init__(self, is_training=False, **kwargs):
        super(DCRNNSupervisor, self).__init__(**kwargs)

        self._data = utils.load_dataset_dcrnn(
            seq_len=self._model_kwargs.get('seq_len'),
            horizon=self._model_kwargs.get('horizon'),
            input_dim=self._model_kwargs.get('input_dim'),
            mon_ratio=self._mon_ratio,
            scaler_type=self._kwargs.get('scaler'),
            is_training=is_training,
            **self._data_kwargs)
        for k, v in self._data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        # Build models.
        scaler = self._data['scaler']
        if is_training:
            self.model = DCRNNModel(scaler=scaler,
                                    batch_size=self._data_kwargs['batch_size'],
                                    adj_mx=self._data['adj_mx'],
                                    **self._model_kwargs)
        else:
            self.model = DCRNNModel(scaler=scaler,
                                    batch_size=1,
                                    adj_mx=self._data['adj_mx'],
                                    **self._model_kwargs)

        # Learning rate.
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(
            self._lr,
            epsilon=epsilon,
        )
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )
        elif optimizer_name == 'amsgrad':
            optimizer = AMSGrad(self._lr, epsilon=epsilon)

        # Calculate loss
        output_dim = self._model_kwargs.get('output_dim')
        preds = self.model.outputs
        labels = self.model.labels[..., :output_dim]

        null_val = 0.
        self._loss_fn = masked_mse_loss(scaler, null_val)
        # self._loss_fn = masked_mae_loss(scaler, null_val)
        self._train_loss = self._loss_fn(preds=preds, labels=labels)

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
        global_step = tf.train.get_or_create_global_step()
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=global_step,
                                                   name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._epoch = 0
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
Example #6
    def __init__(self, sess, adj_mx, dataloaders, kwargs):
        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        self._paths_kwargs = kwargs.get('paths')
        self._save_tensors = kwargs.get('tf_config').get('save_tensors', False) \
            if kwargs.get('tf_config') else False
        self._trace = kwargs.get('tf_config').get('trace', False) \
            if kwargs.get('tf_config') else False
        self._save_graph = kwargs.get('tf_config').get('save_graph', False) \
            if kwargs.get('tf_config') else False

        self._log_dir = self._get_log_dir(kwargs)

        self._writer = tf.summary.FileWriter(self._log_dir, sess.graph) \
            if self._save_graph else tf.summary.FileWriter(self._log_dir)

        # Data preparation
        self._data = dataloaders

        # for k, v in self._data.items():
        #     if hasattr(v, 'shape'):
        #         self._kwargs.logger.info((k, v.shape))

        # Build models.
        scaler = self._data['scaler']
        with tf.name_scope('Train'):
            with tf.variable_scope('DCRNN', reuse=False):
                train_batch_size = dataloaders['train_loader'].batch_size
                self._train_model = DCRNNModel(is_training=True,
                                               scaler=scaler,
                                               batch_size=train_batch_size,
                                               adj_mx=adj_mx,
                                               **self._model_kwargs)

        with tf.name_scope('Val'):
            with tf.variable_scope('DCRNN', reuse=True):
                val_batch_size = dataloaders['val_loader'].batch_size
                self._val_model = DCRNNModel(is_training=False,
                                             scaler=scaler,
                                             batch_size=val_batch_size,
                                             adj_mx=adj_mx,
                                             **self._model_kwargs)

        with tf.name_scope('Test'):
            with tf.variable_scope('DCRNN', reuse=True):
                test_batch_size = dataloaders['test_loader'].batch_size
                self._test_model = DCRNNModel(is_training=False,
                                              scaler=scaler,
                                              batch_size=test_batch_size,
                                              adj_mx=adj_mx,
                                              **self._model_kwargs)

        # Learning rate.
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(self._lr, epsilon=epsilon)
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )
        elif optimizer_name == 'amsgrad':
            optimizer = AMSGrad(self._lr, epsilon=epsilon)

        # Calculate loss
        output_dim = self._model_kwargs.get('output_dim')
        preds = self._train_model.outputs
        labels = self._train_model.labels[..., :output_dim]

        null_val = 0. if kwargs['model'].get('exclude_zeros_in_metric',
                                             True) else np.nan

        loss_func_dict = {
            'mae': masked_mae_loss(scaler, null_val),
            'rmse': masked_rmse_loss(scaler, null_val),
            'mse': masked_mse_loss(scaler, null_val)
        }
        self._loss_fn = loss_func_dict.get(kwargs['train'].get(
            'loss_func', 'mae'))
        self._metric_fn = loss_func_dict.get(kwargs['train'].get(
            'metric_func', 'mae'))

        self._train_loss = self._loss_fn(preds=preds, labels=labels)

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)

        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.train.get_or_create_global_step(),
            name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # load model
        model_filename = self._paths_kwargs.get('model_filename')
        if model_filename is not None:
            self._saver.restore(sess, model_filename)
            self._kwargs.logger.info(
                'Pretrained model was loaded from : {}'.format(model_filename))
        else:
            sess.run(tf.global_variables_initializer())

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._kwargs.logger.info(
            'Total number of trainable parameters: {:d}'.format(
                total_trainable_parameter))
        for var in tf.global_variables():
            self._kwargs.logger.debug('{}, {}'.format(var.name,
                                                      var.get_shape()))
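The loss factories called throughout (masked_mae_loss, masked_rmse_loss, masked_mse_loss) live in each project's own loss module. As a rough sketch only, a scaler-aware masked MAE factory compatible with these call sites could look like the following; it assumes the scaler exposes inverse_transform, and the details are assumptions rather than the projects' actual implementation.

import numpy as np
import tensorflow as tf


def masked_mae_loss(scaler, null_val=np.nan):
    def loss(preds, labels):
        # Compute the metric in the original data scale.
        preds = scaler.inverse_transform(preds)
        labels = scaler.inverse_transform(labels)
        if np.isnan(null_val):
            mask = tf.logical_not(tf.is_nan(labels))
        else:
            mask = tf.not_equal(labels, null_val)
        mask = tf.cast(mask, tf.float32)
        # Re-weight so that masked-out entries do not dilute the mean.
        mask /= tf.reduce_mean(mask)
        mae = tf.abs(preds - labels) * mask
        mae = tf.where(tf.is_nan(mae), tf.zeros_like(mae), mae)
        return tf.reduce_mean(mae)

    return loss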
Example #7
    def __init__(self, kwargs):

        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')

        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir,
                                        __name__,
                                        'info.log',
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self._log_dir)
        self._logger.info(kwargs)

        # Data preparation
        self.batch_size = self._data_kwargs.get('batch_size')
        self.val_batch_size = self._data_kwargs['val_batch_size']
        self.test_batch_size = 1
        self.horizon = self._model_kwargs.get('horizon')
        self.seq_len = self._model_kwargs.get('seq_len')

        self.validation_ratio = self._data_kwargs['validation_ratio']
        self.test_ratio = self._data_kwargs['test_ratio']

        sensor_filename = self._data_kwargs['sensor_filename']
        self.sensor_df = pd.read_csv(sensor_filename)
        distance_filename = self._data_kwargs['distance_filename']
        self.dist_df = pd.read_csv(distance_filename)
        partition_filename = self._data_kwargs['partition_filename']
        self.partition = np.genfromtxt(partition_filename,
                                       dtype=int,
                                       delimiter="\n",
                                       unpack=False)
        self.clusters = np.unique(self.partition)
        self.max_node = max(np.bincount(self.partition))
        self._model_kwargs['num_nodes'] = self.max_node

        # Build models.
        #scaler = self._data['scaler']
        with tf.name_scope('Train'):
            with tf.variable_scope('DCRNN', reuse=False):
                self._train_model = DCRNNModel(
                    is_training=True,
                    batch_size=self._data_kwargs['batch_size'],
                    **self._model_kwargs)

        with tf.name_scope('Test'):
            with tf.variable_scope('DCRNN', reuse=True):
                self._test_model = DCRNNModel(
                    is_training=False,
                    batch_size=self._data_kwargs['test_batch_size'],
                    **self._model_kwargs)

        # Learning rate.
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(self._lr, epsilon=epsilon)
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )

        # Calculate loss
        output_dim = self._model_kwargs.get('output_dim')
        preds = self._train_model.outputs
        labels = self._train_model.labels[..., :output_dim]

        self.preds_test = self._test_model.outputs
        self.labels_test = self._test_model.labels[..., :output_dim]

        null_val = 0.
        self._loss_fn = masked_mae_loss(null_val)
        # self._loss_fn = masked_mae_loss(scaler, null_val)
        self._train_loss = self._loss_fn(preds=preds, labels=labels)
        self._test_loss = self._loss_fn(preds=self.preds_test,
                                        labels=self.labels_test)

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
        global_step = tf.train.get_or_create_global_step()
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=global_step,
                                                   name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._epoch = 0
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
Example #8
    def __init__(self, **kwargs):

        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        self._test_kwargs = kwargs.get('test')
        # logging.
        self._log_dir = _get_log_dir(kwargs)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir,
                                        __name__,
                                        'info.log',
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self._log_dir)
        self._logger.info(kwargs)

        self._mon_ratio = float(self._kwargs.get('mon_ratio'))

        # Model's args
        self._seq_len = int(self._model_kwargs.get('seq_len'))
        self._horizon = int(self._model_kwargs.get('horizon'))
        self._input_dim = int(self._model_kwargs.get('input_dim'))
        self._nodes = int(self._model_kwargs.get('num_nodes'))
        self._r = int(self._model_kwargs.get('r'))

        # Test's args
        self._flow_selection = self._test_kwargs.get('flow_selection')
        self._run_times = self._test_kwargs.get('run_times')

        self._lamda = []
        self._lamda.append(self._test_kwargs.get('lamda_0'))
        self._lamda.append(self._test_kwargs.get('lamda_1'))
        self._lamda.append(self._test_kwargs.get('lamda_2'))

        # Data preparation
        self._day_size = self._data_kwargs.get('day_size')
        self.data = utils.load_dataset_dcrnn_fwbw(
            seq_len=self._model_kwargs.get('seq_len'),
            horizon=self._model_kwargs.get('horizon'),
            input_dim=self._model_kwargs.get('input_dim'),
            mon_ratio=self._mon_ratio,
            **self._data_kwargs)
        for k, v in self.data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        # Build models.
        scaler = self.data['scaler']
        with tf.name_scope('Train'):
            with tf.variable_scope('DCRNN', reuse=False):
                self.train_model = DCRNNModel(
                    is_training=True,
                    scaler=scaler,
                    batch_size=self._data_kwargs['batch_size'],
                    adj_mx=self.data['adj_mx'],
                    **self._model_kwargs)

        with tf.name_scope('Val'):
            with tf.variable_scope('DCRNN', reuse=True):
                self.val_model = DCRNNModel(
                    is_training=False,
                    scaler=scaler,
                    batch_size=self._data_kwargs['val_batch_size'],
                    adj_mx=self.data['adj_mx'],
                    **self._model_kwargs)

        with tf.name_scope('Eval'):
            with tf.variable_scope('DCRNN', reuse=True):
                self.eval_model = DCRNNModel(
                    is_training=False,
                    scaler=scaler,
                    batch_size=self._data_kwargs['eval_batch_size'],
                    adj_mx=self.data['adj_mx'],
                    **self._model_kwargs)

        with tf.name_scope('Test'):
            with tf.variable_scope('DCRNN', reuse=True):
                self.test_model = DCRNNModel(
                    is_training=False,
                    scaler=scaler,
                    batch_size=self._data_kwargs['test_batch_size'],
                    adj_mx=self.data['adj_mx'],
                    **self._model_kwargs)

        # Learning rate.
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')

        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(
            self._lr,
            epsilon=epsilon,
        )
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )
        elif optimizer_name == 'amsgrad':
            optimizer = AMSGrad(self._lr, epsilon=epsilon)

        # Calculate loss
        output_dim = self._model_kwargs.get('output_dim')
        # fw decoder
        preds_fw = self.train_model.outputs_fw
        labels_fw = self.train_model.labels_fw[..., :output_dim]

        # bw encoder
        enc_preds_bw = self.train_model.enc_outputs_bw
        enc_labels_bw = self.train_model.enc_labels_bw[..., :output_dim]

        null_val = 0.
        self._loss_fn = masked_mse_loss(scaler, null_val)
        self._train_loss_dec = self._loss_fn(preds=preds_fw, labels=labels_fw)

        # backward loss
        self._train_loss_enc_bw = self._loss_fn(preds=enc_preds_bw,
                                                labels=enc_labels_bw)

        self._train_loss = self._train_loss_dec + self._train_loss_enc_bw

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
        global_step = tf.train.get_or_create_global_step()
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=global_step,
                                                   name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._epoch = 0
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
Example #9
    def __init__(self, adj_mx, **kwargs):

        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')

        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir,
                                        __name__,
                                        'info.log',
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self._log_dir)
        self._logger.info(kwargs)

        # Data preparation
        self._data = utils.load_dataset(
            **self._data_kwargs)  # return 3 DataLoaders and 1 scaler
        for k, v in self._data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        # Build models.
        scaler = self._data['scaler']
        # A walkthrough of reuse / variable_scope: https://www.jianshu.com/p/ab0d38725f88
        with tf.name_scope('Train'):
            # reuse=False: variables created in this scope are not reused.
            with tf.variable_scope('DCRNN', reuse=False):
                self._train_model = DCRNNModel(
                    is_training=True,
                    scaler=scaler,
                    batch_size=self._data_kwargs['batch_size'],
                    adj_mx=adj_mx,
                    **self._model_kwargs)

        with tf.name_scope('Test'):
            with tf.variable_scope('DCRNN', reuse=True):  # reuse the variables created under 'Train'
                self._test_model = DCRNNModel(
                    is_training=False,
                    scaler=scaler,
                    batch_size=self._data_kwargs['test_batch_size'],
                    adj_mx=adj_mx,
                    **self._model_kwargs)

        # Learning rate.
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()  # defaults to 'adam'
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(self._lr, epsilon=epsilon)
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )
        elif optimizer_name == 'amsgrad':
            optimizer = AMSGrad(self._lr, epsilon=epsilon)

        # Calculate loss
        # output_dim is set to 1 in the config file, i.e. only the speed feature is predicted.
        output_dim = self._model_kwargs.get('output_dim')
        preds = self._train_model.outputs
        labels = self._train_model.labels[..., :output_dim]

        null_val = 0.
        self._loss_fn = masked_mae_loss(scaler, null_val)
        self._train_loss = self._loss_fn(preds=preds, labels=labels)

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        # Clip so that the global norm of all gradients in one update stays within max_grad_norm.
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
        global_step = tf.train.get_or_create_global_step()
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=global_step,
                                                   name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._epoch = 0
        # The Saver keeps only the most recent max_to_keep checkpoints.
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
Example #10
    def __init__(self, **kwargs):

        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._train_kwargs = kwargs.get('train')
        self._test_kwargs = kwargs.get('test')
        self._model_kwargs = kwargs.get('model')

        self._alg_name = self._kwargs.get('alg')

        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir,
                                        __name__,
                                        'info.log',
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self._log_dir)
        self._logger.info(kwargs)

        # Data's args
        self._day_size = self._data_kwargs.get('day_size')

        # Model's Args
        self._rnn_units = self._model_kwargs.get('rnn_units')
        self._seq_len = self._model_kwargs.get('seq_len')
        self._horizon = self._model_kwargs.get('horizon')
        self._input_dim = self._model_kwargs.get('input_dim')
        self._input_shape = (self._seq_len, self._input_dim)
        self._output_dim = self._model_kwargs.get('output_dim')
        self._nodes = self._model_kwargs.get('num_nodes')
        self._n_rnn_layers = self._model_kwargs.get('n_rnn_layers')

        # Train's args
        self._drop_out = self._train_kwargs.get('dropout')
        self._epochs = self._train_kwargs.get('epochs')
        self._batch_size = self._data_kwargs.get('batch_size')

        # Test's args
        self._run_times = self._test_kwargs.get('run_times')
        self._flow_selection = self._test_kwargs.get('flow_selection')
        self._test_size = self._test_kwargs.get('test_size')
        self._results_path = self._test_kwargs.get('results_path')

        self._mon_ratio = self._kwargs.get('mon_ratio')

        # Load data
        self._data = utils.load_dataset_lstm(seq_len=self._seq_len,
                                             horizon=self._horizon,
                                             input_dim=self._input_dim,
                                             mon_ratio=self._mon_ratio,
                                             test_size=self._test_size,
                                             **self._data_kwargs)
        for k, v in self._data.items():
            if hasattr(v, 'shape'):
                self._logger.info((k, v.shape))

        scaler = self._data['scaler']

        # Model
        with tf.name_scope('Train'):
            with tf.variable_scope('LSTM', reuse=False):
                self._train_model = EncoderDecoderLSTM(
                    is_training=True,
                    scaler=self._data['scaler'],
                    batch_size=self._batch_size,
                    **self._model_kwargs)
        with tf.name_scope('Val'):
            with tf.variable_scope('LSTM', reuse=True):
                self._val_model = EncoderDecoderLSTM(
                    is_training=True,
                    scaler=self._data['scaler'],
                    batch_size=self._data_kwargs['val_batch_size'],
                    **self._model_kwargs)
        with tf.name_scope('Eval'):
            with tf.variable_scope('LSTM', reuse=True):
                self._eval_model = EncoderDecoderLSTM(
                    is_training=True,
                    scaler=self._data['scaler'],
                    batch_size=self._data_kwargs['eval_batch_size'],
                    **self._model_kwargs)
        with tf.name_scope('Test'):
            with tf.variable_scope('LSTM', reuse=True):
                self._test_model = EncoderDecoderLSTM(
                    is_training=True,
                    scaler=self._data['scaler'],
                    batch_size=self._data_kwargs['test_batch_size'],
                    **self._model_kwargs)

        # Learning rate
        self._lr = tf.get_variable('learning_rate',
                                   shape=(),
                                   initializer=tf.constant_initializer(0.01),
                                   trainable=False)
        self._new_lr = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr, name='lr_update')

        # Configure optimizer
        optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
        epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
        optimizer = tf.train.AdamOptimizer(
            self._lr,
            epsilon=epsilon,
        )
        if optimizer_name == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self._lr, )
        elif optimizer_name == 'amsgrad':
            optimizer = AMSGrad(self._lr, epsilon=epsilon)

        # Calculate loss
        output_dim = self._model_kwargs.get('output_dim')
        preds = self._train_model.outputs
        labels = self._train_model.labels[..., :output_dim]

        null_val = 0.
        self._loss_fn = masked_mse_loss(scaler, null_val)
        # self._loss_fn = masked_mae_loss(scaler, null_val)
        self._train_loss = self._loss_fn(preds=preds, labels=labels)

        tvars = tf.trainable_variables()
        grads = tf.gradients(self._train_loss, tvars)
        max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
        grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
        global_step = tf.train.get_or_create_global_step()
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=global_step,
                                                   name='train_op')

        max_to_keep = self._train_kwargs.get('max_to_keep', 100)
        self._epoch = 0
        self._saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=max_to_keep)

        # Log model statistics.
        total_trainable_parameter = utils.get_total_trainable_parameter_size()
        self._logger.info('Total number of trainable parameters: {:d}'.format(
            total_trainable_parameter))
        for var in tf.global_variables():
            self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
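In all of these supervisors, a training step then amounts to running _train_op together with _train_loss in a session. The snippet below is a hypothetical sketch: sess, supervisor, x_batch, y_batch, and the inputs/labels attributes on the train model are assumed names, not guaranteed by the examples.

# Hypothetical single training step; assumes tf.global_variables_initializer()
# has already been run in `sess` and that the train model exposes feedable
# `inputs` and `labels` placeholders.
_, train_loss = sess.run(
    [supervisor._train_op, supervisor._train_loss],
    feed_dict={
        supervisor._train_model.inputs: x_batch,
        supervisor._train_model.labels: y_batch,
    })
print('training loss: {:.4f}'.format(train_loss))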