Example #1
    def evaluate(self, val_x, val_y, test_x, test_y):
        """
        Evaluates the trained model with validation and test

        Overrides parent function

        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """
        batch_size = self.config['training']['batch']

        if self.runconfig.best:
            self.model = load_model(self.modfile)
        val_yp = self.model.predict(val_x, batch_size=batch_size, verbose=0)
        test_yp = self.model.predict(test_x, batch_size=batch_size, verbose=0)

        # Maintained to be compatible with old configuration files
        if isinstance(self.config['data']['ahead'], list):
            iahead = self.config['data']['ahead'][0]
            ahead = (self.config['data']['ahead'][1] - self.config['data']['ahead'][0]) + 1
        else:
            iahead = 1
            ahead = self.config['data']['ahead']

        lresults = []

        for i, p in zip(range(1, ahead + 1), range(iahead, iahead + ahead)):
            lresults.append([p] + ErrorMeasure().compute_errors(val_y[:, i - 1],
                                                                val_yp[:, i - 1],
                                                                test_y[:, i - 1],
                                                                test_yp[:, i - 1]))
        return lresults
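
The `ahead` bookkeeping above recurs throughout these examples: an int config means horizons 1..n, while a `[first, last]` list means a sub-range of horizons. A minimal standalone sketch of that mapping (the helper name is hypothetical, not part of the original class):

def horizon_range(ahead_cfg):
    """Map an `ahead` config entry to (column, horizon) pairs."""
    if isinstance(ahead_cfg, list):
        first, count = ahead_cfg[0], ahead_cfg[1] - ahead_cfg[0] + 1
    else:
        first, count = 1, ahead_cfg
    # Column i of the prediction matrix corresponds to horizon first + i - 1
    return list(zip(range(1, count + 1), range(first, first + count)))

print(horizon_range(3))       # [(1, 1), (2, 2), (3, 3)]
print(horizon_range([4, 6]))  # [(1, 4), (2, 5), (3, 6)]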
Example #2
    def evaluate(self,
                 val_x,
                 val_y,
                 test_x,
                 test_y,
                 scaler=None,
                 save_errors=None):
        """
        Evaluates the training
        :param save_errors:
        :return:
        """

        alpha = list(self.config['arch']['alpha'])  # copy so the config list is not mutated below
        if len(alpha) < val_y.shape[1]:
            alpha.extend([alpha[-1]] * (val_y.shape[1] - len(alpha)))

        if isinstance(self.config['data']['ahead'], list):
            ahead = self.config['data']['ahead'][1]
        else:
            ahead = self.config['data']['ahead']

        lresults = []
        for a, i in zip(alpha, range(1, ahead + 1)):
            val_b = (val_x[:, -1] * a) + ((1 - a) * np.mean(val_x, axis=1))
            test_b = (test_x[:, -1] * a) + ((1 - a) * np.mean(test_x, axis=1))
            lresults.append([i] + ErrorMeasure().compute_errors(
                val_b, val_y[:, i - 1], test_b, test_y[:, i - 1], scaler=scaler))

        return lresults
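
The baseline in Example #2 blends the last observation of each input window with the window mean. A self-contained sketch of that formula on synthetic data (all names illustrative):

import numpy as np

rng = np.random.default_rng(0)
window = rng.normal(size=(5, 8))   # 5 samples, lag window of length 8
a = 0.7                            # weight of the last observation

# Baseline prediction used for every horizon: a * x_{t-1} + (1 - a) * mean(window)
pred = (window[:, -1] * a) + ((1 - a) * np.mean(window, axis=1))
print(pred.shape)  # (5,)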
Example #3
    def evaluate(self, val_x, val_y, test_x, test_y, scaler=None, save_errors=None):
        """
        Evaluates a trained model, loads the best if it is configured to do so
        Computes the R² for validation and test

        :param save_errors:
        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """
        batch_size = self.config['training']['batch']

        if self.runconfig.best:
            self.model = load_model(self.modfile)

        val_yp = self.model.predict(val_x, batch_size=batch_size, verbose=0)
        test_yp = self.model.predict(test_x, batch_size=batch_size, verbose=0)

        if save_errors is not None:
            f = h5py.File(f'errors{self.modname}-S{self.config["data"]["datanames"][0]}{save_errors}.hdf5', 'w')
            dgroup = f.create_group('errors')
            dgroup.create_dataset('val_y', val_y.shape, dtype='f', data=val_y, compression='gzip')
            dgroup.create_dataset('val_yp', val_yp.shape, dtype='f', data=val_yp, compression='gzip')
            dgroup.create_dataset('test_y', test_y.shape, dtype='f', data=test_y, compression='gzip')
            dgroup.create_dataset('test_yp', test_yp.shape, dtype='f', data=test_yp, compression='gzip')
            if scaler is not None:
                # Unidimensional vectors
                dgroup.create_dataset('val_yu', val_y.shape, dtype='f', data=scaler.inverse_transform(val_y.reshape(-1, 1)), compression='gzip')
                dgroup.create_dataset('val_ypu', val_yp.shape, dtype='f', data=scaler.inverse_transform(val_yp.reshape(-1, 1)), compression='gzip')
                dgroup.create_dataset('test_yu', test_y.shape, dtype='f', data=scaler.inverse_transform(test_y.reshape(-1, 1)), compression='gzip')
                dgroup.create_dataset('test_ypu', test_yp.shape, dtype='f', data=scaler.inverse_transform(test_yp.reshape(-1, 1)), compression='gzip')
            f.close()

        return ErrorMeasure().compute_errors(val_y, val_yp, test_y, test_yp, scaler=scaler)
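
The `save_errors` branch writes targets and predictions to an HDF5 file. A sketch of reading such a file back with h5py (the filename is illustrative; the real one is composed from modname, datanames and the save_errors suffix):

import h5py

with h5py.File('errors-example.hdf5', 'r') as f:
    g = f['errors']
    val_y, val_yp = g['val_y'][...], g['val_yp'][...]
    test_y, test_yp = g['test_y'][...], g['test_yp'][...]
print(val_y.shape, test_yp.shape)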
Example #4
    def evaluate(self, val_x, val_y, test_x, test_y, scaler=None):
        """
        Evaluates the trained model with validation and test

        Overrides parent function

        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """

        val_yp = self.model.predict(val_x)
        test_yp = self.model.predict(test_x)

        # Maintained to be compatible with old configuration files
        if isinstance(self.config['data']['ahead'], list):
            iahead = self.config['data']['ahead'][0]
            ahead = (self.config['data']['ahead'][1] -
                     self.config['data']['ahead'][0]) + 1
        else:
            iahead = 1
            ahead = self.config['data']['ahead']

        lresults = []
        for i, p in zip(range(1, ahead + 1), range(iahead, iahead + ahead)):
            lresults.append([p] + ErrorMeasure().compute_errors(
                val_y[:, i - 1], val_yp[:, i - 1], test_y[:, i - 1],
                test_yp[:, i - 1], scaler))

        return lresults
Example #5
    def evaluate(self, val_x, val_y, test_x, test_y):
        """
        Evaluates the training
        :return:
        """
        val_yp = self.model.predict(val_x)
        test_yp = self.model.predict(test_x)

        return ErrorMeasure().compute_errors(val_y, val_yp, test_y, test_yp)
Example #6
    def evaluate(self, val_x, val_y, test_x, test_y, scaler=None, save_errors=None):
        """
        Evaluates the trained model with validation and test

        Overrides parent function

        :param save_errors:
        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """
        batch_size = self.config['training']['batch']

        if self.runconfig.best:
            self.model = load_model(self.modfile, custom_objects={"AttentionDecoder": AttentionDecoder})
        val_yp = self.model.predict(val_x, batch_size=batch_size, verbose=0)
        test_yp = self.model.predict(test_x, batch_size=batch_size, verbose=0)

        # Maintained to be compatible with old configuration files
        if isinstance(self.config['data']['ahead'], list):
            iahead = self.config['data']['ahead'][0]
            ahead = (self.config['data']['ahead'][1] - self.config['data']['ahead'][0]) + 1
        else:
            iahead = 1
            ahead = self.config['data']['ahead']

        if 'aggregate' in self.config['data'] and 'y' in self.config['data']['aggregate']:
            step = self.config['data']['aggregate']['y']['step']
            ahead //= step

        if save_errors is not None:
            f = h5py.File(f'errors{self.modname}-S{self.config["data"]["datanames"][0]}{save_errors}.hdf5', 'w')
            dgroup = f.create_group('errors')
            dgroup.create_dataset('val_y', val_y.shape, dtype='f', data=val_y, compression='gzip')
            dgroup.create_dataset('val_yp', val_yp.shape, dtype='f', data=val_yp, compression='gzip')
            dgroup.create_dataset('test_y', test_y.shape, dtype='f', data=test_y, compression='gzip')
            dgroup.create_dataset('test_yp', test_yp.shape, dtype='f', data=test_yp, compression='gzip')
            if scaler is not None:
                # n-dimensional vectors
                dgroup.create_dataset('val_yu', val_y.shape, dtype='f', data=scaler.inverse_transform(val_y), compression='gzip')
                dgroup.create_dataset('val_ypu', val_yp.shape, dtype='f', data=scaler.inverse_transform(val_yp), compression='gzip')
                dgroup.create_dataset('test_yu', test_y.shape, dtype='f', data=scaler.inverse_transform(test_y), compression='gzip')
                dgroup.create_dataset('test_ypu', test_yp.shape, dtype='f', data=scaler.inverse_transform(test_yp), compression='gzip')
            f.close()

        lresults = []
        for i, p in zip(range(1, ahead + 1), range(iahead, iahead + ahead)):
            lresults.append([p] + ErrorMeasure().compute_errors(val_y[:, i - 1],
                                                                val_yp[:, i - 1],
                                                                test_y[:, i - 1],
                                                                test_yp[:, i - 1],
                                                                scaler=scaler))
        return lresults
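
When the y columns are aggregated, the number of evaluated horizons shrinks by the aggregation step. A worked check of the `ahead //= step` adjustment:

# If the y columns are aggregated in blocks of `step`, the prediction matrix
# has ahead // step columns instead of ahead
ahead, step = 12, 3
print(ahead // step)  # 4: horizons (1-3), (4-6), (7-9), (10-12) become one column each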
Example #7
    def log_result(self, result):
        """
        logs a result from the model (basic results)

        :param result:
        :return:
        """
        if self.runconfig.info:
            self.summary()

        if 'iter' not in self.config['training']:
            nres = len(result)
        else:
            nres = len(result) // self.config['training']['iter']

        ErrorMeasure().print_errors(self.config['arch']['mode'], nres, result)
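
`log_result` receives the concatenated results of all training iterations, so the per-iteration row count is recovered by integer division. A worked check:

# 3 training iterations x 12 horizons -> 36 result rows in total
result = [[h, 0.0, 0.0] for _ in range(3) for h in range(1, 13)]
niter = 3
print(len(result) // niter)  # 12 rows per iteration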
Example #8
    def evaluate(self, val_x, val_y, test_x, test_y, scaler=None):
        """
        Evaluates the training
        :return:
        """

        if isinstance(self.config['data']['ahead'], list):
            ahead = self.config['data']['ahead'][1]
        else:
            ahead = self.config['data']['ahead']

        lresults = []
        for i in range(1, ahead + 1):
            lresults.append([i] + ErrorMeasure().compute_errors(
                val_x[:, -1], val_y[:, i - 1],
                test_x[:, -1], test_y[:, i - 1]))
        return lresults
Example #9
    def evaluate(self, val_x, val_y, test_x, test_y):
        """
        Evaluates a trained model, loads the best if it is configured to do so
        Computes the R² for validation and test

        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """
        batch_size = self.config['training']['batch']

        if self.runconfig.best:
            self.model = load_model(self.modfile)

        val_yp = self.model.predict(val_x, batch_size=batch_size, verbose=0)
        test_yp = self.model.predict(test_x, batch_size=batch_size, verbose=0)
        return ErrorMeasure().compute_errors(val_y, val_yp, test_y, test_yp)
Example #10
    def evaluate(self, val_x, val_y, test_x, test_y, scaler=None, save_errors=None):
        """
        Evaluates the training
        :param save_errors:
        :return:
        """
        val_yp = self.model.predict(val_x)
        test_yp = self.model.predict(test_x)

        if save_errors is not None:
            f = h5py.File(f'errors{self.modname}-S{self.config["data"]["datanames"][0]}{save_errors}.hdf5', 'w')
            dgroup = f.create_group('errors')
            dgroup.create_dataset('val_y', val_y.shape, dtype='f', data=val_y, compression='gzip')
            dgroup.create_dataset('val_yp', val_yp.shape, dtype='f', data=val_yp, compression='gzip')
            dgroup.create_dataset('test_y', test_y.shape, dtype='f', data=test_y, compression='gzip')
            dgroup.create_dataset('test_yp', test_yp.shape, dtype='f', data=test_yp, compression='gzip')
            if scaler is not None:
                # Unidimensional vectors
                dgroup.create_dataset('val_yu', val_y.shape, dtype='f', data=scaler.inverse_transform(val_y.reshape(-1, 1)), compression='gzip')
                dgroup.create_dataset('val_ypu', val_yp.shape, dtype='f', data=scaler.inverse_transform(val_yp.reshape(-1, 1)), compression='gzip')
                dgroup.create_dataset('test_yu', test_y.shape, dtype='f', data=scaler.inverse_transform(test_y.reshape(-1, 1)), compression='gzip')
                dgroup.create_dataset('test_ypu', test_yp.shape, dtype='f', data=scaler.inverse_transform(test_yp.reshape(-1, 1)), compression='gzip')
            f.close()

        return ErrorMeasure().compute_errors(val_y, val_yp, test_y, test_yp)
Example #11
    def evaluate(self,
                 val_x,
                 val_y,
                 test_x,
                 test_y,
                 scaler=None,
                 save_errors=None):
        """
        Evaluates the training
        :param save_errors:
        :return:
        """
        if isinstance(self.config['data']['ahead'], list):
            ahead = self.config['data']['ahead'][1]
        else:
            ahead = self.config['data']['ahead']
        print('shapes', val_x.shape, val_y.shape, test_x.shape, test_y.shape)
        # Persistence prediction: repeat a fixed input column for every horizon
        val_yp = np.tile(val_x[:, 17], (12, 1)).transpose()
        test_yp = np.tile(test_x[:, 17], (12, 1)).transpose()

        lresults = []
        for i in range(1, ahead + 1):
            lresults.append([i] + ErrorMeasure().compute_errors(
                val_x[:, -1], val_y[:, i - 1],
                test_x[:, -1], test_y[:, i - 1]))
        if save_errors is not None:
            f = h5py.File(
                f'errors{self.modname}-S{self.config["data"]["datanames"][0]}{save_errors}.hdf5',
                'w')
            dgroup = f.create_group('errors')
            dgroup.create_dataset('val_y',
                                  val_y.shape,
                                  dtype='f',
                                  data=val_y,
                                  compression='gzip')
            dgroup.create_dataset('val_yp',
                                  val_yp.shape,
                                  dtype='f',
                                  data=val_yp,
                                  compression='gzip')
            dgroup.create_dataset('test_y',
                                  test_y.shape,
                                  dtype='f',
                                  data=test_y,
                                  compression='gzip')
            dgroup.create_dataset('test_yp',
                                  test_yp.shape,
                                  dtype='f',
                                  data=test_yp,
                                  compression='gzip')
            if scaler is not None:
                # Unidimensional vectors
                dgroup.create_dataset('val_yu',
                                      val_y.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(
                                          val_y.reshape(-1, 1)),
                                      compression='gzip')
                dgroup.create_dataset('val_ypu',
                                      val_yp.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(
                                          val_yp.reshape(-1, 1)),
                                      compression='gzip')
                dgroup.create_dataset('test_yu',
                                      test_y.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(
                                          test_y.reshape(-1, 1)),
                                      compression='gzip')
                dgroup.create_dataset('test_ypu',
                                      test_yp.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(
                                          test_yp.reshape(-1, 1)),
                                      compression='gzip')
            f.close()

        return lresults
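
The `np.tile(...).transpose()` call in Example #11 repeats one input column across all horizons to build a persistence prediction matrix. A small shape check on synthetic data (the column index 17 and width 12 in the example are dataset-specific):

import numpy as np

x = np.arange(20, dtype=float).reshape(4, 5)  # 4 samples, 5 features
ahead = 3
# Repeat one feature column for every horizon: (4,) -> (3, 4) -> (4, 3)
yp = np.tile(x[:, -1], (ahead, 1)).transpose()
print(yp.shape)  # (4, 3)
print(yp[0])     # [4. 4. 4.]: the same value predicted for all horizons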
Example #12
def train_gradient_boosting_sequence2sequence(architecture, config, runconfig):
    """
    Training process for sequence 2 sequence architectures

    Mutihorizon MIMO/DIRJOINT strategy plus gradient boosting

    Generates a sequence of models that train over the difference of the previous prediction

    :param architecture:
    :param config:
    :param runconfig:
    :return:
    """

    ahead = config['data']['ahead'] if isinstance(config['data']['ahead'], list) else [1, config['data']['ahead']]

    if 'iter' in config['training']:
        niter = config['training']['iter']
    else:
        niter = 1

    # ahead is normalized to a [first, last] list above
    odimensions = ahead[1] - ahead[0] + 1

    # Dataset
    dataset = Dataset(config=config['data'], data_path=wind_data_path)
    dataset.generate_dataset(ahead=ahead,
                             mode=architecture.data_mode,
                             remote=runconfig.remote)
    train_x, train_y, val_x, val_y, test_x, test_y = dataset.get_data_matrices()

    if not isinstance(train_x, list):
        config['idimensions'] = train_x.shape[1:]
    else:
        config['idimensions'] = [d.shape[1:] for d in train_x]
    config['odimensions'] = odimensions

    lresults = []
    for _ in range(niter):

        arch = architecture(config, runconfig)

        if runconfig.multi == 1:
            arch.generate_model()
        else:
            with tf.device('/cpu:0'):
                arch.generate_model()

        if runconfig.verbose:
            arch.summary()
            arch.plot()
            dataset.summary()
            print()

        ############################################
        # Training

        # Training using a gradient-boosting-like scheme (residual fitting)
        # Generate a bunch of models
        boost_train_pred = []
        boost_val_pred = []
        boost_test_pred = []
        n_train_y = train_y
        n_val_y = val_y
        n_test_y = test_y
        alpha = config['arch']['alpha']
        decay = config['arch']['decay']
        for nm in range(config['arch']['nmodels']):
            arch.train(train_x, n_train_y, val_x, n_val_y)

            # Prediction of the current model
            boost_train_pred.append(arch.predict(train_x))
            boost_val_pred.append(arch.predict(val_x))
            boost_test_pred.append(arch.predict(test_x))

            # Compute the prediction of the combination of models
            # Prediction of the first model
            boost_train_predict_y = boost_train_pred[0]
            boost_val_predict_y = boost_val_pred[0]
            boost_test_predict_y = boost_test_pred[0]
            for m in range(1, len(boost_train_pred)):
                boost_train_predict_y += (alpha * boost_train_pred[m])
                boost_val_predict_y += (alpha * boost_val_pred[m])
                boost_test_predict_y += (alpha * boost_test_pred[m])

            # Residual of the prediction for the next step
            n_train_y = train_y - boost_train_predict_y
            n_val_y = val_y - boost_val_predict_y
            alpha *= decay

            # Reset the model
            arch = architecture(config, runconfig)
            if runconfig.multi == 1:
                arch.generate_model()
            else:
                with tf.device('/cpu:0'):
                    arch.generate_model()
            # Save the model of this boosting stage under a stage-specific suffix
            arch.save(f'-{ahead[0]}-{ahead[1]}-R{nm}')

        ############################################
        # Results

        # Maintained to be compatible with old configuration files
        # (new names so the `ahead` range list is not clobbered across iterations)
        if isinstance(config['data']['ahead'], list):
            iahead = config['data']['ahead'][0]
            nahead = (config['data']['ahead'][1] - config['data']['ahead'][0]) + 1
        else:
            iahead = 1
            nahead = config['data']['ahead']

        itresults = []

        for i, p in zip(range(1, nahead + 1), range(iahead, iahead + nahead)):

            if 'descale' not in config['training'] or config['training']['descale']:
                itresults.append([p] + ErrorMeasure().compute_errors(
                    val_y[:, i - 1],
                    boost_val_predict_y[:, i - 1],
                    test_y[:, i - 1],
                    boost_test_predict_y[:, i - 1],
                    scaler=dataset.scaler))
            else:
                itresults.append([p] + ErrorMeasure().compute_errors(
                    val_y[:, i - 1], boost_val_predict_y[:, i - 1],
                    test_y[:, i - 1], boost_test_predict_y[:, i - 1]))

        lresults.extend(itresults)

        print(strftime('%Y-%m-%d %H:%M:%S'))

    arch.log_result(lresults)

    return lresults
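
The boosting loop in Example #12 fits each new model to the residual of the running ensemble and recombines all models with the current (decayed) alpha, the first model entering at weight 1. A pure-NumPy sketch of the same scheme with trivial mean-predictor stand-ins (illustrative only; the real code trains the network architecture):

import numpy as np

rng = np.random.default_rng(1)
y = rng.normal(loc=3.0, size=100)

alpha, decay, nmodels = 0.5, 0.9, 5
target = y.copy()
preds = []

for _ in range(nmodels):
    # Stand-in model: predicts the mean of its (residual) training target
    preds.append(np.full_like(y, target.mean()))
    # Recombine as in the example: first model at weight 1,
    # every later model at the current (decayed) alpha
    ensemble = preds[0].copy()
    for p in preds[1:]:
        ensemble += alpha * p
    target = y - ensemble   # residual becomes the next training target
    alpha *= decay

print(float(np.abs(y - ensemble).mean()))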
Example #13
    def evaluate(self,
                 val_x,
                 val_y,
                 test_x,
                 test_y,
                 scaler=None,
                 save_errors=None):
        """
        Evaluates the trained model with validation and test

        Overrides parent function

        :param save_errors:
        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """

        val_yp = self.model.predict(val_x)
        test_yp = self.model.predict(test_x)

        # Maintained to be compatible with old configuration files
        if isinstance(self.config['data']['ahead'], list):
            iahead = self.config['data']['ahead'][0]
            ahead = (self.config['data']['ahead'][1] -
                     self.config['data']['ahead'][0]) + 1
        else:
            iahead = 1
            ahead = self.config['data']['ahead']

        if save_errors is not None:
            f = h5py.File(
                f'errors{self.modname}-S{self.config["data"]["datanames"][0]}{save_errors}.hdf5',
                'w')
            dgroup = f.create_group('errors')
            dgroup.create_dataset('val_y',
                                  val_y.shape,
                                  dtype='f',
                                  data=val_y,
                                  compression='gzip')
            dgroup.create_dataset('val_yp',
                                  val_yp.shape,
                                  dtype='f',
                                  data=val_yp,
                                  compression='gzip')
            dgroup.create_dataset('test_y',
                                  test_y.shape,
                                  dtype='f',
                                  data=test_y,
                                  compression='gzip')
            dgroup.create_dataset('test_yp',
                                  test_yp.shape,
                                  dtype='f',
                                  data=test_yp,
                                  compression='gzip')
            if scaler is not None:
                # n-dimensional vectors
                dgroup.create_dataset('val_yu',
                                      val_y.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(val_y),
                                      compression='gzip')
                dgroup.create_dataset('val_ypu',
                                      val_yp.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(val_yp),
                                      compression='gzip')
                dgroup.create_dataset('test_yu',
                                      test_y.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(test_y),
                                      compression='gzip')
                dgroup.create_dataset('test_ypu',
                                      test_yp.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(test_yp),
                                      compression='gzip')
            f.close()

        lresults = []
        for i, p in zip(range(1, ahead + 1), range(iahead, iahead + ahead)):
            lresults.append([p] + ErrorMeasure().compute_errors(
                val_y[:, i - 1], val_yp[:, i - 1], test_y[:, i - 1],
                test_yp[:, i - 1], scaler))

        return lresults
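
Note the difference from Examples #3 and #10: here the targets keep their 2-D (samples, horizons) shape, so `scaler.inverse_transform` is applied directly, while the unidimensional variants reshape to a single column first. A sketch of both cases with scikit-learn's MinMaxScaler (an assumed scaler type; the code only requires the `inverse_transform` interface):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

y = np.arange(12, dtype=float).reshape(6, 2)  # (samples, horizons)

# n-dimensional case (this example): scaler fit on the 2-D matrix,
# inverse_transform applied to it directly
sc2 = MinMaxScaler().fit(y)
print(np.allclose(sc2.inverse_transform(sc2.transform(y)), y))  # True

# Unidimensional case (Examples #3 and #10): single-feature scaler,
# so the matrix is flattened to one column first
sc1 = MinMaxScaler().fit(y.reshape(-1, 1))
flat = sc1.transform(y.reshape(-1, 1))
print(sc1.inverse_transform(flat).reshape(y.shape).shape)  # (6, 2)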
Example #14
    def evaluate(self,
                 val_x,
                 val_y,
                 test_x,
                 test_y,
                 scaler=None,
                 save_errors=None):
        """
        The evaluation for this architecture is iterative, for each step a new time in the future is predicted
        using the results of the previous steps, the result is appended for the next step

        :param save_errors:
        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """
        batch_size = self.config['training']['batch']

        if self.runconfig.best:
            self.model = load_model(self.modfile)

        # Maintained to be compatible with old configuration files
        if isinstance(self.config['data']['ahead'], list):
            iahead = self.config['data']['ahead'][0]
            ahead = (self.config['data']['ahead'][1] -
                     self.config['data']['ahead'][0]) + 1
        else:
            iahead = 1
            ahead = self.config['data']['ahead']

        # The input for the first step is the last column of the training (t-1)
        val_x_tfi = val_x[0][:, -1, 0]
        val_x_tfi = val_x_tfi.reshape(val_x_tfi.shape[0], 1, 1)
        test_x_tfi = test_x[0][:, -1, 0]
        test_x_tfi = test_x_tfi.reshape(test_x_tfi.shape[0], 1, 1)

        # Copy the first slice (time step 1)
        val_x_tf = val_x_tfi.copy()
        test_x_tf = test_x_tfi.copy()

        lresults = []
        for i in range(1, ahead + 1):
            val_yp = self.model.predict([val_x[0], val_x_tf],
                                        batch_size=batch_size,
                                        verbose=0)
            test_yp = self.model.predict([test_x[0], test_x_tf],
                                         batch_size=batch_size,
                                         verbose=0)

            val_x_tf = np.concatenate((val_x_tfi, val_yp), axis=1)
            test_x_tf = np.concatenate((test_x_tfi, test_yp), axis=1)

        if save_errors is not None:
            f = h5py.File(
                f'errors{self.modname}-S{self.config["data"]["datanames"][0]}{save_errors}.hdf5',
                'w')
            dgroup = f.create_group('errors')
            dgroup.create_dataset('val_y',
                                  val_y.shape,
                                  dtype='f',
                                  data=val_y,
                                  compression='gzip')
            dgroup.create_dataset('val_yp',
                                  val_yp.shape,
                                  dtype='f',
                                  data=val_yp,
                                  compression='gzip')
            dgroup.create_dataset('test_y',
                                  test_y.shape,
                                  dtype='f',
                                  data=test_y,
                                  compression='gzip')
            dgroup.create_dataset('test_yp',
                                  test_yp.shape,
                                  dtype='f',
                                  data=test_yp,
                                  compression='gzip')
            if scaler is not None:
                # n-dimensional vectors
                dgroup.create_dataset('val_yu',
                                      val_y.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(val_y),
                                      compression='gzip')
                dgroup.create_dataset('val_ypu',
                                      val_yp.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(val_yp),
                                      compression='gzip')
                dgroup.create_dataset('test_yu',
                                      test_y.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(test_y),
                                      compression='gzip')
                dgroup.create_dataset('test_ypu',
                                      test_yp.shape,
                                      dtype='f',
                                      data=scaler.inverse_transform(test_yp),
                                      compression='gzip')
            f.close()

        # After the loop we have all the predictions for the ahead range
        for i, p in zip(range(1, ahead + 1), range(iahead, iahead + ahead)):
            lresults.append([p] + ErrorMeasure().compute_errors(
                val_y[:, i - 1], val_yp[:, i - 1],
                test_y[:, i - 1], test_yp[:, i - 1]))

        return lresults
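
The iterative scheme above predicts one step, appends the prediction to the decoder input, and predicts again, so each pass extends the forecast by one horizon. A minimal sketch of that feedback loop with a stand-in predictor (`fake_predict` is hypothetical; the original uses a two-input Keras model):

import numpy as np

rng = np.random.default_rng(2)
encoder_in = rng.normal(size=(8, 10, 1))  # (samples, lags, 1)

def fake_predict(enc, dec_in):
    # Stand-in for model.predict([enc, dec_in]): one output step per
    # decoder input step, here just the decoder input shifted by a constant
    return dec_in[:, :, 0] + 0.1

# Seed the decoder with the last encoder value (t-1)
first = encoder_in[:, -1, 0].reshape(-1, 1, 1)
dec_in = first.copy()

ahead = 4
for _ in range(ahead):
    yp = fake_predict(encoder_in, dec_in)                     # (samples, steps)
    dec_in = np.concatenate((first, yp[:, :, None]), axis=1)  # grow by one step

print(yp.shape)  # (8, 4): one column per predicted horizon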
Example #15
    def evaluate(self, val_x, val_y, test_x, test_y):
        """
        The evaluation for this architecture is iterative, for each step a new time in the future is predicted
        using the results of the previous steps, the result is appended for the next step

        :param val_x:
        :param val_y:
        :param test_x:
        :param test_y:
        :return:
        """
        batch_size = self.config['training']['batch']

        if self.runconfig.best:
            self.model = load_model(self.modfile)

        # Maintained to be compatible with old configuration files
        if isinstance(self.config['data']['ahead'], list):
            iahead = self.config['data']['ahead'][0]
            ahead = (self.config['data']['ahead'][1] -
                     self.config['data']['ahead'][0]) + 1
        else:
            iahead = 1
            ahead = self.config['data']['ahead']

        # The input for the first step is the last column of the training (t-1)
        val_x_tfi = val_x[0][:, -1, 0]
        val_x_tfi = val_x_tfi.reshape(val_x_tfi.shape[0], 1, 1)
        test_x_tfi = test_x[0][:, -1, 0]
        test_x_tfi = test_x_tfi.reshape(test_x_tfi.shape[0], 1, 1)

        # Copy the first slice (time step 1)
        val_x_tf = val_x_tfi.copy()
        test_x_tf = test_x_tfi.copy()

        lresults = []
        for i in range(1, ahead + 1):
            val_yp = self.model.predict([val_x[0], val_x_tf],
                                        batch_size=batch_size,
                                        verbose=0)
            test_yp = self.model.predict([test_x[0], test_x_tf],
                                         batch_size=batch_size,
                                         verbose=0)

            val_x_tf = np.concatenate((val_x_tfi, val_yp), axis=1)
            test_x_tf = np.concatenate((test_x_tfi, test_yp), axis=1)

        # After the loop we have all the predictions for the ahead range

        for i, p in zip(range(1, ahead + 1), range(iahead, iahead + ahead)):
            lresults.append([p] + ErrorMeasure().compute_errors(
                val_y[:, i - 1], val_yp[:, i - 1],
                test_y[:, i - 1], test_yp[:, i - 1]))

        return lresults