Example #1
    def test(self):
        scaler = self._data['scaler']
        data_test = self._data['test_data_norm']
        T = len(data_test)
        l = self._seq_len
        h = self._horizon

        # `data` feeds the model and is refreshed with ground truth each step;
        # `_data` accumulates the raw model predictions
        data = np.zeros(shape=(T - h, self._input_dim), dtype='float32')
        data[:l, :] = data_test[:l, :]

        _data = np.zeros(shape=(T - h, self._input_dim), dtype='float32')
        _data[:l, :] = data_test[:l, :]
        iterator = tqdm(range(0, T - l - h, h))

        for i in iterator:
            if i + l + h > T - h:
                # trim the all-zero rows that were never filled
                data = data[~np.all(data == 0, axis=1)]
                _data = _data[~np.all(_data == 0, axis=1)]
                iterator.close()
                break
            model_input = np.zeros(shape=(1, l, self._input_dim))
            model_input[0, :, :] = data[i:i + l, :].copy()
            yhats = self._predict(model_input)

            _data[i + l:i + l + h] = yhats
            # refresh the model inputs with ground truth for the next window
            _gt = data_test[i + l:i + l + h].copy()
            data[i + l:i + l + h] = _gt

        # save predictions and ground truth to the log dir
        predicted_data = scaler.inverse_transform(_data)[:, -1]
        ground_truth = scaler.inverse_transform(data_test[:_data.shape[0]])[:, -1]
        np.save(self._log_dir + 'pd', predicted_data)
        np.save(self._log_dir + 'gt', ground_truth)
        # save metrics to log dir
        error_list = utils.cal_error(ground_truth.flatten()[l:],
                                     predicted_data.flatten()[l:],
                                     self._log_dir, self._alg_name)

        utils.save_metrics(error_list, self._log_dir, self._alg_name)
        return predicted_data, ground_truth, error_list
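
The method above implements rolling multi-step forecasting: the model emits h steps at a time, the window then slides forward by h, and the input buffer is refilled with ground truth before the next prediction. A minimal standalone sketch of the same pattern, with a dummy predictor standing in for self._predict (all names here are illustrative, not from the source):

    import numpy as np

    def rolling_forecast(series, predict_fn, seq_len, horizon):
        """Predict `horizon` steps at a time while sliding a window of
        length `seq_len` over `series`; inputs always come from ground truth."""
        T = len(series)
        preds = np.zeros(T - horizon, dtype='float32')
        preds[:seq_len] = series[:seq_len]  # seed with observed values
        for i in range(0, T - seq_len - horizon, horizon):
            window = series[i:i + seq_len]
            preds[i + seq_len:i + seq_len + horizon] = predict_fn(window)
        return preds

    # naive "repeat the last value" predictor, just to exercise the loop
    series = np.sin(np.linspace(0, 20, 200)).astype('float32')
    pd_hat = rolling_forecast(series, lambda w: np.repeat(w[-1], 4),
                              seq_len=12, horizon=4)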
Example #2
    def _test(self):
        scaler = self._data['scaler']
        data_test = self._data['test_data_norm']
        # columns 0-3 hold weather features, columns 4+ hold the PM readings
        weather_data = data_test[:, 0:4].copy()
        pm_data = data_test[:, 4:].copy()
        T = len(data_test)
        l = self._seq_len
        h = self._horizon
        bm = utils.binary_matrix(self._verified_percentage, len(data_test), data_test.shape[1])
        pd = np.zeros(shape=(T - h, self._output_dim), dtype='float32')
        pd[:l] = pm_data[:l]
        _pd = np.zeros(shape=(T - h, self._output_dim), dtype='float32')
        _pd[:l] = pm_data[:l]
        iterator = tqdm(range(0, T - l - h, h))
        for i in iterator:
            if i + l + h > T - h:
                # trim the all-zero rows that were never filled
                pd = pd[~np.all(pd == 0, axis=1)]
                _pd = _pd[~np.all(_pd == 0, axis=1)]
                iterator.close()
                break
            model_input = np.zeros(shape=(self._test_batch_size, l, self._input_dim))
            model_input[0, :, 0:4] = weather_data[i:i + l]
            model_input[0, :, 4:] = pm_data[i:i + l]
            yhats = self._predict(model_input)
            _pd[i + l:i + l + h] = yhats

            # keep verified measurements where bm == 1, model forecasts elsewhere
            _gt = data_test[i + l:i + l + h, -h].copy()
            _bm = bm[i + l:i + l + h, -h].copy()
            pd[i + l:i + l + h] = yhats * (1.0 - _bm) + _gt * _bm
        # Alternative without the inverse transform:
        #   predicted_data = _pd
        #   ground_truth = data_test[:predicted_data.shape[0], -self._output_dim:]

        # trim weather_data so its row count matches _pd before concatenating
        weather_data_trim = weather_data[:_pd.shape[0]]
        inverse_pred_data = scaler.inverse_transform(np.concatenate((weather_data_trim, _pd), axis=1))
        predicted_data = inverse_pred_data[:, -self._output_dim:]
        inverse_actual_data = scaler.inverse_transform(data_test[:predicted_data.shape[0]])
        ground_truth = inverse_actual_data[:, -self._output_dim:]
        np.save(self._log_dir + 'pd', predicted_data)
        np.save(self._log_dir + 'gt', ground_truth)
        # save metrics to log dir
        error_list = utils.cal_error(ground_truth.flatten(), predicted_data.flatten())
        utils.save_metrics(error_list, self._log_dir, self._alg_name)
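
The update pd = yhats * (1.0 - _bm) + _gt * _bm keeps the verified measurement wherever the binary matrix is 1 and falls back to the model's forecast wherever it is 0. utils.binary_matrix is not shown in the source; assuming it simply draws Bernoulli indicators at the given verification rate (an assumption), the mixing step looks like this:

    import numpy as np

    def binary_matrix(verified_percentage, rows, cols, seed=0):
        # ASSUMPTION: Bernoulli(verified_percentage) indicator matrix;
        # the real utils.binary_matrix may be defined differently.
        rng = np.random.default_rng(seed)
        return (rng.random((rows, cols)) < verified_percentage).astype('float32')

    bm = binary_matrix(0.5, rows=8, cols=3)
    gt = np.ones((8, 3), dtype='float32')      # verified readings
    yhats = np.zeros((8, 3), dtype='float32')  # model forecasts
    mixed = yhats * (1.0 - bm) + gt * bm       # 1 -> ground truth, 0 -> forecast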
Example #3
    def _test(self):
        scaler = self._data['scaler']
        data_test = self._data['test_data_norm']
        T = len(data_test)
        K = data_test.shape[1]
        bm = utils.binary_matrix(self._verified_percentage, len(data_test),
                                 self._nodes)
        l = self._seq_len
        h = self._horizon
        data = np.zeros(shape=(T - h, self._input_dim), dtype='float32')
        data[:l, :] = data_test[:l, :]

        _data = np.zeros(shape=(T - h, self._input_dim), dtype='float32')
        _data[:l, :] = data_test[:l, :]
        iterator = tqdm(range(0, T - l - h, h))
        for i in iterator:
            if i + l + h > T - h:
                # trim the all-zero rows that were never filled
                data = data[~np.all(data == 0, axis=1)]
                _data = _data[~np.all(_data == 0, axis=1)]
                iterator.close()
                break

            model_input = np.zeros(shape=(1, l, self._input_dim))
            model_input[0, :, :] = data[i:i + l, :].copy()
            yhats = self._predict_full_model(model_input)
            # drop the trailing singleton axis of the prediction
            yhats = np.squeeze(yhats, axis=-1)
            _data[i + l:i + l + h, -1] = yhats
            # refresh the model inputs with ground truth for the next window
            _gt = data_test[i + l:i + l + h].copy()
            data[i + l:i + l + h] = _gt
        # save predictions and ground truth to the log dir
        predicted_data = scaler.inverse_transform(_data)[:, -1]
        ground_truth = scaler.inverse_transform(data_test[:_data.shape[0]])[:, -1]

        np.save(self._log_dir + 'pd', predicted_data)
        np.save(self._log_dir + 'gt', ground_truth)
        # save metrics to log dir
        error_list = utils.cal_error(ground_truth.flatten(),
                                     predicted_data.flatten())
        utils.save_metrics(error_list, self._log_dir, self._alg_name)
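
All of these variants preallocate the prediction buffers with zeros and, on the last iteration, drop the rows that were never written. The boolean-mask idiom used for that trimming is worth isolating; note that it would also discard a genuinely all-zero observation, which is harmless only when the data are never exactly zero:

    import numpy as np

    buf = np.array([[0.2, 0.1],
                    [0.0, 0.0],   # row that was never written
                    [0.5, 0.3]], dtype='float32')
    trimmed = buf[~np.all(buf == 0, axis=1)]  # keep rows with any nonzero entry
    assert trimmed.shape == (2, 2)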
Example #4
    def _test(self):
        scaler = self._data['scaler']
        data_test = self._data['test_data_norm']
        T = len(data_test)
        K = data_test.shape[1]
        bm = utils.binary_matrix(self._verified_percentage, len(data_test),
                                 self._nodes)
        l = self._seq_len
        h = self._horizon
        pd = np.zeros(shape=(T - h, self._nodes), dtype='float32')
        pd[:l] = data_test[:l]
        _pd = np.zeros(shape=(T - h, self._nodes), dtype='float32')
        _pd[:l] = data_test[:l]
        iterator = tqdm(range(0, T - l - h, h))
        for i in iterator:
            if i + l + h > T - h:
                # trim the all-zero rows that were never filled
                pd = pd[~np.all(pd == 0, axis=1)]
                _pd = _pd[~np.all(_pd == 0, axis=1)]
                iterator.close()
                break
            # predict each node independently with a univariate model
            for k in range(K):
                model_input = np.zeros(shape=(1, l, self._input_dim))
                # input_dim = 2
                model_input[0, :, 0] = pd[i:i + l, k]
                # model_input[0, :, 1] = bm[i:i + l, k]
                yhats = self._predict(model_input)
                yhats = np.squeeze(yhats, axis=-1)
                _pd[i + l:i + l + h, k] = yhats
                # keep verified measurements where bm == 1, forecasts elsewhere
                _bm = bm[i + l:i + l + h, k].copy()
                _gt = data_test[i + l:i + l + h, k].copy()
                pd[i + l:i + l + h, k] = yhats * (1.0 - _bm) + _gt * _bm
        # save bm and pd to the log dir
        np.savez(self._log_dir + "binary_matrix_and_pd", bm=bm, pd=pd)
        predicted_data = scaler.inverse_transform(_pd)
        ground_truth = scaler.inverse_transform(data_test[:_pd.shape[0]])
        np.save(self._log_dir + 'pd', predicted_data)
        np.save(self._log_dir + 'gt', ground_truth)
        # save metrics to the log dir
        error_list = utils.cal_error(ground_truth.flatten(),
                                     predicted_data.flatten())
        utils.save_metrics(error_list, self._log_dir, self._alg_name)
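
Example #4 also archives the binary matrix and the mixed predictions with np.savez, which appends an .npz extension; they can be reloaded later for offline analysis, e.g.:

    import numpy as np

    archive = np.load(log_dir + "binary_matrix_and_pd.npz")  # log_dir as used above
    bm_saved, pd_saved = archive["bm"], archive["pd"]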
Example #5
    def _predict(self):
        bm = utils.binary_matrix(self._verified_percentage,
                                 self._data.shape[0], self._nodes)
        predictions = list()
        gt = []
        l = self._seq_len
        h = self._horizon

        # run walk-forward prediction for each of the 29 nodes
        for column_load_area in range(self._nodes):
            data = self._data[:, column_load_area]
            size = int(len(data) * self._test_size)
            train, test = data[0:size], data[size:]
            # keep only the last l training points as the initial history
            history = list(train[-l:])
            for t in range(0, len(test) - h, h):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore")
                    # Only use l time-steps as inputs
                    model = auto_arima(np.array(history[-l:]),
                                       error_action='ignore')
                    yhat = model.predict(n_periods=h)
                    predictions.append(yhat)
                    gt.append(test[t:t + h])
                    for i in range(h):
                        if bm[(t + size + i), column_load_area] == 1:
                            # Update the data if verified == True
                            history.append(test[t + i])
                        else:
                            # Otherwise use the predicted data
                            history.append(yhat[i])
        predictions = np.stack(predictions, axis=0)
        gt = np.stack(gt, axis=0)

        # save metrics to log
        error_list = utils.cal_error(gt.flatten(), predictions.flatten())
        utils.save_metrics(error_list, self._log_dir, self._alg_name)
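
Example #5 replaces the neural predictor with pmdarima's auto_arima, refitting on the most recent l observations at every step; history is extended with the verified value when bm is 1 and with the model's own forecast otherwise. A condensed sketch of that walk-forward loop on a single synthetic series (the bm handling is omitted here and every observation is trusted):

    import numpy as np
    from pmdarima import auto_arima

    series = np.sin(np.linspace(0, 30, 300)) + 0.1 * np.random.randn(300)
    l, h = 48, 4
    split = int(len(series) * 0.8)
    history = list(series[:split][-l:])
    preds, gt = [], []
    for t in range(0, len(series) - split - h, h):
        model = auto_arima(np.array(history[-l:]), error_action='ignore')
        yhat = model.predict(n_periods=h)
        preds.append(yhat)
        gt.append(series[split + t:split + t + h])
        history.extend(series[split + t:split + t + h])  # trust every observation
    preds, gt = np.stack(preds), np.stack(gt)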