Example #1
0
    def loss_lsf(self, try_alpha0, user_msmt_train=0):
        '''Run one LSF training pass; return loss, error train and AR(q) weights.

        Used for tuning the gradient descent hyper-parameter alpha_0 based on
        minimum total loss.

        Parameters:
        ----------
            try_alpha0 (`float64`) : Hyper-parameter (learning-rate coefficient)
                for gradient descent in LSF.
            user_msmt_train (`float64`) : Specifies training data for LSF:
                If user_msmt_train != 0:
                    Gradient descent in LSF will use the supplied data set for
                    all training.
                If user_msmt_train == 0:
                    A new noise realisation and msmt data is generated for each
                    new choice of try_alpha0 i.e. loss_lsf == Bayes Risk.
                Defaults to 0.

        Returns:
        -------
            lossval (`float64`) : Final training error (last element of
                errorTrain), or None if that value is not finite.
            errorTrain (`float64`) : Residuals at each iteration step of
                gradient descent in LSF.
            weights (`float64`) : Weights learned from training data (`q`
                weights for an approximation to an autoregressive process AR(q)).
        '''

        # A non-zero sum signals caller-supplied training data; otherwise draw
        # a fresh noise realisation / measurement record for this alpha trial.
        # NOTE(review): a genuine data set summing exactly to zero would be
        # misclassified as "no data" here — confirm callers never pass one.
        if np.sum(user_msmt_train) != 0:
            measurements_train = user_msmt_train
        else:
            measurements_train = get_data(self.dataobject)[0]

        training_data = sp.build_training_dataset(
            measurements_train,
            past_msmts=self.LSF_past_msmts,
            steps_forward=self.LSF_steps_forward,
            steps_between_msmts=self.LSF_steps_between_msmts)

        weights, errorTrain = sp.gradient_descent(training_data,
                                                  self.LSF_err_train_iter,
                                                  alpha_coeff=try_alpha0)

        # Loss is the last value of err train (not a gradient estimate of
        # error train).
        lossval = errorTrain[-1]

        if np.isfinite(lossval):
            return lossval, errorTrain, weights[:, 0]
        print("Lossval not finite")
        return None, errorTrain, weights[:, 0]
Example #2
0
    def train_alpha0_manual(self, arr_alphas):
        '''Return the lowest-loss alpha_0 from arr_alphas.

        A single training data set is drawn once and reused for every candidate
        alpha so that loss values are comparable across candidates.

        Parameters:
        ----------
            arr_alphas (`float64`): Set of candidate alpha values.

        Returns:
        -------
            arr_alphas[index[0]] (`float64`) : Optimally tuned (lowest loss)
                alpha value.
            index (`float64`) : Indices (into arr_alphas) ordering candidates
                by loss; index[0] corresponds to the optimal alpha.
            lossvalTrain (`float64`) : Representative loss value for one run of
                LSF with each alpha.
            errTrains (`float64`) : Residual error for each iterative step
                within one run of LSF, for all alphas.
            weightTrain (`float64`) : AR(q) weights discovered by LSF for all
                values of alpha.
        '''

        num_alphas = arr_alphas.shape[0]
        lossvalTrain = np.zeros(num_alphas)
        errTrains = np.zeros((num_alphas, self.LSF_err_train_iter))
        weightTrain = np.zeros((num_alphas, self.LSF_past_msmts))

        # Use only one data set for training (shared across all candidates).
        measurements_train = get_data(self.dataobject)[0]

        for idx in range(num_alphas):
            (lossvalTrain[idx],
             errTrains[idx, :],
             weightTrain[idx, :]) = self.loss_lsf(
                 arr_alphas[idx], user_msmt_train=measurements_train)

        # Pick the alpha in arr_alphas with the lowest loss.
        index, losses = sort_my_vals(lossvalTrain)
        return arr_alphas[index[0]], index, lossvalTrain, errTrains, weightTrain
Example #3
0
    def make_LS_Ensemble_data(self, pick_alpha0, savetopath, num_of_iterGD=50):
        '''Run an ensemble of LSF forecasts and save the analysis as an .npz file.

        Each ensemble member draws fresh training and validation data, runs LSF
        forecasting, and the accumulated results are re-saved after every run
        so partial output survives an interruption.

        Parameters:
        ----------
             pick_alpha0 (`float64`) : Hyper-parameter for gradient descent tuning.
             savetopath (`str`) : Filepath for saving LSF analysis output as a
                .npz file.
             num_of_iterGD (`int`, optional) : Number of iterations of gradient
                descent in LSF. Defaults to 50.

        Returns:
        -------
            None. Saves LSF predictions analysis as an .npz file (side effect).
        '''

        n_train = self.dataobject.Expt.n_train
        # First index at which predictions can start, given the number of past
        # measurements each AR(q) prediction consumes.
        n_start_at = n_train - self.LSF_past_msmts + 1

        macro_weights = []
        macro_predictions = []
        macro_actuals = []
        # Error train per steps forward; not a risk average.
        macro_errorTrain_fore = []
        macro_truths = []
        macro_data = []

        for idx_en in range(self.LSF_ensembl_size):

            # Fresh training and validation realisations per ensemble member.
            measurements_train, pick_train = get_data(self.dataobject)
            measurements_val, pick_val = get_data(self.dataobject)

            # Ground-truth noise trace matching the validation realisation.
            shape = self.dataobject.LKFFB_macro_truth.shape
            noisetrace_val = self.dataobject.LKFFB_macro_truth.reshape(
                shape[0] * shape[1], shape[2])[pick_val, :]

            output = doLSF_forecast(
                measurements_train,
                measurements_val,
                pick_alpha0,
                n_start_at,
                self.LSF_steps_forward,
                self.LSF_past_msmts,
                steps_between_msmts=self.LSF_steps_between_msmts,
                num_of_iterGD=num_of_iterGD)

            macro_weights.append(output[1])
            macro_predictions.append(output[2])
            macro_actuals.append(output[3])
            macro_errorTrain_fore.append(output[4])
            macro_truths.append(noisetrace_val)
            macro_data.append(measurements_val)

            # Save after each run so partial results are never lost.
            np.savez(
                savetopath + 'test_case_' + str(self.test_case) + '_var_' +
                str(self.variation) + '_LS_Ensemble',
                n_start_at=n_start_at,
                past_msmts=self.LSF_past_msmts,
                n_train=n_train,  # stored for reference; not consumed downstream
                n_predict=self.LSF_steps_forward,
                test_case=self.test_case,
                var=self.variation,
                pick_alpha=pick_alpha0,
                macro_weights=macro_weights,
                macro_predictions=macro_predictions,
                macro_actuals=macro_actuals,
                macro_truths=macro_truths,
                macro_data=macro_data,
                macro_errorTrain_fore=macro_errorTrain_fore)