Example No. 1
    def train(self, X_, Y_):
        print('training ... ')
        # Least-squares solution for the output weights via the Moore-Penrose pseudo-inverse
        self.weights = np.dot(np.linalg.pinv(X_), Y_)
        # Training-set predictions and their RMSE
        Y = np.dot(X_, self.weights)
        rmse = np.sqrt(sklmse(Y_, Y))

        return Y, rmse
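These snippets do not show the surrounding module, so the aliases have to be assumed: np is presumably numpy and sklmse an alias for scikit-learn's mean_squared_error. A minimal set of imports that would make the example runnable, under that assumption:

import numpy as np
from sklearn.metrics import mean_squared_error as sklmse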
Example No. 2
    def valid(self, trainX, trainY):
        # Hidden-layer activations projected through the output weights
        H = self.activCalc(trainX)
        output = numpy.dot(H, self.W)
        assert numpy.shape(output) == (numpy.shape(trainX)[0], self.outdim)
        # RMSE between predictions and targets
        rmse = numpy.sqrt(sklmse(output, trainY))

        return output, rmse
Example No. 3
File: gbr.py Project: a-jd/npsn
    def train_model(self, params):
        '''
        Input a dict, params, containing:
            loss: String, 'ls', 'lad', or 'huber'
            learning_rate: Float, ~0.1
            n_estimators: Int, boosting stages, ~100
            criterion: String, split quality, 'friedman_mse', 'mse', 'mae'
            max_depth: Int, depth of regressors, ~3
            max_features: String, method, 'auto', 'sqrt', 'log2'
        Returns:
            Dict containing info on combination
        '''
        loss_type = params['loss']
        learning_rate = params['learning_rate']
        n_estimators = int(params['n_estimators'])
        criterion = params['criterion']
        max_depth = int(params['max_depth'])
        max_features = params['max_features']

        model = MOR(
            skGBR(loss=loss_type,
                  learning_rate=learning_rate,
                  n_estimators=n_estimators,
                  criterion=criterion,
                  max_depth=max_depth,
                  max_features=max_features))

        # Print current combination
        print('Current GBR combination: {}'.format(params))

        # Flat versions of y (power/flux distribution)
        y_tr_fl, y_te_fl = self.flat_y()

        # Fit
        model.fit(self.x_train, y_tr_fl)

        # Hyperopt loss for each combination
        y_predict = model.predict(self.x_test)
        hyp_loss = sklmse(y_te_fl, y_predict)
        self.tr_hist.update_history(params, hyp_loss, model)

        return {'loss': hyp_loss, 'status': STATUS_OK}
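The aliases in this example are likewise not defined in the snippet; presumably MOR is sklearn.multioutput.MultiOutputRegressor, skGBR is sklearn.ensemble.GradientBoostingRegressor, and STATUS_OK comes from hyperopt. A sketch of the imports it appears to rely on:

from sklearn.multioutput import MultiOutputRegressor as MOR
from sklearn.ensemble import GradientBoostingRegressor as skGBR
from sklearn.metrics import mean_squared_error as sklmse
from hyperopt import STATUS_OK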
Example No. 4
File: svr.py Project: a-jd/npsn
    def train_model(self, params):
        '''
        Input a dict, params, containing:
            nu: Float, fraction of support vectors (0,1]
            C: Float, penalty parameter of error (~1.0)
            kernel: String, 'linear', 'poly', 'rbf', 'sigmoid'
            degree: Int, degree of polynomial for poly
            gamma: String, 'scale'/'auto' for 'rbf', 'poly', 'sigmoid'
        Returns:
            Dict containing info on combination
        '''
        kernel = params['kernel']
        nu = params['nu']
        C = params['C']

        # Instantiate SVR
        if kernel in ['linear']:
            model = MOR(NuSVR(C=C, nu=nu, kernel=kernel))
        elif kernel in ['rbf', 'sigmoid']:
            gamma = params['gamma']
            model = MOR(NuSVR(C=C, nu=nu, kernel=kernel, gamma=gamma))
        elif kernel in ['poly']:
            gamma = params['gamma']
            degree = params['degree']
            model = MOR(
                NuSVR(C=C, nu=nu, kernel=kernel, degree=degree, gamma=gamma))

        # Print current combination
        print('Current SVR combination: {}'.format(params))

        # Flat versions of y (power/flux distribution)
        y_tr_fl, y_te_fl = self.flat_y()

        # Fit
        model.fit(self.x_train, y_tr_fl)

        # Hyperopt loss for each combination
        y_predict = model.predict(self.x_test)
        hyp_loss = sklmse(y_te_fl, y_predict)
        self.tr_hist.update_history(params, hyp_loss, model)

        return {'loss': hyp_loss, 'status': STATUS_OK}
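Returning {'loss': ..., 'status': STATUS_OK} follows hyperopt's objective-function protocol, so both train_model variants above can be driven directly by fmin. A minimal sketch of such a search; the search space and the trainer object are illustrative assumptions, not taken from the project:

from hyperopt import fmin, tpe, hp, Trials

# Hypothetical search space matching the NuSVR parameters above
space = {
    'kernel': hp.choice('kernel', ['linear', 'poly', 'rbf', 'sigmoid']),
    'nu': hp.uniform('nu', 0.1, 1.0),
    'C': hp.loguniform('C', -2, 2),
    'gamma': hp.choice('gamma', ['scale', 'auto']),
    'degree': hp.quniform('degree', 2, 5, 1),
}

trials = Trials()
best = fmin(fn=trainer.train_model,  # trainer: an instance of the class above (assumed)
            space=space,
            algo=tpe.suggest,
            max_evals=50,
            trials=trials)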
Example No. 5
    def train_model(self, params):
        '''
        Input a dict, params, containing:
            kernel: String, Covariance function
                    {Linear, Exponential, Matern52,
                     Linear+Exponential, Linear*Exponential,
                     Linear+Matern52, Linear*Matern52,
                     Exponential+Matern52, Exponential*Matern52,
                     Dim_Linear+Exponential, Dim_Linear*Exponential,
                     Dim_Linear+Matern52, Dim_Linear*Matern52,
                     Dim_Exponential+Matern52, Dim_Exponential*Matern52,
                     }
            nipts: Integer, number of inducing points (kernel definition)
        Returns:
            Dict containing info on combination
        '''
        kernel = params['kernel']
        nipts = params['nipts']

        # Flat versions of y (power/flux distribution)
        y_tr_fl, y_te_fl = self.flat_y()

        # Define kernel used
        y_flshp = np.prod(self.y_shape)
        if kernel == 'Linear':
            ker = kc_si([k_lin() for _ in range(y_flshp)])
        elif kernel == 'Exponential':
            ker = kc_si([k_exp() for _ in range(y_flshp)])
        elif kernel == 'Matern52':
            ker = kc_si([k_m52() for _ in range(y_flshp)])
        elif kernel == 'Linear+Exponential':
            kl = [k_lin() + k_exp() for _ in range(y_flshp)]
            ker = kc_si(kl)
        elif kernel == 'Linear*Exponential':
            kl = [k_lin() * k_exp() for _ in range(y_flshp)]
            ker = kc_si(kl)
        elif kernel == 'Dim_Linear+Exponential':
            kl = [
                k_lin(active_dims=[0]) + k_exp(active_dims=[1])
                for _ in range(y_flshp)
            ]
            ker = kc_si(kl)
        elif kernel == 'Dim_Linear*Exponential':
            kl = [
                k_lin(active_dims=[0]) * k_exp(active_dims=[1])
                for _ in range(y_flshp)
            ]
            ker = kc_si(kl)
        elif kernel == 'Linear+Matern52':
            kl = [k_lin() + k_m52() for _ in range(y_flshp)]
            ker = kc_si(kl)
        elif kernel == 'Linear*Matern52':
            kl = [k_lin() * k_m52() for _ in range(y_flshp)]
            ker = kc_si(kl)
        elif kernel == 'Dim_Linear+Matern52':
            kl = [
                k_lin(active_dims=[0]) + k_m52(active_dims=[1])
                for _ in range(y_flshp)
            ]
            ker = kc_si(kl)
        elif kernel == 'Dim_Linear*Matern52':
            kl = [
                k_lin(active_dims=[0]) * k_m52(active_dims=[1])
                for _ in range(y_flshp)
            ]
            ker = kc_si(kl)
        elif kernel == 'Exponential+Matern52':
            kl = [k_exp() + k_m52() for _ in range(y_flshp)]
            ker = kc_si(kl)
        elif kernel == 'Exponential*Matern52':
            kl = [k_exp() * k_m52() for _ in range(y_flshp)]
            ker = kc_si(kl)
        elif kernel == 'Dim_Exponential+Matern52':
            kl = [
                k_exp(active_dims=[0]) + k_m52(active_dims=[1])
                for _ in range(y_flshp)
            ]
            ker = kc_si(kl)
        elif kernel == 'Dim_Exponential*Matern52':
            kl = [
                k_exp(active_dims=[0]) * k_m52(active_dims=[1])
                for _ in range(y_flshp)
            ]
            ker = kc_si(kl)

        # Define inducing points
        # Inducing points = points at which the kernel is trained/defined
        # Helps reduce the computational requirement
        # Must be weighed against the reduction in variance vs. number of points
        # Hard-coded upper and lower bounds for scaled control
        # rod heights (0.1, 0.9). See PowerReader._initiate_scalers
        # in npsn/dg.py
        single_ipts = np.linspace(0.1, 0.9, nipts)[:, None]
        ipts = np.repeat(single_ipts, self.x_shape, axis=1)

        ivars = gpf.inducing_variables.SharedIndependentInducingVariables(
            gpf.inducing_variables.InducingPoints(ipts))

        # Define the GP model
        model = gpf.models.SVGP(ker,
                                gpf.likelihoods.Gaussian(),
                                inducing_variable=ivars,
                                num_latent_gps=y_flshp)

        # Optimize
        opt = gpf.optimizers.Scipy()
        opt.minimize(
            model.training_loss_closure((self.x_train, y_tr_fl)),
            variables=model.trainable_variables,
            method='L-BFGS-B',
            options={
                "disp": True,
                "maxiter": ci_niter(2000)
            },
        )

        # Hyperopt loss for each combination
        y_predict, y_predict_var = model.predict_y(self.x_test)
        hyp_loss = sklmse(y_te_fl, y_predict)
        self.tr_hist.update_history(params, hyp_loss, model)

        return {'loss': hyp_loss, 'status': STATUS_OK}
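The GPflow example also relies on aliases the snippet does not define. Presumably gpf is gpflow, kc_si is the SeparateIndependent multi-output kernel, k_lin, k_exp, and k_m52 are the Linear, Exponential, and Matern52 kernels, and ci_niter comes from gpflow.ci_utils. A guess at the corresponding imports:

import numpy as np
import gpflow as gpf
from gpflow.ci_utils import ci_niter
from gpflow.kernels import SeparateIndependent as kc_si
from gpflow.kernels import Linear as k_lin
from gpflow.kernels import Exponential as k_exp
from gpflow.kernels import Matern52 as k_m52
from sklearn.metrics import mean_squared_error as sklmse
from hyperopt import STATUS_OK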
Example No. 6
def calc_err(y_true, y_predict):
    return sklmse(y_true, y_predict, multioutput='raw_values')
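With multioutput='raw_values', scikit-learn's mean_squared_error returns one error per output column rather than a single averaged value. A small usage sketch, again assuming sklmse aliases sklearn.metrics.mean_squared_error:

import numpy as np
from sklearn.metrics import mean_squared_error as sklmse

y_true = np.array([[1.0, 2.0], [3.0, 4.0]])
y_pred = np.array([[1.5, 2.0], [3.0, 3.0]])

# One MSE per output column: [0.125, 0.5]
print(sklmse(y_true, y_pred, multioutput='raw_values'))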
Example No. 7
    def predict(self, X_, Y_):
        print('validating ... ')
        # Apply the previously fitted weights and report the RMSE against Y_
        Y = np.dot(X_, self.weights)
        rmse = np.sqrt(sklmse(Y_, Y))

        return Y, rmse