Example #1
    def gradient_descent(self):

        # this is in case we want to compute baseline and beta
        # parameters via linear regression rather than estimation
        # this should only be used for a grid-search, not fmin
        if self.overloaded_ballpark is not None:
            return utils.gradient_descent_search(
                self.data, utils.error_function,
                self.model.generate_prediction, self.overloaded_ballpark,
                self.bounds, self.very_verbose)
        else:
            return utils.gradient_descent_search(
                self.data, utils.error_function,
                self.model.generate_prediction, self.ballpark, self.bounds,
                self.very_verbose)
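Most of the snippets on this page call utils.gradient_descent_search with the same argument order: the measured data, an error function, a prediction function, a "ballpark" starting estimate, the parameter bounds, and a verbosity flag. The stand-in below is a minimal sketch of that calling convention only, assuming the search wraps scipy.optimize.fmin_powell with full_output=True; it is not popeye's actual implementation, and the bounds argument is accepted but not enforced here.

import numpy as np
from scipy.optimize import fmin_powell

def error_function(parameters, data, prediction_func, verbose):
    # Stand-in for utils.error_function: sum of squared residuals between
    # the data and the prediction generated from the candidate parameters.
    prediction = prediction_func(*parameters)
    error = np.sum((data - prediction) ** 2)
    if verbose:
        print(parameters, error)
    return error

def gradient_descent_search(data, error_function, prediction_func,
                            ballpark, bounds, verbose):
    # Stand-in for utils.gradient_descent_search. `bounds` is accepted only
    # to mirror the call signature used above; it is not enforced in this
    # sketch. With full_output=True the fitted parameters are the first
    # element of the returned tuple, which is why later snippets keep only
    # element [0] of the result.
    return fmin_powell(error_function, ballpark,
                       args=(data, prediction_func, verbose),
                       full_output=True, disp=verbose)

# Tiny demonstration: recover the slope and intercept of a noiseless line.
t = np.linspace(0, 1, 100)
make_prediction = lambda slope, intercept: slope * t + intercept
data = make_prediction(2.0, -1.0)
fit = gradient_descent_search(data, error_function, make_prediction,
                              ballpark=(1.0, 0.0), bounds=(), verbose=0)
print(fit[0])  # approximately [ 2. -1.]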
Example #2
                def optimize_parameters(args):
                    data, ix, row = args

                    #if row['r2'] < r2_thr:
                    #return ix, return_grid_results(row)

                    #else:
                    ballpark = row.x, row.y, row.size, row.amplitude, row.baseline

                    r = gradient_descent_search(
                        data, error_function,
                        self.model_func.generate_prediction, ballpark, bounds,
                        verbose)

                    row['x'] = r[0][0]
                    row['y'] = r[0][1]
                    row['size'] = r[0][2]
                    row['amplitude'] = r[0][3]
                    row['baseline'] = r[0][4]

                    pred = self.model_func.generate_prediction(*r[0])
                    row['r2'] = coeff_of_determination(data, pred) / 100

                    row['estimation_method'] = 'Traditional method'

                    return ix, row
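The worker above unpacks a (data, ix, row) tuple and hands back (ix, row), which suggests it is mapped over the rows of a table of grid-fit results. The driver below is a hypothetical sketch of that pattern; the DataFrame contents and the simplified worker body are illustrative assumptions, not code from the project.

import numpy as np
import pandas as pd

# Hypothetical table of grid-fit ("ballpark") results; the column names
# follow the row fields used in the snippet above.
grid_results = pd.DataFrame({'x': [0.5, -1.0], 'y': [1.0, 2.0],
                             'size': [1.5, 2.5], 'amplitude': [1.0, 0.8],
                             'baseline': [0.0, 0.1], 'r2': [0.40, 0.60]})
timecourses = [np.random.randn(120) for _ in range(len(grid_results))]

def optimize_parameters(args):
    # Simplified stand-in for the worker above; a real worker would refine
    # the ballpark with gradient_descent_search and overwrite x, y, size,
    # amplitude, baseline and r2 on the row before returning it.
    data, ix, row = args
    row['r2'] = min(row['r2'] + 0.1, 1.0)
    return ix, row

jobs = [(timecourses[i], ix, row.copy())
        for i, (ix, row) in enumerate(grid_results.iterrows())]

# Returning (ix, row) lets each result be written back by index, which works
# the same way with a plain map or with multiprocessing.Pool.imap.
for ix, row in map(optimize_parameters, jobs):
    grid_results.loc[ix] = row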
Example #3
                def optimize_parameters(args):

                    data, ix, row = args

                    #if row['r2'] < r2_thr:
                    #return ix, return_grid_results(row)

                    #else:
                    data_ = (data - data.mean()) / data.std()

                    ballpark = row.x, row.y, row.size
                    r = gradient_descent_search(data, error_function,
                                                make_zscored_prediction,
                                                ballpark, bounds, verbose)

                    row['x'] = r[0][0]
                    row['y'] = r[0][1]
                    row['size'] = r[0][2]

                    X = np.ones((len(data_), 2))
                    X[:, 1] = make_zscored_prediction(*r[0], normalize=False)

                    beta, residuals, _, _ = np.linalg.lstsq(X, data)

                    row['baseline'] = beta[0]
                    row['amplitude'] = beta[1]

                    row['r2'] = coeff_of_determination(data, X.dot(beta)) / 100
                    row['estimation_method'] = 'Correlation method'

                    return ix, row
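The regression step above can be reproduced on synthetic data: with the position and size fixed, baseline and amplitude fall out of an ordinary least-squares fit of the data onto the model prediction plus an intercept column. In this self-contained sketch the sine wave is only a stand-in for make_zscored_prediction, and the last line mirrors coeff_of_determination(data, X.dot(beta)) / 100 under the assumption that that helper returns a percentage.

import numpy as np

rng = np.random.default_rng(0)
prediction = np.sin(np.linspace(0, 4 * np.pi, 200))    # stand-in model timecourse
data = 2.5 * prediction + 0.7 + 0.1 * rng.standard_normal(200)

# Design matrix: a column of ones (baseline) and the prediction (amplitude).
X = np.ones((len(data), 2))
X[:, 1] = prediction
beta, residuals, rank, sv = np.linalg.lstsq(X, data, rcond=None)

baseline, amplitude = beta     # roughly 0.7 and 2.5
r2 = 1 - np.sum((data - X.dot(beta)) ** 2) / np.sum((data - data.mean()) ** 2)
print(baseline, amplitude, r2)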
Example #4
def test_gradient_descent_search():

    # create a parameter to estimate
    params = (10, 10)

    # set grids + bounds
    grids = ((0, 20), (5, 15))
    bounds = ()

    # set the verbose level: 0 is silent, 1 is final estimate, 2 is each iteration
    verbose = 0

    # set the number of search samples
    Ns = 3

    # create a simple function to transform the parameters
    func = lambda freq, offset: np.sin(
        np.linspace(0, 1, 1000) * 2 * np.pi * freq) + offset

    # create a "response"
    response = func(*params)

    # get the fine estimate
    phat = utils.gradient_descent_search(response, utils.error_function, func,
                                         (8, 8), bounds, verbose)

    # assert that the estimate is equal to the parameter
    npt.assert_almost_equal(params, phat[0])
Example #5
File: og.py Project: mekman/popeye
 def estimate(self):
     return utils.gradient_descent_search((self.x0, self.y0, self.s0, self.beta0, self.hrf0),
                                          (self.model.stimulus.deg_x,
                                           self.model.stimulus.deg_y,
                                           self.model.stimulus.stim_arr,
                                           self.tr_length),
                                          self.fit_bounds,
                                          self.data,
                                          utils.error_function,
                                          compute_model_ts)
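Note that this snippet passes its arguments in a different order from the other examples, apparently reflecting an older version of the API: the parameter tuple comes first, followed by the stimulus arguments, the bounds, the data, the error function, and the prediction function. The spectrogram-based example below uses the same convention.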
Example #6
 def estimate(self):
     return utils.gradient_descent_search((self.center_freq0, self.sigma0),
                                          (self.model.stimulus.spectrogram,
                                           self.model.stimulus.freqs,
                                           self.model.stimulus.target_times),
                                          self.bounds,
                                          self.data,
                                          utils.error_function,
                                          compute_model_ts,
                                          self.very_verbose)
Example #7
File: base.py Project: arokem/popeye
 def gradient_descent(self):
     
     if self.very_verbose: # pragma: no cover
         print('The gridfit solution was %s, starting gradient descent ...' %(self.ballpark))
     
     return utils.gradient_descent_search(self.data,
                                          utils.error_function,
                                          self.model.generate_prediction,
                                          self.ballpark,
                                          self.bounds,
                                          self.very_verbose)
Example #8
File: base.py Project: noahbenson/popeye
    def gradient_descent(self):

        if self.very_verbose:  # pragma: no cover
            print(
                'The gridfit solution was %s, starting gradient descent ...' %
                (self.ballpark))

        return utils.gradient_descent_search(self.data, utils.error_function,
                                             self.model.generate_prediction,
                                             self.ballpark, self.bounds,
                                             self.very_verbose)
Example #9
def fit_gradient_descent(model, data, ballpark, bounds, verbose=0):
    return utils.gradient_descent_search(data, utils.error_function,
                                         model.generate_prediction, ballpark,
                                         bounds, verbose)[0]
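As a usage illustration of the wrapper above, the toy below substitutes a duck-typed model and scipy's Powell search for the popeye utilities (an assumption made purely for illustration); the trailing [0] keeps just the parameter estimate from the optimizer's full output.

import numpy as np
from scipy.optimize import fmin_powell

class LineModel:
    # Minimal duck-typed model exposing generate_prediction(), which is all
    # the wrapper needs.
    t = np.linspace(0, 1, 50)

    def generate_prediction(self, slope, intercept):
        return slope * self.t + intercept

def _sse(params, data, predict):
    # Stand-in error function: sum of squared residuals.
    return np.sum((data - predict(*params)) ** 2)

def fit_gradient_descent(model, data, ballpark, bounds, verbose=0):
    # `bounds` is kept for signature compatibility but not enforced here.
    return fmin_powell(_sse, ballpark,
                       args=(data, model.generate_prediction),
                       full_output=True, disp=verbose)[0]

model = LineModel()
data = model.generate_prediction(2.0, -1.0)
print(fit_gradient_descent(model, data, ballpark=(1.0, 0.0), bounds=()))
# approximately [ 2. -1.]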