# NOTE: the two methods below belong to a larger estimation class; numpy as np,
# sys, copy and constrNMPy as cNM are assumed imported at module level.
def estimate_p(self):
        
        # estimate p_a and p_i for each threshold quasi-independently,
        # since lower thresholds should not depend on higher thresholds
        for thr_k in range(self.num_thresh):

            # set up adaptive constraints so that p is positive and strictly decreasing with threshold:
            if thr_k == 0:
                p_UB = [1.,1.]
            else:
                p_UB = [self.p_a[thr_k-1],self.p_i[thr_k-1]]

            p_guess = [self.p_a[thr_k],self.p_i[thr_k]]

            # make sure the initial guess respects the (tighter) upper bounds; if violated, pull it well inside
            if p_guess[0] > p_UB[0]:
                p_guess[0] = 0.4*p_UB[0]
            if p_guess[1] > p_UB[1]:
                p_guess[1] = 0.4*p_UB[1]

            # use constrained Nelder-Mead for estimation
            p_opt_res = cNM.constrNM(self.p_func,p_guess,[0.,0.],p_UB,full_output=True,args=[thr_k])
            p_est = p_opt_res['xopt']
            
            # update guesses for p_a & p_i
            self.p_a[thr_k] = p_est[0]
            self.p_i[thr_k] = p_est[1]

            # calculate difference from previous guesses
            self.p_a_conv[thr_k] = np.abs(p_est[0] - p_guess[0])
            self.p_i_conv[thr_k] = np.abs(p_est[1] - p_guess[1])
    def estimate_lambda(self):
        # ICM estimation pass for lambda: each voxel's lambda depends only on its nearest neighbors
        lambda_prev = copy.deepcopy(self.lambd)
        for vox in range(self.lambd.size):
            # only perform estimation if voxel is in use:
            if self.vox_in_use[vox]:
                # maximize the local likelihood with constrained Nelder-Mead
                lambda_guess = [self.lambd[vox]]
                vox_opt_res = cNM.constrNM(self.lambda_func, lambda_guess, [0.], [1.], full_output=True, args=[vox])
                self.lambd[vox] = vox_opt_res['xopt'][0]
                # report estimation progress to the terminal, overwriting in place
                lambda_msg = "Estimation complete for voxel %i of %i" % (vox + 1, self.lambd.size)
                sys.stdout.write(lambda_msg + chr(8)*len(lambda_msg))
                sys.stdout.flush()
                if vox_opt_res['warnflag']:
                    sys.stdout.write("\n")
                    print('Potential issue with lambda optimization')
                    print(vox_opt_res)
                    print(vox)
                    sys.stdout.write("\n")
        sys.stdout.write("\n")
        # calculate change in lambda for each voxel
        self.lambda_conv = np.abs(self.lambd - lambda_prev) 
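Both methods above use the same pattern: wrap the local objective in a callable and hand it to cNM.constrNM together with box bounds, passing any fixed data along via args (as the calls above do with thr_k and vox). A minimal self-contained sketch of that pattern, with a hypothetical one-parameter objective standing in for lambda_func:

import constrNMPy as cNM

# hypothetical stand-in for the per-voxel objective; extra data rides along via args
def toy_lambda_func(lam, vox):
    return (lam[0] - 0.7)**2  # smooth bowl with its minimum at 0.7

res = cNM.constrNM(toy_lambda_func, [0.5], [0.], [1.], args=[0], full_output=True)
print(res['xopt'])  # expected to land near [0.7]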
# NOTE: assumes numpy as np, constrNM from constrNMPy, and the helper functions
# bin_dat, do_smooth_with_gaussian and pk_mod_fun are defined/imported elsewhere.
def fit_to_g_off(dat, user_std, user_p0=np.array([])):

    if dat.size < 32:
        raise ValueError('Need at least 32 counts in peak; got %i' % dat.size)
        
    std = user_std
        
    xs,ys = bin_dat(dat)
    
    ys_smoothed = do_smooth_with_gaussian(ys, std)

    # least-squares objective: squared residuals of the peak model against the smoothed counts
    opt_fun = lambda p: np.sum(np.square(pk_mod_fun(xs, *p) - ys_smoothed))

    # look for the peak within the central half of the binned data
    N4 = ys_smoothed.size//4
    mx_idx = np.argmax(ys_smoothed[N4:(3*N4)]) + N4
    
    if user_p0.size == 0:
        # default guess: [amplitude, center, sigma, baseline]
        p0 = np.array([ys_smoothed[mx_idx] - np.min(ys_smoothed), xs[mx_idx], 0.015, np.percentile(ys_smoothed, 20)])
    else:
        p0 = user_p0
        
    # parameter order matches b_model2(x, amp_g, x0, sigma, b)
    lbs = np.array([0,       np.percentile(xs, 10), 0.005, 0])
    ubs = np.array([2*p0[0], np.percentile(xs, 90), 0.50,  p0[0]])

    # force the guess into bounds: the middle column of the row-wise sort of
    # [lb, p0, ub] is the elementwise median, i.e. p0 clipped to [lbs, ubs]
    p_guess = np.sort(np.c_[lbs, p0, ubs])[:, 1]
    
    
    ret_dict = constrNM(opt_fun, p_guess, lbs, ubs, xtol=1e-5, ftol=1e-12, maxiter=1024, maxfun=1024, full_output=1, disp=0)
    
    # return fitted parameters [amplitude, center, sigma, baseline], magnitudes only
    return np.abs(ret_dict['xopt'])
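A note on the force-in-bounds trick above: sorting each row of np.c_[lbs, p0, ubs] and taking the middle column computes the elementwise median, which is exactly p0 clipped into [lbs, ubs]. A quick sanity check (the values here are illustrative only):

import numpy as np

lbs = np.array([0.0, 1.0])
ubs = np.array([1.0, 2.0])
p0 = np.array([1.5, 0.5])  # both entries out of bounds

print(np.sort(np.c_[lbs, p0, ubs])[:, 1])  # [1. 1.]
print(np.clip(p0, lbs, ubs))               # [1. 1.] -- identical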
Example #4
"""Example script for a simple optimization using constrNM.

We want to find the minimum of the Rosenbrock function subject to the constraints 2 < x and 2 < y < 3.

The two-dimensional Rosenbrock function is already implemented in ``test_funcs.rosenbrock``.

"""

import constrNMPy as cNM

# Define initial guess
x0 = [2.5, 2.5]

# Define lower and upper bounds (None leaves that side unconstrained)
LB = [2, 2]
UB = [None, 3]

# Call optimizer
res = cNM.constrNM(cNM.test_funcs.rosenbrock, x0, LB, UB, full_output=True)

# Print results
cNM.printDict(res)
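With these bounds the unconstrained Rosenbrock minimum at (1, 1) is infeasible, so the search should end on the boundary; the test in Example #6 below asserts that res['xopt'] comes out close to [2., 3.].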
Example #5
"""Example script for a simple optimization using constrNM.

We want to find the minimum of Beale's function subject to the constraints -5 < x < 2.5 and -5 < y.

The two-dimensional Beale's function is already implemented in ``test_funcs.beales``.

"""

import constrNMPy as cNM

# Define initial guess
x0 = [2, 5]

# Define lower and upper bounds
LB = [-5, -5]
UB = [2.5, None]

# Call optimizer
res = cNM.constrNM(cNM.test_funcs.beales, x0, LB, UB, full_output=True)

# Print results
cNM.printDict(res)
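Note that the upper bound x < 2.5 excludes the global minimum of Beale's function at (3, 0.5), so the result must land elsewhere; contrast this with the looser box in Example #7 below, where that minimum is feasible and recovered.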
Example #6
import numpy as np
import constrNMPy as cNM


def test_constrNM_rosenbrock_non_global():

    res = cNM.constrNM(cNM.test_funcs.rosenbrock, [2.5, 2.5], [2, 2], [None, 3],
                       full_output=True)
    assert abs(res['xopt'] - np.array([2., 3.])).sum() < 1E-3
Example #7
import numpy as np
import constrNMPy as cNM


def test_constrNM_beales():

    res = cNM.constrNM(cNM.test_funcs.beales, [0.5, 1], [-5, -5], [10, None],
                       full_output=False)

    assert abs(res['xopt'] - np.array([3., .5])).sum() < 1E-3
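For comparison, a sketch of the same problem with SciPy's plain (unconstrained) Nelder-Mead, which constrNMPy builds on; the Beale's formula below is the standard one, written out here rather than taken from test_funcs:

import scipy.optimize as sco

def beale(x):
    # two-dimensional Beale's function; global minimum f = 0 at (3, 0.5)
    return ((1.5 - x[0] + x[0]*x[1])**2
            + (2.25 - x[0] + x[0]*x[1]**2)**2
            + (2.625 - x[0] + x[0]*x[1]**3)**2)

xopt = sco.fmin(beale, [0.5, 1], disp=False)
print(xopt)  # from this start, expected to approach [3., 0.5]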
Example #8
# NOTE: this snippet assumes an objective function ``obj(x, a)`` and its
# parameter ``a`` are defined earlier in the script.
import numpy as np
import matplotlib.pyplot as plt
import constrNMPy as cNM

# Create figure
fig = plt.figure()
fig.show()

# Evaluate the objective on a grid for plotting
X, Y = np.meshgrid(np.linspace(-10, 10, 100), np.linspace(-10, 10, 100))
x = np.array([X.flatten(), Y.flatten()])

f = obj(x, a)
F = f.reshape(100, 100)

# Plot objective function
ax = fig.add_subplot(111)
p1 = ax.contourf(X, Y, F, levels=np.linspace(f.min(), f.max(), 100))
plt.draw()

# Define initial guess
x0 = [2.5, 2.5]

# Define lower and upper bounds
LB = [-10, -10]
UB = [None, 10]

# Call optimizer
res = cNM.constrNM(obj, x0, LB, UB, full_output=True, args=[a])

# Print results
cNM.printDict(res)
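To close the loop visually, the optimum returned by constrNM can be marked on the contour plot (reusing ax and res from above):

# mark the constrained optimum on the contour plot
ax.plot(res['xopt'][0], res['xopt'][1], 'rx', markersize=10)
plt.draw()
plt.show()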