def spline_1D(xarr,yarr,invar,breakpoints,order=4):
    """ Returns the spline fit to xarr, yarr with inverse variance invar for
        the specified breakpoint array (and optionally spline order)
        
        Assumes independent data points.
    """
    B_spline_matrix = basic_spline(xarr,breakpoints,order=order)
    coeffs, chi = sf.chi_fit(yarr,B_spline_matrix.T,np.diag(invar))
    spline_interp = np.dot(B_spline_matrix.T,coeffs)[:,0]
    return spline_interp
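# Hedged sketch of the weighted least-squares step that spline_1D delegates to sf.chi_fit
# (illustration only; _demo_weighted_spline_fit and the plain-numpy solver are not part of
# the pipeline).  It assumes sf.chi_fit solves the normal equations (A^T N A) c = A^T N y,
# where A is the design matrix (here basic_spline(...).T) and N = diag(invar).
def _demo_weighted_spline_fit(A, y, invar):
    """ A: (npts x nbases) design matrix, y: (npts,) data, invar: (npts,) inverse variances.
        Returns the best-fit coefficients and the model evaluated at the data points.
    """
    import numpy as np
    N = np.diag(invar)
    lhs = A.T.dot(N).dot(A)   #(A^T N A)
    rhs = A.T.dot(N).dot(y)   #A^T N y
    coeffs = np.linalg.solve(lhs, rhs)
    return coeffs, A.dot(coeffs)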
def spline_2D(img_matrix,invar_matrix,h_breakpoints,v_breakpoints,order=4,return_coeffs=False):
    """ Returns a 2D spline interpolation of an image.  Option to also return scaled spline_coeffs
    
        Assumes a pixel-based image (i.e., integer horizontal and vertical
        steps between points)
        Assumes pixel errors are independent
        
        Note, computational time scales up very quickly, suggest modest image
        sizes (~100 x 100 px or less)
    """
    hpix = np.shape(img_matrix)[1] #horizontal pixels
    vpix = np.shape(img_matrix)[0] #vertical pixels
    harr = np.arange(hpix)
    varr = np.arange(vpix)
    ### Given h, v arrays and breakpoints, find splines along both directions
    h_splines = basic_spline(harr,h_breakpoints,order=order)
    v_splines = basic_spline(varr,v_breakpoints,order=order)
    h_sp_len = np.shape(h_splines)[0]
    v_sp_len = np.shape(v_splines)[0]
    dim1 = hpix*vpix
    dim2 = h_sp_len * v_sp_len
    ### Use the h and v splines to construct a 2D profile matrix for linear fitting
    profile_matrix = np.zeros((vpix,hpix,dim2))
    for i in range(h_sp_len):
        for j in range(v_sp_len):
            k = i*v_sp_len + j
            h_sp_tmp = np.reshape(h_splines[i],(1,hpix))
            v_sp_tmp = np.reshape(v_splines[j],(vpix,1))
            profile_matrix[:,:,k] = np.dot(v_sp_tmp,h_sp_tmp)
    
    #Reshape the profile matrix and input image and invar matrices for chi^2 fitting
    profile_matrix = np.reshape(profile_matrix,(dim1,dim2))
    data = np.reshape(img_matrix,(dim1,))
    noise = np.diag(np.reshape(invar_matrix,(dim1,)))
    #Chi^2 fit
    a_coeffs, chi = sf.chi_fit(data,profile_matrix,noise)
    #Evaluate spline fit and reshape back to 2D array
    spline_fit = np.dot(profile_matrix,a_coeffs)
    spline_fit = np.reshape(spline_fit,(vpix,hpix))
    spline_integral = np.sum(spline_fit)
    a_coeffs_scale = a_coeffs/spline_integral
    if return_coeffs:
        return spline_fit, a_coeffs_scale, spline_integral
    else:        
        return spline_fit
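# The 2D basis functions in spline_2D are tensor products of the 1D splines: column k of
# the flattened profile matrix is the outer product of one vertical basis with one
# horizontal basis.  Minimal sketch of that construction (illustration only; the inputs
# are stand-ins shaped like the output of basic_spline, n_bases x npix):
def _demo_tensor_product_basis(h_splines, v_splines):
    import numpy as np
    hpix, vpix = h_splines.shape[1], v_splines.shape[1]
    n_h, n_v = h_splines.shape[0], v_splines.shape[0]
    profile = np.zeros((vpix*hpix, n_h*n_v))
    for i in range(n_h):
        for j in range(n_v):
            k = i*n_v + j   #same column ordering as spline_2D
            profile[:, k] = np.outer(v_splines[j], h_splines[i]).ravel()
    return profile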
def fit_spline_psf(raw_img,hcenters,vcenters,sigmas,powers,readnoise,
                   gain,plot_results=False,verbose=False):
    """ function to fit parameters for radial bspline psf.
    """
    ### 1. Estimate spline amplitudes, centers, w/ circular model
    actypix = raw_img.shape[1]
    #r_breakpoints = [0, 1.2, 2.5, 3.7, 5, 8, 10]
    ## 2.3, 3
    #r_breakpoints = np.hstack(([0, 1.5, 2.4, 3],np.arange(3.5,10,0.5))) #For cpad=8
    
    ##########################################################################
    ############## All this hardcoded stuff should be flexible (TODO) ########    
    ##########################################################################
    
#    r_breakpoints = np.hstack(([0, 1.5, 2.4, 3],np.arange(3.5,6.6,1))) #For cpad=5
    r_breakpoints = np.hstack(([0, 1.5, 2.4, 3],np.arange(3.5,8.6,1))) #For cpad=6
    #r_breakpoints = np.hstack(([0, 1.2, 2.3, 3],np.arange(3.5,10,0.5))) #For cpad=8
    theta_orders = [0]
    cpad = 6
    bp_space = 2 #breakpoint spacing in pixels
    invar = 1/(raw_img+readnoise**2)
    ### Initial spline coeff guess
    
    spl_coeffs, s_scale, fit_params, new_hcenters, new_vcenters = spline_coeff_fit(raw_img,hcenters,vcenters,invar,r_breakpoints,sigmas,powers,theta_orders=theta_orders,cpad=cpad,bp_space=bp_space,return_new_centers=True)
    
    #'''
    ### 2. Set up and initialize while loop (other steps embedded within loop)
    num_bases = spl_coeffs.shape[1]
    new_hscale = (new_hcenters-actypix/2)/actypix
    peak_mask = np.ones((len(new_hscale)),dtype=bool) #Can be used to mask "bad" peaks
    params1 = lmfit.Parameters()
    ### Loop to add horizontal/vertical centers
    for j in range(len(new_hscale)):
        harr = np.arange(-cpad,cpad+1)+int(np.floor(new_hcenters[j]))
        varr = np.arange(-cpad,cpad+1)+int(np.floor(new_vcenters[j])) ### Shouldn't need +1...
        params1.add('vc{}'.format(j), value = new_vcenters[j]-varr[0])
        params1.add('hc{}'.format(j), value = new_hcenters[j]-harr[0])
    ### and add initial elliptical parameter guesses (for quadratic variation)
    params1.add('q0', value=0.9, min=0, max=1)
    params1.add('PA0', value=0, min=-np.pi, max=np.pi)
    params1.add('q1', value=0, min=-1, max=1)
    params1.add('PA1', value=0, min=-np.pi, max=np.pi)
    params1.add('q2', value=0, min=-1, max=1)
    params1.add('PA2', value=0, min=-np.pi, max=np.pi)
    params = lmfit.Parameters()
    params.add('hc', value = params1['hc0'].value)
    params.add('vc', value = params1['vc0'].value)
    params.add('q', value = 1, min=0, max=1)
    params.add('PA', value=0, min=-np.pi, max=np.pi)
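    ### Note on the two parameter sets: params1 holds the global quantities (one hc/vc
    ### pair per peak plus the quadratic q/PA coefficients), while params is a small
    ### per-peak working copy (hc, vc, q, PA) that is refreshed inside the loops below
    ### before each call to the spline routines.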
    
    ### Start while loop - iterate until convergence
    chi_new = np.ones((sum(peak_mask))) #Can build this from first fit if desired
    chi_old = np.zeros((sum(peak_mask)))
    chi_min = 100
    coeff_matrix_min = np.zeros((3,np.shape(spl_coeffs)[1])).T
    params_min = lmfit.Parameters()
    dlt_chi = 1e-3 #difference between successive chi_squared values to cut off
    mx_loops = 50 #eventually must cutoff
    loop_cnt = 0
    fit_bg = False ## True fits a constant background at each subimage
    while abs(np.sum(chi_new)-np.sum(chi_old)) > dlt_chi and loop_cnt < mx_loops:
        if verbose:
            print("starting loop {}".format(loop_cnt))
            print("  chi_old mean = {}".format(np.mean(chi_old)))
            print("  chi_new mean = {}".format(np.mean(chi_new)))
            print("  delta_chi = {}".format((np.sum(chi_new)-np.sum(chi_old))))
        chi_old = np.copy(chi_new)
    ### 3. Build profile, data, and noise matrices at each pixel point and sum
        dim_s = (2*cpad+1)**2
        dim_h = sum(peak_mask)*dim_s
        profile_matrix = np.zeros((dim_h,3*num_bases+fit_bg*len(new_hscale))) #hardcoded for quadratic
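        ### Column layout of profile_matrix: the first num_bases columns hold the radial
        ### basis, the next num_bases hold the basis times the scaled x coordinate, and
        ### the last num_bases the basis times x^2, so each spline coefficient can vary
        ### quadratically along the order.  If fit_bg is True, one extra constant
        ### background column is appended per peak.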
    #    last_profile = np.zeros((dim_s,3*num_bases+fit_bg))
        data_array = np.zeros((dim_h))
        noise_array = np.zeros((dim_h))
        data_for_fitting = np.zeros((2*cpad+1,2*cpad+1,len(new_hscale)))
        invar_for_fitting = np.zeros((2*cpad+1,2*cpad+1,len(new_hscale)))
        d_scale = np.zeros(len(new_hscale)) # Will build from data
#        bg_data = np.zeros(len(new_hscale))
        for k in range(len(new_hscale)):
            ### Slice subset of image data around each peak
            harr = np.arange(-cpad,cpad+1)+int(np.floor(new_hcenters[k]))
            varr = np.arange(-cpad,cpad+1)+int(np.floor(new_vcenters[k]))
            harr = harr[harr>=0]
            harr = harr[harr<raw_img.shape[1]]
            varr = varr[varr>=0]
            varr = varr[varr<raw_img.shape[0]]
            data_for_fitting[:,:,k] = raw_img[varr[0]:varr[-1]+1,harr[0]:harr[-1]+1]#/s_scale[k]
#            invar_for_fitting[:,:,k] = invar[varr[0]:varr[-1]+1,harr[0]:harr[-1]+1]#/s_scale[k]
            d_scale[k] = np.sum(data_for_fitting[:,:,k])
            invar_for_fitting[:,:,k] = s_scale[k]/(abs(data_for_fitting[:,:,k])+readnoise**2/s_scale[k])
#            rarr = sf.make_rarr(np.arange(2*cpad+1),np.arange(2*cpad+1),cpad,cpad)
#            bg_mask = rarr > 3
#            bg_data[k] = poisson_bg(data_for_fitting[:,:,k],mask=bg_mask)
        ### bound s_scale to (hopefully) prevent runaway growth
    #    for k in range(len(new_hscale)):
    #        sig_factor = 1 #Constrain s_scale to be within this man stddevs
    #        d_min = d_scale[k]-np.sqrt(d_scale[k])*sig_factor
    #        d_max = d_scale[k]+np.sqrt(d_scale[k])*sig_factor
    #        if s_scale[k] < d_min:
    #            s_scale[k] = d_min
    #        elif s_scale[k] > d_max:
    #            s_scale[k] = d_max
    #    s_scale *= np.sum(d_scale)/np.sum(s_scale)
        for k in range(len(new_hscale)):
            ### Pull in best center estimates
            params['hc'].value = params1['hc{}'.format(k)].value
            params['vc'].value = params1['vc{}'.format(k)].value
            ### Pull in best elliptical parameter estimates
            if loop_cnt == 0:
                params['q'].value = 1
            else:            
                params['q'].value = params1['q0'].value + params1['q1'].value*new_hscale[k] + params1['q2'].value*new_hscale[k]**2
            params['PA'].value = params1['PA0'].value + params1['PA1'].value*new_hscale[k] + params1['PA2'].value*new_hscale[k]**2
            ### Scale data
#            data_for_fitting[:,:,k] -= bg_data[k] ### remove bg first
            data_for_fitting[:,:,k] /= s_scale[k]
#            invar_for_fitting[:,:,k] *= s_scale[k]
            ### Setup arrays for spline analysis
            r_arr, theta_arr, dim1, r_inds = spline.build_rarr_thetaarr(data_for_fitting[:,:,k],params)
            ### Build data, noise, and profile array
            data_array[k*dim_s:(k+1)*dim_s] = np.ravel(data_for_fitting[:,:,k])[r_inds] #scaled, sorted data array
            noise_array[k*dim_s:(k+1)*dim_s] = np.ravel(invar_for_fitting[:,:,k])[r_inds]
            profile_base = spline.build_radial_profile(r_arr,theta_arr,r_breakpoints,theta_orders,(2*cpad+1)**2,order=4)
            profile_matrix[k*dim_s:(k+1)*dim_s,0:num_bases] = profile_base
            profile_matrix[k*dim_s:(k+1)*dim_s,num_bases:2*num_bases] = profile_base*new_hscale[k]
            profile_matrix[k*dim_s:(k+1)*dim_s,2*num_bases:3*num_bases] = profile_base*(new_hscale[k]**2)
            if fit_bg:
                profile_matrix[k*dim_s:(k+1)*dim_s,3*num_bases+k*fit_bg] = 1
    #    plt.imshow(profile_matrix,interpolation='none')
    #    plt.show()
        ### 4. Using matrices from step 3. perform chi^2 fitting for coefficients
        next_coeffs, next_chi = sf.chi_fit(data_array,profile_matrix,np.diag(noise_array))
        if fit_bg:
            bg_array = next_coeffs[3*num_bases:]
#            print bg_array*s_scale
            trunc_coeffs = next_coeffs[0:3*num_bases]
        else:
            trunc_coeffs = np.copy(next_coeffs)
        dd2 = int(np.size(trunc_coeffs)/3)
        coeff_matrix = trunc_coeffs.reshape(3,dd2).T
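        ### coeff_matrix is (num_bases x 3); dotting it with [1, x, x^2] (see the np.dot
        ### call with new_hscale below) gives the spline coefficients at any scaled
        ### horizontal position x along the order.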
    #    if fit_bg: ### Don't save background fit term
    #        bg_array = coeff_matrix[:,-1]
    #        print bg_array*s_scale
    #        coeff_matrix = coeff_matrix[:,:-1]
    #    last_coeffs = np.dot(coeff_matrix,(np.vstack((ones(len(new_hscale)),new_hscale,new_hscale**2))))
        ### Check each of the profiles with next_coeffs + adjust scale factor
        profile_matrix = np.zeros((dim_s,3*num_bases+fit_bg*len(new_hscale))) #hardcoded for quadratic
        data_array = np.zeros((dim_h))
        noise_array = np.zeros((dim_h))
        chi2_first = np.zeros(len(new_hscale))
    #    fit_sums = 0
    #    print("Temp fit sums:")
        for k in range(len(new_hscale)):
            ### Pull in best center estimates
            params['hc'].value = params1['hc{}'.format(k)].value
            params['vc'].value = params1['vc{}'.format(k)].value
            ### Pull in best elliptical parameter estimates
            if loop_cnt == 0:
                params['q'].value = 1
            else:
                params['q'].value = params1['q0'].value + params1['q1'].value*new_hscale[k] + params1['q2'].value*new_hscale[k]**2
            params['PA'].value = params1['PA0'].value + params1['PA1'].value*new_hscale[k] + params1['PA2'].value*new_hscale[k]**2
            ### Setup arrays for spline analysis
            r_arr, theta_arr, dim1, r_inds = spline.build_rarr_thetaarr(data_for_fitting[:,:,k],params)
            ### Build data, noise, and profile array
            data_array[k*dim_s:(k+1)*dim_s] = np.ravel(data_for_fitting[:,:,k])[r_inds] #scaled, sorted data array
            noise_array[k*dim_s:(k+1)*dim_s] = np.ravel(invar_for_fitting[:,:,k])[r_inds]
            profile_base = spline.build_radial_profile(r_arr,theta_arr,r_breakpoints,theta_orders,(2*cpad+1)**2,order=4)
            profile_matrix[:,0:num_bases] = profile_base
            profile_matrix[:,num_bases:2*num_bases] = profile_base*new_hscale[k]
            profile_matrix[:,2*num_bases:3*num_bases] = profile_base*(new_hscale[k]**2)        
            if fit_bg:
                profile_matrix[:,3*num_bases:] = 0
                profile_matrix[:,3*num_bases+k*fit_bg] = 1
            tmp_fit = np.dot(profile_matrix,next_coeffs)
    #        print np.sum(tmp_fit)
    #        fit_sums += np.sum(tmp_fit)
            resort_inds = np.argsort(r_inds)
            tmp_fit = np.reshape(tmp_fit[resort_inds],data_for_fitting[:,:,k].shape)
        #    plt.figure("Arc, iteration {}".format(k))
        ##    plt.imshow(np.hstack((tmp_fit,small_img/s_scale[k])),interpolation='none')
            chi2_first[k] = np.sum(((tmp_fit-data_for_fitting[:,:,k])**2)*invar_for_fitting[:,:,k])#*s_scale[k]**2
        #    plt.imshow((tmp_fit-small_img/s_scale[k])*small_inv,interpolation='none')
        #    plt.show()
        #    plt.close()
    #    print "chi2 first:", chi2_first
    #    next_coeffs *= fit_sums/(k+1)
    #    s_scale /= fit_sums/(k+1)
        
        
        ### Optional place to check coefficients variation over order    
        #for i in range(8):
        #    plt.plot(new_hscale,last_coeffs[i])
        #
        #plt.show()
        #plt.close()
        
        #first_fit = np.dot(last_profile,next_coeffs)
        #print next_coeffs
        #print params['vc'].value
        #print params['hc'].value
        #print r_arr[0:10]
        #print profile_base[0]
        #print profile_matrix[0,:]/(k+1)
        #print last_profile[0]
        #print first_fit[0]
        #resort_inds = np.argsort(r_inds)
        #scale1 = np.max(small_img)/np.max(first_fit)
        ##print scale1, scale, scale1/scale
        #first_fit = np.reshape(first_fit[resort_inds],small_img.shape)
        #print np.sum(first_fit), k, scale1, s_scale[k]
        #first_fit /= np.sum(first_fit)
        ##plt.imshow(first_fit,interpolation='none')
        #plt.imshow(np.hstack((small_img/s_scale[k],first_fit,(small_img/s_scale[k]-first_fit)*small_inv)),interpolation='none')
        #plt.show()
        #plt.imshow((small_img/s_scale[k]-first_fit)*small_inv,interpolation='none')
        #plt.show()
        
        #test_xs = (np.arange(xpix)-xpix/2)/xpix
        #for i in range(num_bases):
        #    test_ys = next_coeffs[i]+next_coeffs[num_bases+i]*test_xs+next_coeffs[2*num_bases+i]*test_xs**2
        #    plt.plot(test_xs,test_ys)
        #plt.show()
        
    ### 5. Now do a nonlinear fit for hc, vc, q, and PA
        #data_for_lmfit = np.zeros((np.size(small_img),len(new_hscale)))
        #invar_for_lmfit = np.zeros((np.size(small_img),len(new_hscale)))
    #    for k in range(len(new_hscale)):
    #        harr = np.arange(-cpad,cpad+1)+int(np.floor(new_hcenters[k]))
    #        varr = np.arange(-cpad,cpad+1)+int(np.floor(new_vcenters[k]))
    #        data_for_lmfit[:,:,k] = raw_img[varr[0]:varr[-1]+1,harr[0]:harr[-1]+1]/s_scale[k]
    #        invar_for_lmfit[:,:,k] = invar[varr[0]:varr[-1]+1,harr[0]:harr[-1]+1]*(s_scale[k])
        #    r_arr, theta_arr, dim1, r_inds = spline.build_rarr_thetaarr(small_img,params)
        #    data_for_lmfit[:,k] = np.ravel(small_img)[r_inds]/s_scale[k]
        #    invar_for_lmfit[:,k] = np.ravel(small_inv)[r_inds]/np.sqrt(s_scale[k])
        #    resort_inds = np.argsort(r_inds)
        #    plt.imshow(np.resize(data_for_lmfit[:,k][resort_inds],np.shape(small_img)))
        #    plt.show()
        #    plt.close()
            
        ### Make proper inputs for minimizer function
        #centers = np.vstack((new_hcenters,new_vcenters)).T
        args = (data_for_fitting,invar_for_fitting,r_breakpoints,new_hscale,next_coeffs)
        kws = dict()
        kws['theta_orders'] = theta_orders
        kws['fit_bg'] = fit_bg
        minimizer_results = lmfit.minimize(spline.spline_poly_residuals,params1,args=args,kws=kws)
        ### Re-initialize params1, put in elliptical values.  Will add hc/vc at end
        ### (using mask, so #of values for centers will differ)
        params1['q0'].value = minimizer_results.params['q0'].value
        params1['q1'].value = minimizer_results.params['q1'].value
        params1['q2'].value = minimizer_results.params['q2'].value
        params1['PA0'].value = minimizer_results.params['PA0'].value
        params1['PA1'].value = minimizer_results.params['PA1'].value
        params1['PA2'].value = minimizer_results.params['PA2'].value
        #hc_ck = minimizer_results.params['hc0'].value + minimizer_results.params['hc1'].value*new_hscale + minimizer_results.params['hc2'].value*new_hscale**2
        #vc_ck = minimizer_results.params['vc0'].value + minimizer_results.params['vc1'].value*new_hscale + minimizer_results.params['vc2'].value*new_hscale**2
        q_ck = minimizer_results.params['q0'].value + minimizer_results.params['q1'].value*new_hscale + minimizer_results.params['q2'].value*new_hscale**2
        PA_ck = minimizer_results.params['PA0'].value + minimizer_results.params['PA1'].value*new_hscale + minimizer_results.params['PA2'].value*new_hscale**2
    #    print q_ck
    #    print PA_ck
        ### Convert so q is less than 1
        if np.max(q_ck) > 1:
            q_ck_tmp = 1/q_ck #change axis definition
            if np.max(q_ck_tmp) > 1:
                print "q array always over 1!"
            else:
                q_ck = q_ck_tmp
                PA_ck = PA_ck + np.pi/2 #change axis definition
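        ### (q and 1/q with PA rotated by pi/2 describe the same ellipse, so flipping
        ### the axis definition keeps the axis ratio in [0, 1] without changing the model)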
        q_coeffs = np.polyfit(new_hscale,q_ck,2)
        PA_coeffs = np.polyfit(new_hscale,PA_ck,2)
        params1['q0'].value = q_coeffs[2]
        params1['q1'].value = q_coeffs[1]
        params1['q2'].value = q_coeffs[0]
        params1['PA0'].value = PA_coeffs[2]
        params1['PA1'].value = PA_coeffs[1]
        params1['PA2'].value = PA_coeffs[0]
    #    print q_ck
    #    print PA_ck
        #plt.plot(np.arange(5),np.arange(5))
        #plt.show()
        #plt.plot(hc_ck,vc_ck,new_hcenters,new_vcenters)
        #plt.show()
        #ecc = minimizer_results.params['q'].value
        #pos_ang = minimizer_results.params['PA'].value
        
        
        ### Check to see if elliptical values worked out well
        chi_new = np.zeros(len(new_hscale))
        for i in range(len(new_hscale)):
            params['vc'].value = minimizer_results.params['vc{}'.format(i)].value
            params['hc'].value = minimizer_results.params['hc{}'.format(i)].value
        #    harr = np.arange(-cpad,cpad+1)+int(np.floor(new_hcenters[i]))
        #    varr = np.arange(-cpad,cpad+1)+int(np.floor(new_vcenters[i]))
        #    params['vc'].value = new_vcenters[i]-varr[0]+1
        #    params['hc'].value = new_hcenters[i]-harr[0]
            x_coord = new_hscale[i]
            img_matrix = data_for_fitting[:,:,i]
            invar_matrix = invar_for_fitting[:,:,i]
            q = params1['q0'].value + params1['q1'].value*x_coord + params1['q2'].value*x_coord**2
            PA = params1['PA0'].value + params1['PA1'].value*x_coord + params1['PA2'].value*x_coord**2
            params['q'].value = q
            params['PA'].value = PA
            sp_coeffs = np.dot(coeff_matrix,np.array(([1,new_hscale[i],new_hscale[i]**2])))
            if fit_bg:
                sp_coeffs = np.hstack((sp_coeffs,bg_array[i]))
        #    r_arr, theta_arr, dim1, r_inds = spline.build_rarr_thetaarr(small_img,params)
        #    profile_base = spline.build_radial_profile(r_arr,theta_arr,r_breakpoints,theta_orders,(2*cpad+1)**2,order=4)
        
            fitted_image = spline.spline_2D_radial(img_matrix,invar_matrix,r_breakpoints,params,theta_orders,order=4,return_coeffs=False,spline_coeffs=sp_coeffs,sscale=None,fit_bg=fit_bg)
            ### Update s_scale
            chi_new[i] = np.sum(((img_matrix-fitted_image)**2)*invar_matrix)*s_scale[i]/(np.size(img_matrix)-len(sp_coeffs)-2)#*s_scale[i]**2
#            print chi_new[i]
#            print s_scale[i]
#            print np.max(invar_matrix)*3.63**2/s_scale[i]
    #        print chi_new[i]*s_scale[i]
    #        print chi_new[i]*s_scale[i]**2
            ### Set new scale - drive sum of image toward unity
            s_scale[i] = s_scale[i]*np.sum(fitted_image)
#            plt.imshow(np.hstack((img_matrix,fitted_image)),interpolation='none')#,(img_matrix-fitted_image)*invar_matrix)),interpolation='none')
#            plt.imshow(invar_matrix,interpolation='none')
    #        plt.plot(img_matrix[:,5])
    #        plt.plot(fitted_image[:,5])
#            plt.show()
#            plt.close()
        
        #print chi2_first
        #print chi2_second
        #print s_scale
        #print s_scale2
        
        ### Mask/eliminate points with high chi2
        peak_mask = sf.sigma_clip(chi_new,sigma=3,max_iters=1)
        if sum(peak_mask) < 4:
            print("Too few peaks for fitting")
            exit(0)
    #        break
        ### Update new_hscale, s_scale, new_h/vcenters
        s_scale = s_scale[peak_mask]
        cnts = len(new_hscale)
        new_hscale = np.zeros((sum(peak_mask)))
        lp_idx = 0
        for j in range(cnts):
            if not peak_mask[j]:
                if verbose:
                    print "skipping point {}".format(j)
                continue
            else:
                harr = np.arange(-cpad,cpad+1)+int(np.floor(new_hcenters[j]))
                params1.add('hc{}'.format(lp_idx), value = minimizer_results.params['hc{}'.format(j)].value)
                params1.add('vc{}'.format(lp_idx), value = minimizer_results.params['vc{}'.format(j)].value)
                new_hscale[lp_idx] = (params1['hc{}'.format(lp_idx)].value+harr[0]-1-actypix/2)/actypix
                lp_idx += 1
        new_hcenters = new_hcenters[peak_mask]
        new_vcenters = new_vcenters[peak_mask]    
        ### Record minimum values (some subsequent iterations give higher chi2)
        if loop_cnt == 0:
            coeff_matrix_min = np.copy(coeff_matrix)
            import copy
            params_min = copy.deepcopy(params1) #snapshot copy of the current parameters
        if np.sum(chi_new) < chi_min:
            if verbose:
                print "Better fit on loop ", loop_cnt
            chi_min = np.sum(chi_new)
            coeff_matrix_min = np.copy(coeff_matrix)
            import copy
            params_min = copy.deepcopy(params1) #snapshot copy of the current parameters
        loop_cnt += 1
    
    ### End of loop
    if verbose:
        print("End of Loop")
    ### Check that q, PA, aren't driving toward unphysical answers
#    test_hscale = np.arange(-1,1,0.01)
    #q = params_min['q0'].value + params_min['q1'].value*test_hscale + params_min['q2'].value*test_hscale**2
    #PA = params_min['PA0'].value + params_min['PA1'].value*test_hscale + params_min['PA2'].value*test_hscale**2
    #bg = coeff_matrix_min[0,-1] + coeff_matrix_min[1,-1]*test_hscale + coeff_matrix_min[2,-1]*test_hscale**2
    #plt.plot(test_hscale,q)
    #plt.show()
    #plt.plot(test_hscale,PA)
    #plt.show()
    #plt.plot(test_hscale,bg)
    #plt.show()
    #plt.close()    
    
    if plot_results:
        ### Plot final answers for evaluation
        for i in range(len(new_hscale)):
            params['vc'].value = minimizer_results.params['vc{}'.format(i)].value
            params['hc'].value = minimizer_results.params['hc{}'.format(i)].value
        #    harr = np.arange(-cpad,cpad+1)+int(np.floor(new_hcenters[i]))
        #    varr = np.arange(-cpad,cpad+1)+int(np.floor(new_vcenters[i]))
        #    params['vc'].value = new_vcenters[i]-varr[0]+1
        #    params['hc'].value = new_hcenters[i]-harr[0]
            x_coord = new_hscale[i]
            img_matrix = data_for_fitting[:,:,i]
            invar_matrix = invar_for_fitting[:,:,i]
            q = params_min['q0'].value + params_min['q1'].value*x_coord + params_min['q2'].value*x_coord**2
            PA = params_min['PA0'].value + params_min['PA1'].value*x_coord + params_min['PA2'].value*x_coord**2
            params['q'].value = q
            params['PA'].value = PA
            sp_coeffs = np.dot(coeff_matrix_min,np.array(([1,new_hscale[i],new_hscale[i]**2])))
            if fit_bg:
                sp_coeffs = np.hstack((sp_coeffs,bg_array[i]))
        #    r_arr, theta_arr, dim1, r_inds = spline.build_rarr_thetaarr(small_img,params)
        #    profile_base = spline.build_radial_profile(r_arr,theta_arr,r_breakpoints,theta_orders,(2*cpad+1)**2,order=4)
        
            fitted_image = spline.spline_2D_radial(img_matrix,invar_matrix,r_breakpoints,params,theta_orders,order=4,return_coeffs=False,spline_coeffs=sp_coeffs,sscale=None,fit_bg=fit_bg)
            ### Update s_scale
        #        print chi_new[i]
        #        print chi_new[i]*s_scale[i]
        #        print chi_new[i]*s_scale[i]**2
            chi_sq_red = np.sum(((img_matrix-fitted_image))**2*invar_matrix)/(np.size(img_matrix)-len(sp_coeffs)-2)*(s_scale[i])
            print "Reduced Chi^2 on iteration ", i, " is: ", chi_sq_red
#            plt.plot(fitted_image[:,cpad]/np.max(fitted_image[:,cpad]))
#            plt.plot(np.sum(fitted_image,axis=1)/np.max(np.sum(fitted_image,axis=1)))
#            plt.show()
#            plt.imshow(np.hstack((img_matrix,fitted_image,(img_matrix-fitted_image))),interpolation='none')
            plt.imshow((img_matrix-fitted_image)*invar_matrix,interpolation='none')
        #    plt.imshow((img_matrix-fitted_image)*invar_matrix,interpolation='none')
            plt.show()
            plt.close()
            
    centers, ellipse = params_to_array(params_min)
    results = np.hstack((np.ravel(coeff_matrix_min),np.ravel(ellipse)))    
    return results
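# Hedged sketch of how the quadratically varying spline coefficients fit above can be
# evaluated at an arbitrary position along the order (illustration only; _demo_eval_coeffs
# is not part of the pipeline).  coeff_matrix is (num_bases x 3), as built in
# fit_spline_psf, and x is the scaled horizontal coordinate, (hcenter - actypix/2)/actypix.
def _demo_eval_coeffs(coeff_matrix, x):
    import numpy as np
    return np.dot(coeff_matrix, np.array([1.0, x, x**2]))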
def fit_trace(x,y,ccd,form='gaussian'):
    """quadratic fit (in x) to trace around x,y in ccd
       x,y are integer pixel values
       input "form" can be set to quadratic or gaussian
    """
    x = int(x)
    y = int(y)
    if form=='quadratic':
        xpad = 2
        xvals = np.arange(-xpad,xpad+1)
        def make_chi_profile(x,y,ccd):
            xpad = 2
            xvals = np.arange(-xpad,xpad+1)
            zvals = ccd[x+xvals,y]
            profile = np.ones((2*xpad+1,3)) #Quadratic fit
            profile[:,1] = xvals
            profile[:,2] = xvals**2
            noise = np.diag((1/zvals))
            return zvals, profile, noise
        zvals, profile, noise = make_chi_profile(x,y,ccd)
        coeffs, chi = sf.chi_fit(zvals,profile,noise)
    #    print x
    #    print xvals
    #    print x+xvals
    #    print zvals
    #    plt.errorbar(x+xvals,zvals,yerr=sqrt(zvals))
    #    plt.plot(x+xvals,coeffs[2]*xvals**2+coeffs[1]*xvals+coeffs[0])
    #    plt.show()
        chi_max = 100
        if chi>chi_max:
            #print("bad fit, chi^2 = {}".format(chi))
            #try adjacent x
            xl = x-1
            xr = x+1
            zl, pl, nl = make_chi_profile(xl,y,ccd)
            zr, pr, nr = make_chi_profile(xr,y,ccd)
            cl, chil = sf.chi_fit(zl,pl,nl)
            cr, chir = sf.chi_fit(zr,pr,nr)
            if chil<chi and chil<chir:
    #            plt.errorbar(xvals-1,zl,yerr=sqrt(zl))
    #            plt.plot(xvals-1,cl[2]*(xvals-1)**2+cl[1]*(xvals-1)+cl[0])
    #            plt.show()
                xnl = -cl[1]/(2*cl[2])
                znl = cl[2]*xnl**2+cl[1]*xnl+cl[0]
                return xl+xnl, znl, chil
            elif chir<chi and chir<chil:
                xnr = -cr[1]/(2*cr[2])
                znr = cr[2]*xnr**2+cr[1]*xnr+cr[0]
    #            plt.errorbar(xvals+1,zr,yerr=sqrt(zr))
    #            plt.plot(xvals+1,cr[2]*(xvals+1)**2+cr[1]*(xvals+1)+cr[0])
    #            plt.show()
                return xr+xnr, znr, chir
            else:
                ca = coeffs[2]
                cb = coeffs[1]
                xc = -cb/(2*ca)
                zc = ca*xc**2+cb*xc+coeffs[0]
                return x+xc, zc, chi
        else:
            ca = coeffs[2]
            cb = coeffs[1]
            xc = -cb/(2*ca)
            zc = ca*xc**2+cb*xc+coeffs[0]
            return x+xc, zc, chi
    elif form=='gaussian':
        xpad = 7
        xvals = np.arange(-xpad,xpad+1)
        xinds = x+xvals
        xvals = xvals[(xinds>=0)*(xinds<np.shape(ccd)[0])]
        zvals = ccd[x+xvals,y]
        params, errarr = sf.gauss_fit(xvals,zvals)
        xc = x+params[1] #offset plus center
        zc = params[2] #height (intensity)
#        pxn = np.linspace(xvals[0],xvals[-1],1000)
        fit = sf.gaussian(xvals,abs(params[0]),params[1],params[2],params[3],params[4])
        chi = sum((fit-zvals)**2/zvals)
        return xc, zc, abs(params[0]), params[4], chi #center, intensity, sigma, power, chi^2
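# Hedged sketch of the sub-pixel centering used in the quadratic branch of fit_trace above
# (illustration only; _demo_parabola_vertex is not part of the pipeline).  Given quadratic
# coefficients z ~ c0 + c1*x + c2*x**2 from the chi^2 fit, the peak lies at the parabola
# vertex x_c = -c1/(2*c2) with height z_c = c0 + c1*x_c + c2*x_c**2.
def _demo_parabola_vertex(c0, c1, c2):
    xc = -c1/(2.0*c2)
    zc = c0 + c1*xc + c2*xc**2
    return xc, zc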
def refine_trace_centers(ccd, t_coeffs, i_coeffs, s_coeffs, p_coeffs, fact=10, readnoise=3.63, verbose=False):
    """ Uses estimated centers from fibers flats as starting point, then
        fits from there to find traces based on science ccd frame.
        INPUTS:
            ccd - image on which to fit traces
            t/i/s/p_coeffs - modified gaussian coefficients from fiberflat
            fact - do 1/fact of the available points
    """
    num_fibers = t_coeffs.shape[0]
    hpix = ccd.shape[1]
    vpix = ccd.shape[0]
    ### First fit vc parameters for traces
    rough_pts = int(np.ceil(hpix/fact))
    vc_ccd = np.zeros((num_fibers,rough_pts))
    hc_ccd = np.zeros((num_fibers,rough_pts))
    inv_chi = np.zeros((num_fibers,rough_pts))
    yspec = np.arange(hpix)
    if verbose:
        print("Refining trace centers")
    for i in range(num_fibers):
        if verbose:
            print("Running on index {}".format(i))
    #    slit_num = np.floor((i)/args.telescopes)
        for j in range(0,hpix,fact):
            jadj = int(np.floor(j/fact))
            yj = (yspec[j]-hpix/2)/hpix
            hc_ccd[i,jadj] = yspec[j]
            vc = t_coeffs[2,i]*yj**2+t_coeffs[1,i]*yj+t_coeffs[0,i]
#            Ij = i_coeffs[2,i]*yj**2+i_coeffs[1,i]*yj+i_coeffs[0,i]
            sigj = s_coeffs[2,i]*yj**2+s_coeffs[1,i]*yj+s_coeffs[0,i]
            powj = p_coeffs[2,i]*yj**2+p_coeffs[1,i]*yj+p_coeffs[0,i]
            if np.isnan(vc):
                vc_ccd[i,jadj] = np.nan
                inv_chi[i,jadj] = 0
            else:
                xpad = 7
                xvals = np.arange(-xpad,xpad+1)
                xj = int(vc)
                xwindow = xj+xvals
                xvals = xvals[(xwindow>=0)*(xwindow<vpix)]
                zorig = ccd[xj+xvals,yspec[j]]
                if len(zorig)<1:
                    vc_ccd[i,jadj] = np.nan
                    inv_chi[i,jadj] = 0
                    continue
                invorig = 1/(abs(zorig)+readnoise**2)
                if np.max(zorig)<20:
                    vc_ccd[i,jadj] = np.nan
                    inv_chi[i,jadj] = 0
                else:
                    mn_new, hght, bg = fit_mn_hght_bg(xvals, zorig, invorig, sigj, vc-xj-1, sigj, powj=powj)
                    fitorig = sf.gaussian(xvals,sigj,mn_new,hght,power=powj)
                    if j == 715:
                        print(mn_new)
                        plt.plot(xvals,zorig,xvals,fitorig)
                        plt.show()
                        plt.close()
                    inv_chi[i,jadj] = 1/sum((zorig-fitorig)**2*invorig)
                    vc_ccd[i,jadj] = mn_new+xj+1
                    
    
    tmp_poly_ord = 10
    trace_coeffs_ccd = np.zeros((tmp_poly_ord+1,num_fibers))
    for i in range(num_fibers):
        mask = ~np.isnan(vc_ccd[i,:])
        profile = np.ones((len(hc_ccd[i,:][mask]),tmp_poly_ord+1)) #Polynomial fit of order tmp_poly_ord
        for order in range(tmp_poly_ord):
            profile[:,order+1] = ((hc_ccd[i,:][mask]-hpix/2)/hpix)**(order+1)
        noise = np.diag(inv_chi[i,:][mask])
        if len(vc_ccd[i,:][mask])>3:
            tmp_coeffs, junk = sf.chi_fit(vc_ccd[i,:][mask],profile,noise)
        else:
            tmp_coeffs = np.nan*np.ones((tmp_poly_ord+1))
        trace_coeffs_ccd[:,i] = tmp_coeffs 
    return trace_coeffs_ccd
def extract_1D(ccd, t_coeffs, i_coeffs=None, s_coeffs=None, p_coeffs=None, readnoise=1, gain=1, return_model=False, verbose=False):
    """ Function to extract using optimal extraction method.
        This could benefit from a lot of cleaning up
        INPUTS:
        ccd - ccd image to extract
        t_coeffs - estimate of trace coefficients (from 'find_t_coeffs')
        i/s/p_coeffs - optional intensity, sigma, power coefficients
        readnoise, gain - of the ccd
        return_model - set True to return model of image based on extraction
        OUTPUTS:
        spec - extracted spectrum (n x hpix) where n is number of traces
        spec_invar - inverse variance at each point in extracted spectrum
        spec_mask - mask for invalid/suspect points in spectrum
        image_model - only if return_model = True. 
    """
#    def extract(ccd,t_coeffs,i_coeffs=None,s_coeffs=None,p_coeffs=None,readnoise=1,gain=1,return_model=False,fact,verbose=False):
#        """ Extraction.
#        """
    
    ### t_coeffs are from fiber flat - need to shift based on actual exposure
        
    ####################################################
    ###   Prep Needed variables/empty arrays   #########
    ####################################################
    ### CCD dimensions and number of fibers
    hpix = np.shape(ccd)[1]
    vpix = np.shape(ccd)[0]
    num_fibers = np.shape(t_coeffs)[1]

    ####################################################    
    #####   First refine horizontal centers (fit   #####
    #####   traces from data ccd using fiber flat  #####
    #####   as initial estimate)                   #####
    ####################################################
    ta = time.time()  ### Start time of trace refinement
    fact = 20 #do 1/fact * available points
    ### Empty arrays
    rough_pts = int(np.ceil(hpix/fact))
    xc_ccd = np.zeros((num_fibers,rough_pts))
    yc_ccd = np.zeros((num_fibers,rough_pts))
    inv_chi = np.zeros((num_fibers,rough_pts))
    if verbose:
        print("Refining trace centers")
    for i in range(num_fibers):
        for j in range(0,hpix,fact):
            ### set coordinates, gaussian parameters from coeffs
            jadj = int(np.floor(j/fact))
            yj = (j-hpix/2)/hpix
            yc_ccd[i,jadj] = j
            xc = t_coeffs[2,i]*yj**2+t_coeffs[1,i]*yj+t_coeffs[0,i]
#            Ij = i_coeffs[2,i]*yj**2+i_coeffs[1,i]*yj+i_coeffs[0,i] #May use later for normalization
            sigj = s_coeffs[2,i]*yj**2+s_coeffs[1,i]*yj+s_coeffs[0,i]
            powj = p_coeffs[2,i]*yj**2+p_coeffs[1,i]*yj+p_coeffs[0,i]
            ### Don't try to fit any bad trace sections
            if np.isnan(xc):
                xc_ccd[i,jadj] = np.nan
                inv_chi[i,jadj] = 0
            else:
                ### Take subset of ccd of interest, xpad pixels to each side of peak
                xpad = 7
                xvals = np.arange(-xpad,xpad+1)
                xj = int(xc)
                xwindow = xj+xvals
                xvals = xvals[(xwindow>=0)*(xwindow<vpix)]
                zorig = gain*ccd[xj+xvals,j]
                ### If empty slice, don't try to fit
                if len(zorig)<1:
                    xc_ccd[i,jadj] = np.nan
                    inv_chi[i,jadj] = 0
                    continue
                invorig = 1/(abs(zorig)+readnoise**2) ### inverse variance
                ### Don't try to fit profile for very low SNR peaks
                if np.max(zorig)<20:
                    xc_ccd[i,jadj] = np.nan
                    inv_chi[i,jadj] = 0
                else:
                    ### Fit for center (mn_new), amongst other values
#                    mn_new, hght, bg = fit_mn_hght_bg(xvals,zorig,invorig,sigj,xc-xj-1,sigj,powj=powj)
                    mn_new, hght, bg = linear_mn_hght_bg(xvals,zorig,invorig,sigj,xc-xj-1,power=powj)
                    fitorig = sf.gaussian(xvals,sigj,mn_new,hght,power=powj)
                    inv_chi[i,jadj] = 1/sum((zorig-fitorig)**2*invorig)
                    ### Shift from relative to absolute center
                    xc_ccd[i,jadj] = mn_new+xj+1
                   
    #####################################################
    #### Now with new centers, refit trace coefficients #
    #####################################################
    tmp_poly_ord = 6  ### Use a higher order for a closer fit over entire trace
    t_coeffs_ccd = np.zeros((tmp_poly_ord+1,num_fibers))
    for i in range(num_fibers):
        #Given orientation makes more sense to swap x/y
        mask = ~np.isnan(xc_ccd[i,:]) ### Mask bad points
        ### build profile matrix over good points
        profile = np.ones((len(yc_ccd[i,:][mask]),tmp_poly_ord+1))
        for order in range(tmp_poly_ord):
            profile[:,order+1] = ((yc_ccd[i,:][mask]-hpix/2)/hpix)**(order+1)
        noise = np.diag(inv_chi[i,:][mask])
        if len(xc_ccd[i,:][mask])>(tmp_poly_ord+1):
            ### Chi^2 fit
            tmp_coeffs, junk = sf.chi_fit(xc_ccd[i,:][mask],profile,noise)
        else:
            ### if not enough points to fit, call entire trace bad
            tmp_coeffs = np.nan*np.ones((tmp_poly_ord+1))
        t_coeffs_ccd[:,i] = tmp_coeffs

    tb = time.time() ### Start time of extraction/end of trace refinement
    if verbose:
        print("Trace refinement time = {}s".format(tb-ta))
       
    ### Uncomment below to see plot of traces
#    for i in range(num_fibers):
#        ys = (np.arange(hpix)-hpix/2)/hpix
#        xs = t_coeffs_ccd[2,i]*ys**2+t_coeffs_ccd[1,i]*ys+t_coeffs_ccd[0,i]
#        yp = np.arange(hpix)
#        plt.plot(yp,xs)
#    plt.show()
#    plt.close()
    
    ###########################################################
    ##### Finally, full extraction with refined traces ########
    ###########################################################
    
    ### Make empty arrays for return values
    spec = np.zeros((num_fibers,hpix))
    spec_invar = np.zeros((num_fibers,hpix))
    spec_mask = np.ones((num_fibers,hpix),dtype=bool)
    chi2red_array = np.zeros((num_fibers,hpix))
    if return_model:
        image_model = np.zeros((np.shape(ccd))) ### Used for evaluation
    ### Run once for each fiber
    for i in range(num_fibers):
        #slit_num = np.floor((i)/4)#args.telescopes) # Use with slit flats
        if verbose:
            print("extracting trace {}".format(i+1))
        ### in each fiber loop run through each trace
        for j in range(hpix):
            yj = (j-hpix/2)/hpix
            xc = np.poly1d(t_coeffs_ccd[::-1,i])(yj)
#            Ij = i_coeffs[2,i]*yj**2+i_coeffs[1,i]*yj+i_coeffs[0,i]
            sigj = s_coeffs[2,i]*yj**2+s_coeffs[1,i]*yj+s_coeffs[0,i]
            powj = p_coeffs[2,i]*yj**2+p_coeffs[1,i]*yj+p_coeffs[0,i]
            ### If trace center is undefined mask the point
            if np.isnan(xc):
                spec_mask[i,j] = False
            else:
                ### Set values to use in extraction
                xpad = 5  ### can't be too big or traces start to overlap
                xvals = np.arange(-xpad,xpad+1)
                xj = int(xc)
                xwindow = xj+xvals
                xvals = xvals[(xwindow>=0)*(xwindow<vpix)]
                zorig = gain*ccd[xj+xvals,j]
                ### If too short, don't fit, mask point
                if len(zorig)<1:
                    spec[i,j] = 0
                    spec_mask[i,j] = False
                    continue
                invorig = 1/(abs(zorig)+readnoise**2)
                ### don't try to extract for very low signal
                if np.max(zorig)<20:
                    continue
                else:
                    ### Do nonlinear fit for center, height, and background
                    mn_new, hght, bg = fit_mn_hght_bg(xvals,zorig,invorig,sigj,xc-xj-1,sigj/8,powj=powj)
#                    mn_new, hght, bg = linear_mn_hght_bg(xvals,zorig,invorig,sigj,xc-xj-1,power=powj)
                    ### Use fitted values to make best fit arrays
                    fitorig = sf.gaussian(xvals,sigj,mn_new,hght,power=powj)
                    xprecise = np.linspace(xvals[0],xvals[-1],100)
                    fitprecise = sf.gaussian(xprecise,sigj,mn_new,hght,power=powj)
                    ftmp = sum(fitprecise)*np.mean(np.ediff1d(xprecise))
                    #Following if/else handles failure to fit
                    if ftmp==0:
                        fitnorm = np.zeros(len(zorig))
                    else:
                        fitnorm = fitorig/ftmp
                    ### Get extracted flux and error
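                    ### (Profile-weighted estimate: flux = sum(P*z*w)/sum(P^2*w), with w
                    ### the inverse variance and P the normalized profile; its inverse
                    ### variance is sum(P^2*w), which is stored in spec_invar below.)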
                    fstd = sum(fitnorm*zorig*invorig)/sum(fitnorm**2*invorig)
                    invorig = 1/(readnoise**2 + abs(fstd*fitnorm))
                    chi2red = np.sum((fstd*fitnorm+bg-zorig)**2*invorig)/(len(zorig)-3)
                    ### Now set up to do cosmic ray rejection
                    rej_min = 0
                    loop_count=0
                    while rej_min==0:
                        pixel_reject = cosmic_ray_reject(zorig,fstd,fitnorm,invorig,S=bg,threshhold=0.3*np.mean(zorig),verbose=True)
                        rej_min = np.min(pixel_reject)
                        ### Once no pixels are rejected, re-find extracted flux
                        if rej_min==0:
                            ### re-index arrays to remove rejected points
                            zorig = zorig[pixel_reject==1]
                            invorig = invorig[pixel_reject==1]
                            xvals = xvals[pixel_reject==1]
                            ### re-do fit (can later cast this into a separate function)
                            mn_new, hght, bg = fit_mn_hght_bg(xvals,zorig,invorig,sigj,xc-xj-1,sigj/8,powj=powj)
#                            mn_new, hght, bg = linear_mn_hght_bg(xvals,zorig,invorig,sigj,xc-xj-1,power=powj)
                            fitorig = sf.gaussian(xvals,sigj,mn_new,hght,power=powj)
                            xprecise = np.linspace(xvals[0],xvals[-1],100)
                            fitprecise = sf.gaussian(xprecise,sigj,mn_new,hght,power=powj)
                            ftmp = sum(fitprecise)*np.mean(np.ediff1d(xprecise))
                            fitnorm = fitorig/ftmp
                            fstd = sum(fitnorm*zorig*invorig)/sum(fitnorm**2*invorig)
                            invorig = 1/(readnoise**2 + abs(fstd*fitnorm))
                            chi2red = np.sum((fstd*fitnorm+bg-zorig)**2*invorig)/(len(zorig)-3)
                        ### if more than 3 points are rejected, mask the extracted flux
                        if loop_count>3:
                            spec_mask[i,j] = False
                            break
                        loop_count+=1
                    ### Set extracted spectrum value, inverse variance
                    spec[i,j] = fstd
                    spec_invar[i,j] = sum(fitnorm**2*invorig)
                    chi2red_array[i,j] = chi2red
                    if return_model and not np.isnan(fstd):
                        ### Build model, if desired
                        image_model[xj+xvals,j] += (fstd*fitnorm+bg)/gain
            ### If a nan came out of the above routine, zero it and mask
            if np.isnan(spec[i,j]):
                spec[i,j] = 0
                spec_mask[i,j] = False
    if verbose:
        print("Average reduced chi^2 = {}".format(np.mean(chi2red)))
    if return_model:
        return spec, spec_invar, spec_mask, image_model
    else:
        return spec, spec_invar, spec_mask
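# Hedged, self-contained sketch of the per-column extraction step used in extract_1D
# (illustration only; _demo_optimal_extract_column is not part of the pipeline).  Given a
# column of data, its inverse variance, and a profile (the pipeline normalizes the fitted
# Gaussian by its numerical integral; a unit-sum normalization is used here for simplicity),
# it returns the profile-weighted flux and the flux inverse variance.
def _demo_optimal_extract_column(zvals, invar, profile):
    import numpy as np
    norm = profile/np.sum(profile)                      #normalized profile
    flux = np.sum(norm*zvals*invar)/np.sum(norm**2*invar)
    flux_invar = np.sum(norm**2*invar)
    return flux, flux_invar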
def find_trace_coeffs(image,pord,fiber_space,num_points=None,num_fibers=None,vertical=False,return_all_coeffs=True,skip_peaks=0):
    """ Polynomial fitting for trace coefficients.  Packs into interval [-1,1]
        INPUTS:
            image - 2D ccd image on which you'd like to find traces
            pord - polynomial order to fit trace positions
            fiber_space - estimate of fiber spacing in pixels
            num_points - number of cross sections to average for trace (if None, will set to 1/20 length or 2*pord (whichever is greater))
            num_fibers - number of fibers (if None, will auto-detect)
            vertical - True if traces run vertical. False if horizontal.
            return_all_coeffs - if False, only returns trace_poly_coeffs
        OUTPUTS:
            t_coeffs - nx(pord+1) array where n is number of detected traces
                                gives fitted coeffs for each trace
            i_coeffs - intensity along each trace
            s_coeffs - sigma (for gaussian fit) along each trace
            p_coeffs - power (for pseudo gaussian fit) along each trace
    """
    def find_peaks(array,bg_cutoff=None,mx_peaks=None,skip_peaks=0):
        """ Finds peaks of a 1D array.
            Assumes decent signal to noise ratio, no anomalies
            Assumes separation of at least 5 units between peaks
        """
        ###find initial peaks (center is best in general, but edge is okay here)
        xpix = len(array)
        px = 2
        pcnt = 0
        skcnt = 0
        peaks = np.zeros(len(array))
        if mx_peaks is None:
            mx_peaks = len(array)
        if bg_cutoff is None:
            bg_cutoff = 0.5*np.mean(array)
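        ### Peak criterion below: array[px-1] counts as a peak when it exceeds bg_cutoff
        ### and is a strict local maximum of its immediate neighbors.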
        while px<xpix:
#            if trct>=num_fibers:
#                break
#            y = yvals[0]
            if array[px-1]>bg_cutoff and array[px]<array[px-1] and array[px-1]>array[px-2]: #not good for noisy
                if skcnt < skip_peaks:
                    skcnt += 1 #Increment skip counter
                    px += 5 #jump past the skipped peak
                    continue
                else:
                    peaks[pcnt] = px-1
                    px += 5 #jump past peak
                    pcnt+=1 #increment peak counts
                    skcnt += 1 #increment skip counter
                if pcnt >= mx_peaks:
                    break
            else:
                px+=1
        peaks = peaks[0:pcnt]
        return peaks
    
    def find_fibers(image):  
        """ Estimates number of fibers.
            Assumes roughly square image.
            THIS DOESN'T WORK RIGHT YET
        """
        shrt_ax = int(min(np.shape(image))/2)
        crsx_pts = min(10,shrt_ax)
        tr_ul_lr = np.zeros(crsx_pts) #traces cutting upper left to lower right
        tr_ur_ll = np.zeros(crsx_pts) #traces cutting upper right to lower left
        for i in range(crsx_pts):
            ul_lr = np.ravel(image)[2*i:np.size(image):(len(image[0])+1)]
            ur_ll = np.ravel(image)[(len(image[0])-1-2*i):np.size(image):(len(image[0])-1)]
            tr_ul_lr[i] = len(find_peaks(ul_lr))
            tr_ur_ll[i] = len(find_peaks(ur_ll))
        tr_ul_lr = int(np.median(tr_ul_lr))
        tr_ur_ll = int(np.median(tr_ur_ll))
        if tr_ul_lr > tr_ur_ll:
            return tr_ul_lr, True
        else:
            return tr_ur_ll, False
                  
    ### Force horizontal
    if vertical:
        image = image.T
    ### Find number of fibers and direction (upper left to lower right or opposite)
    tmp_num_fibers, fiber_dir = find_fibers(image)
    if num_fibers is None:
        num_fibers=tmp_num_fibers
    ### Select number of points to use in tracing
    if num_points is None:
        num_points = max(2*pord,int(np.shape(image)[1]/20))
    ### Make all empty arrays for trace finding
    xpix = np.shape(image)[0]
    ypix = np.shape(image)[1]
    yspace = int(np.floor(ypix/(num_points+1)))
    yvals = yspace*(1+np.arange(num_points))
    xtrace = np.nan*np.ones((num_fibers,num_points)) #xpositions of traces
    ytrace = np.zeros((num_fibers,num_points)) #ypositions of traces
    sigtrace = np.zeros((num_fibers,num_points)) #standard deviation along trace
    powtrace = np.zeros((num_fibers,num_points)) #pseudo-gaussian power along trace
    Itrace = np.zeros((num_fibers,num_points)) #relative intensity of flat at trace
    chi_vals = np.zeros((num_fibers,num_points)) #returned from fit_trace
    bg_cutoff = 1.05*np.median(image) #won't fit values below this intensity
    
    ### Put in initial peak guesses
    peaks = find_peaks(image[:,yvals[0]],bg_cutoff=bg_cutoff,mx_peaks=num_fibers,skip_peaks=skip_peaks)
    xtrace[:len(peaks)-1,0] = peaks[:-1] ### have to cut off last point - trace wanders off ccd
    ytrace[:,0] = yvals[0]*np.ones(len(ytrace[:,0]))   
    ###From initial peak guesses fit for more precise location
    for i in range(num_fibers):
        y = yvals[0]
        if not np.isnan(xtrace[i,0]):
            xtrace[i,0], Itrace[i,0], sigtrace[i,0], powtrace[i,0], chi_vals[i,0] = fit_trace(xtrace[i,0],y,image)
        else:
            Itrace[i,0], sigtrace[i,0], powtrace[i,0], chi_vals[i,0] = np.nan, np.nan, np.nan, np.nan
    
    
    for i in range(1,len(yvals)):
        y = yvals[i]
        crsxn = image[:,y]
        ytrace[:,i] = y
        for j in range(num_fibers):
            if not np.isnan(xtrace[j,i-1]):
                #set boundaries
                lb = int(xtrace[j,i-1]-fiber_space/2)
                ub = int(xtrace[j,i-1]+fiber_space/2)
                #cutoff at edges
                if lb<0:
                    lb = 0
                if ub > xpix:
                    ub = xpix
                #set subregion
                xregion = crsxn[lb:ub]
                #only look at if max is reasonably high (don't try to fit background)
                if np.max(xregion)>bg_cutoff:
                    #estimate of trace position based on tallest peak
                    xtrace[j,i] = np.argmax(xregion)+lb
                    #quadratic fit for sub-pixel precision
#                    print xtrace[j,i]
                    xtrace[j,i], Itrace[j,i], sigtrace[j,i], powtrace[j,i], chi_vals[j,i] = fit_trace(xtrace[j,i],y,image)
                else:
                    xtrace[j,i], Itrace[j,i], sigtrace[j,i], powtrace[j,i], chi_vals[j,i] = np.nan, np.nan, np.nan, np.nan, np.nan
            else:
                xtrace[j,i], Itrace[j,i], sigtrace[j,i], powtrace[j,i], chi_vals[j,i] = np.nan, np.nan, np.nan, np.nan, np.nan
                
    Itrace /= np.median(Itrace) #Rescale intensities
    
    #Finally fit x vs. y on traces.  Start with quadratic for simple + close enough
    t_coeffs = np.zeros((3,num_fibers))
    i_coeffs = np.zeros((3,num_fibers))
    s_coeffs = np.zeros((3,num_fibers))
    p_coeffs = np.zeros((3,num_fibers))
    for i in range(num_fibers):
        #Given orientation makes more sense to swap x/y
        mask = ~np.isnan(xtrace[i,:])
        profile = np.ones((len(ytrace[i,:][mask]),3)) #Quadratic fit
        profile[:,1] = (ytrace[i,:][mask]-ypix/2)/ypix #scale data to get better fit
        profile[:,2] = ((ytrace[i,:][mask]-ypix/2)/ypix)**2
        noise = np.diag(chi_vals[i,:][mask])
        if len(xtrace[i,:][mask])>3:
            tmp_coeffs, junk = sf.chi_fit(xtrace[i,:][mask],profile,noise)
            tmp_coeffs2, junk = sf.chi_fit(Itrace[i,:][mask],profile,noise)
            tmp_coeffs3, junk = sf.chi_fit(sigtrace[i,:][mask],profile,noise)
            tmp_coeffs4, junk = sf.chi_fit(powtrace[i,:][mask],profile,noise)
        else:
            tmp_coeffs = np.nan*np.ones((3))
            tmp_coeffs2 = np.nan*np.ones((3))
            tmp_coeffs3 = np.nan*np.ones((3))
            tmp_coeffs4 = np.nan*np.ones((3))
        t_coeffs[0,i] = tmp_coeffs[0]
        t_coeffs[1,i] = tmp_coeffs[1]
        t_coeffs[2,i] = tmp_coeffs[2]
        i_coeffs[0,i] = tmp_coeffs2[0]
        i_coeffs[1,i] = tmp_coeffs2[1]
        i_coeffs[2,i] = tmp_coeffs2[2]
        s_coeffs[0,i] = tmp_coeffs3[0]
        s_coeffs[1,i] = tmp_coeffs3[1]
        s_coeffs[2,i] = tmp_coeffs3[2]      
        p_coeffs[0,i] = tmp_coeffs4[0]
        p_coeffs[1,i] = tmp_coeffs4[1]
        p_coeffs[2,i] = tmp_coeffs4[2]
        
    if return_all_coeffs:
        return t_coeffs, i_coeffs, s_coeffs, p_coeffs
    else:
        return t_coeffs