示例#1
0
def get_data(name, plot=0):
    """Load the 'dl_estimates' dead-layer data for dataset *name* and
    collapse each (source, detector) strip array to one measurement.

    Returns a meas array of shape (num_sources, num_dets) whose entry
    [i, d] is meas(nanmean, nanstd) over that detector's strip values.
    If *plot* is truthy, also show an errorbar grid of the raw strip
    data with the per-detector mean overlaid.
    """
    analysis_mgr = jspec.dssd_analysis_manager(name, '../../storage/',
                                               (32, 32), [1, 1, 1])
    data = analysis_mgr.load_dill('dl_estimates')

    num_sources = len(data)
    num_dets = len(data[0][0])

    ret = meas.empty((num_sources, num_dets))

    for src in range(num_sources):
        for det in range(num_dets):
            strip_vals = data[src][0][det]
            ret[src, det] = meas.meas(np.nanmean(strip_vals.x),
                                      np.nanstd(strip_vals.x))

    if plot:
        # squeeze=0 keeps axarr 2-D even for a single row/column.
        f, axarr = plt.subplots(num_sources, num_dets, squeeze=0)

        for det in range(num_dets):
            for src in range(num_sources):
                strip_vals = data[src][0][det]
                print(det, src)
                print(ret[src, det])
                axarr[src, det].errorbar(range(32), strip_vals.x,
                                         strip_vals.dx, ls='none')
                axarr[src, det].axhline(ret[src, det].x, c='r')
        plt.show()
        plt.close('all')

    return ret
示例#2
0
    def calibrate_pixels( self, peaks_to_use, actual_energies ) :
        """Fit a linear energy calibration to every pixel of every detector
        and dill the result to self.param_paths['calibration'].

        For each detector, the fitted 'mu' peak positions selected by
        peaks_to_use are stacked against the flattened actual_energies and
        fit with scipy.optimize.leastsq via self._linear_resid.  Pixels whose
        fit converges and passes a chi^2 goodness-of-fit test (p > 0.05) get
        their (slope, intercept) stored; all others remain nan.

        peaks_to_use : for each peak group i, the indices j of the peaks
            whose mu values enter the calibration.
        actual_energies : nested list of known energies, flattened here in
            the same (group, peak) order used to stack the mu values.
        """

        mu_values = self.read_params( 'mu' )

        # flatten the nested energy list so it lines up with mu_flattened
        flattened_actual_energies = np.array( [ a for b in range( len( actual_energies ) )
                                                for a in actual_energies[b] ] )

        total_num_peaks = len( flattened_actual_energies )

        # (param, det, x, y): param 0 = slope, param 1 = intercept
        calibrations = meas.zeros( ( 2, self.num_dets, self.xdim, self.ydim ) )
        calibrations[:] = meas.nan

        for det in range( self.num_dets ) :

            # stack the selected mu values for this det: ( peak, x, y )
            mu_flattened = meas.empty( ( total_num_peaks, self.xdim, self.ydim ) )

            k = 0
            for i in range( self.num_groups ) :
                for j in peaks_to_use[i] :
                    mu_flattened[k] = mu_values[i][j][det]
                    k += 1

            for x in range( self.xdim ) :
                for y in range( self.ydim ) :

                    # skip pixels with any nan entries
                    if np.sum( np.isnan( mu_flattened[:,x,y].x ) ) > 0 :
                        continue

                    params_guess = [ 1.0, 0.0 ]

                    ret = scipy.optimize.leastsq( self._linear_resid,
                                                  params_guess,
                                                  args = ( flattened_actual_energies,
                                                           mu_flattened[:,x,y].x,
                                                           mu_flattened[:,x,y].dx ),
                                                  full_output = 1 )

                    params_result, cov, info, msg, status = ret

                    # leastsq signals convergence with status 1-4; cov is
                    # None when the jacobian at the solution is singular.
                    success = ( status >= 1 and status <= 4
                                and ( cov is not None ) )

                    # BUGFIX: this previously tested the raw `status` flag,
                    # which is also truthy for failure codes > 4 and does
                    # not guard against cov being None (np.diag(None) would
                    # raise below).
                    if success :

                        chisqr = np.sum( info['fvec']**2 )
                        nfree = len( flattened_actual_energies ) - len( params_result )
                        redchisqr = chisqr / nfree

                        # scale the covariance by reduced chi^2 to get
                        # parameter errors (standard leastsq convention)
                        params_result_errors = np.sqrt( np.diag( cov ) * redchisqr )

                        pvalue = 1 - chi2.cdf( chisqr, nfree )

                        # only keep calibrations that describe the data well
                        if pvalue > 0.05 :
                            calibrations[ :, det, x, y ] = meas.meas( params_result ,
                                                                   params_result_errors )

        with open( self.param_paths[ 'calibration' ], 'wb' ) as f :
            dill.dump( calibrations, f )
def independent_dl_regression(channels,
                              secants,
                              actual_energies,
                              show=0,
                              savename=None,
                              analysis_mgr=None,
                              dets=None,
                              fstrips=None,
                              dpi=50,
                              source_names=None):
    """Fit the dead-layer model independently for each (det, fstrip) pair.

    For every detector d in *dets* and front strip x in *fstrips*, minimize
    strip_objective over [a, b, dl_0, ..., dl_{num_groups-1}] with
    scipy.optimize.basinhopping.  Accepted fits (see criterion below) store
    a meas(dl_i, error) per group/dataset into dl_estimates[i][j][d][x];
    rejected pairs stay nan.  The full nested structure is saved via
    analysis_mgr.save_dill(..., 'dl_estimates_indep').

    NOTE(review): analysis_mgr defaults to None but save_dill is called on
    it unconditionally at the end — callers must pass one, or this raises
    AttributeError after all the fitting work.
    """

    # dl_estimates[group i][dataset j][det d] = meas array over the dimx
    # front strips, initialized to nan below.
    # dl_estimates = spec.dssd_data_container.empty( channels.num_data_per_group, channels.num_dets,
    # channels.dimx, channels.dimy )
    dl_estimates = [[[
        meas.empty(channels.dimx) for d in range(channels.num_dets)
    ] for j in range(channels.num_data_per_group[i])]
                    for i in range(channels.num_groups)]

    for i in range(channels.num_groups):
        for j in range(channels.num_data_per_group[i]):
            for d in range(channels.num_dets):
                dl_estimates[i][j][d][:] = meas.nan

    # handle each det separately.
    if dets is None:
        dets = range(channels.num_dets)

    if fstrips is None:
        fstrips = range(channels.dimx)

    for d in dets:
        for x in fstrips:

            print(d, x)

            # flattened_data_nan_counts = np.array( [ [ np.isnan( channels[i][j][d][x].x )
            #                                           for j in range( channels.num_data_per_group[i] ) ]
            #                                         for i in range( channels.num_groups ) ] )

            # flattened_secants = np.array( [ secants[i][d][x]
            #                                 for i in range( channels.num_groups ) ] )

            # require at least 3 non-nan secant values in every group;
            # sec_mask[i] is True when group i has too little data.
            sec_mask = [
                np.sum(~np.isnan(secants[i][d][x])) < 3
                for i in range(channels.num_groups)
            ]
            # sec_mask = ~ np.isnan( secants[:,d,x] )

            print(sec_mask)
            if any(sec_mask):
                print('not enough data')
                continue

            # ndata = np.sum( ~( np.isnan( flattened_data )
            #                    | np.isnan( flattened_secants ) ) )

            # if ndata < 2 :
            #     print( 'not enough data' )
            #     continue

            # guess: calibration slope ~2, offset ~-100, 40 per-group dl
            params_guess = [2.0, -100.0
                            ] + [40 for i in range(channels.num_groups)]
            args = (channels, secants, actual_energies, d, x)
            ret = scipy.optimize.basinhopping(strip_objective,
                                              params_guess,
                                              minimizer_kwargs={'args': args},
                                              niter=int(1e2))

            # print( ret )
            #print( 'ndata: ', ndata )
            # residuals at the best fit determine the effective data count
            resid = compute_resid(ret.x, channels, secants, actual_energies, d,
                                  x)
            # print( resid )
            ndata = len(resid)
            print('ndata: ', ndata)

            params_result = ret.x
            a = params_result[0]
            chisqr = ret.fun
            nfree = ndata - len(params_result)
            redchisqr = chisqr / nfree
            print(chisqr)
            print(redchisqr)

            # acceptance: per-point chi^2 below 2.1 and the calibration
            # slope near its physical expectation (1.8 < a < 2.3).
            if ret.fun / ndata < 2.1 and a > 1.8 and a < 2.3:
                # approximate covariance from the local minimizer's inverse
                # Hessian — presumably BFGS; TODO confirm minimizer default.
                cov = ret.lowest_optimization_result.hess_inv

                print(params_result)

                params_result_errors = np.sqrt(np.diag(cov) * redchisqr)
                print(params_result_errors)

                # dl parameters start at index 2 (after slope, offset)
                for i in range(channels.num_groups):
                    for j in range(channels.num_data_per_group[i]):
                        dl_estimates[i][j][d][x] = meas.meas(
                            params_result[2 + i], params_result_errors[2 + i])

            else:
                print('fit failed')
                params_result = None
                params_result_errors = None

            # plot_strip handles params_result=None for failed fits
            if show or savename is not None:
                plot_strip(channels,
                           secants,
                           actual_energies,
                           params_result,
                           params_result_errors,
                           redchisqr,
                           d,
                           x,
                           show=show,
                           savename=savename,
                           analysis_mgr=analysis_mgr,
                           dpi=dpi,
                           source_names=source_names)

            print('\n\n\n')
    analysis_mgr.save_dill(dl_estimates, 'dl_estimates_indep')
示例#4
0
def get_secant_matrices( compute_source_sectheta = 0,
                         average_over_source = 0,
                         reset = 0 ):
    """Build the sec(theta) grid for every source.

    Returns a dict mapping each name in `sources` to a meas array of shape
    (2, 32, 32) — presumably (detector, source) sectheta per pixel, per the
    labels the original code carried; TODO confirm axis meaning against
    _populate_sectheta_grid.  All values are returned as magnitudes.

    NOTE: compute_source_sectheta and reset are kept for backward
    compatibility; they were only consumed by the since-removed disk-caching
    code and have no effect here.  average_over_source is forwarded to
    _populate_sectheta_grid.
    """

    secant_matrices = dict( zip( sources,
                                 meas.empty( ( len(sources), 2, 32, 32 ) ) ) )

    # source position measurements, indexed by source name
    source_data = _get_source_data()
    source_data = source_data.set_index( sources_index )
    _fill_redundant_source_data( source_data )

    # coordinates of every object: pd.Series of two 3-tuples per entry,
    # one for the value and one for its uncertainty
    all_coords = pd.Series( index = all_objects_index )
    _populate_all_coords( all_coords, source_data )

    # fill each source's grid of values / uncertainties
    _populate_sectheta_grid( secant_matrices, all_coords, source_data,
                                               average_over_source )

    # sectheta is used as a path-length factor: drop any geometric sign
    for key, val in secant_matrices.items() :
        secant_matrices[key] = abs( val )

    return secant_matrices
示例#5
0
# Collect per-(dataset, source, detector) dead-layer means into a flat
# meas array.  Each get_data(...) call returns shape (num_sources,
# num_dets); tmp[i][j] picks source i, detector j.

# tmp = get_data( 'det3_moved', plot = 1 )
# data[ 'det3' ][ 'pu' ] = tmp[0][0]
# data[ 'det3' ][ 'cf' ] = tmp[1][0]

# tmp = get_data( 'full_bkgd_tot', plot = 1 )
# data[ 'det1' ][ 'gd' ] = tmp[0][0]
# data[ 'det1' ][ 'cm' ] = tmp[1][0]
# data[ 'det2' ][ 'gd' ] = tmp[0][1]
# data[ 'det2' ][ 'cm' ] = tmp[1][1]
# data[ 'det3' ][ 'gd' ] = tmp[0][2]
# data[ 'det3' ][ 'cm' ] = tmp[1][2]
# data[ 'det4' ][ 'gd' ] = tmp[0][3]
# data[ 'det4' ][ 'cm' ] = tmp[1][3]

data = meas.empty(12)
# 'det1_moved': two sources, first detector column
tmp = get_data('det1_moved', plot=1)
data[0] = tmp[0][0]
data[1] = tmp[1][0]

# 'det3_moved': two sources, first detector column
tmp = get_data('det3_moved', plot=1)
data[2] = tmp[0][0]
data[3] = tmp[1][0]

# 'full_bkgd_tot': two sources across three detector columns
tmp = get_data('full_bkgd_tot', plot=1)
data[4] = tmp[0][0]
data[5] = tmp[1][0]
data[6] = tmp[0][1]
data[7] = tmp[1][1]
data[8] = tmp[0][2]
data[9] = tmp[1][2]
# NOTE(review): data[10] and data[11] are not assigned here — presumably
# filled later in the file, or the array is over-allocated; TODO confirm.
示例#6
0
# per-detector lists of data points to exclude (all empty = no cuts)
data_cuts = [[], [], [], []]

if cut_data:
    # drop the configured front strips and data points from every det
    remove_strips_from_dets(channels, fstrip_cuts, [])
    remove_data_from_dets(channels, data_cuts)

# INITIAL FIT PARAMETERS

# source_deadlayer_guesses = [ [ 6., 6.], [3.,3.], [15.,15.,15.,15.] ]
# one entry per source group; None = no stopping-power interpolation
source_stopping_power_interps = [None] * 3
det_deadlayer_guess = 100.0
# [slope, offset] starting point for the energy calibration
calibration_coefs_guess = [2.0, 0.0]
source_deadlayer_guesses = [[25.0], [25.0]]

# results: presumably indexed (param, k, det) per the loops below — TODO
# confirm once the fit call that fills it is in view.
ret = meas.empty((2, 2, 4))

for d in range(4):
    for k in range(2):

        # det_sectheta_tmp = [ det_sectheta[ d ] ]
        # source_sectheta_tmp = [ source_sectheta[ d ] ]
        # channels_tmp = [ channels[ 0 ][ d ] ]

        det_sectheta_tmp = [[
            det_sectheta[i][k][d] for i in range(num_sources)
        ]]

        source_sectheta_tmp = det_sectheta_tmp

        channels_tmp = [[[