Example #1
import numpy as np
import matplotlib.pyplot as plt
import jspec   # project-local analysis-manager module (assumed on the import path)
import meas    # project-local measurement (value +/- error) module (assumed)


def get_data(name, plot=0):
    analysis_mgr = jspec.dssd_analysis_manager(name, '../../storage/',
                                               (32, 32), [1, 1, 1])
    data = analysis_mgr.load_dill('dl_estimates')

    num_sources = len(data)
    num_dets = len(data[0][0])

    ret = meas.empty((num_sources, num_dets))

    for i in range(num_sources):
        for d in range(num_dets):
            tmp = data[i][0][d]
            ret[i, d] = meas.meas(np.nanmean(tmp.x), np.nanstd(tmp.x))

    if plot:
        f, axarr = plt.subplots(num_sources, num_dets, squeeze=0)

        for d in range(num_dets):
            for i in range(num_sources):
                tmp = data[i][0][d]
                print(d, i)
                print(ret[i, d])
                axarr[i, d].errorbar(range(32), tmp.x, tmp.dx, ls='none')
                axarr[i, d].axhline(ret[i, d].x, c='r')
        plt.show()
        plt.close('all')

    return ret
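
A minimal usage sketch (the dataset name 'cf_249' is a placeholder, not necessarily one of the stored datasets; the .x attribute is inferred from how meas objects are used above):

# hypothetical call: summarize the dead-layer estimates for one dataset
estimates = get_data('cf_249', plot=0)
print(estimates.x)   # central values, shape (num_sources, num_dets)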
Example #2
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
import meas   # project-local measurement (value +/- error) module (assumed)


def find_alpha_peak(alpha_spectrum,
                    peaknum=0,
                    num_iterations=500,
                    plot=0,
                    max_failed_samples=500):

    single_peak_spectrum = alpha_spectrum.subpeak(peaknum)

    mean_params = np.copy(single_peak_spectrum._construct_params_array())

    peakpos_arr = np.empty(num_iterations)

    failed_samples = 0

    for i in range(num_iterations):

        # resample until the drawn parameters pass the validity check
        while True:

            sampled_params = np.random.multivariate_normal(
                mean_params, single_peak_spectrum.cov)

            sampled_params[0] = 1

            single_peak_spectrum.set_params_from_params_array(sampled_params)

            if single_peak_spectrum.check_valid_fit_params():
                break

            if failed_samples >= max_failed_samples:
                return None

            failed_samples += 1

        # now we have valid params: find the peak by minimizing -f
        current_inverted_f = lambda x_: -single_peak_spectrum.eval(x_)

        result = scipy.optimize.fmin(current_inverted_f,
                                     sampled_params[1] - 4,
                                     xtol=0.01,
                                     disp=0)

        peakpos_arr[i] = result[0]

    m = np.mean(peakpos_arr)
    s = np.std(peakpos_arr)

    # print( m, s )

    if plot:
        ax = plt.axes()
        ax.set_title('Estimated Peak Channel Distribution', fontsize=20)
        ax.set_xlabel('Peak Channel', fontsize=18)
        ax.set_ylabel('Counts', fontsize=18)
        ax.hist(peakpos_arr, bins=20)
        plt.show()

    return meas.meas(m, s)
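
The Monte Carlo recipe above generalizes beyond the project's spectrum classes: sample fit parameters from their covariance, locate the extremum of each sampled curve with scipy.optimize.fmin, and report the spread. A self-contained sketch of that recipe (the Gaussian model and all numbers are illustrative, not from the original fit):

import numpy as np
import scipy.optimize

def mc_peak_position(f, mean_params, cov, x0, num_iterations=500):
    # f(x, params) is the fitted model; the peak is the minimum of -f
    rng = np.random.default_rng()
    peaks = np.empty(num_iterations)
    for i in range(num_iterations):
        p = rng.multivariate_normal(mean_params, cov)
        peaks[i] = scipy.optimize.fmin(lambda x: -f(x[0], p), x0,
                                       xtol=0.01, disp=0)[0]
    return np.mean(peaks), np.std(peaks)

# illustrative Gaussian peak near channel 50, amplitude pinned to 1
gauss = lambda x, p: p[0] * np.exp(-0.5 * ((x - p[1]) / p[2]) ** 2)
print(mc_peak_position(gauss, [1.0, 50.0, 3.0],
                       np.diag([0.0, 0.04, 0.01]), 46.0))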
Example #3
def get_tau2(self, j):
    return meas.meas(self.det_params[j][2], self.det_params_errors[j][2])
Example #4
def get_tau1(self, j):
    return meas.meas(self.det_params[j][1], self.det_params_errors[j][1])
Example #5
def get_eta(self, j):
    return meas.meas(self.det_params[j][0], self.det_params_errors[j][0])
Example #6
def get_sigma(self, j):
    return meas.meas(self.peak_params[j][2], self.peak_params_errors[j][2])
Example #7
def get_mu(self, j):
    return meas.meas(self.peak_params[j][1], self.peak_params_errors[j][1])
Example #8
def get_A(self, j):
    return meas.meas(self.peak_params[j][0], self.peak_params_errors[j][0])
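
Examples #3 through #8 are all one pattern: pair a fitted parameter with its error as a single meas object. If the project's meas module is unavailable, a minimal stand-in with the same .x / .dx interface (inferred from how it is used in these examples; not the real API) looks like:

import numpy as np

class Meas:
    # minimal value-plus-uncertainty container mimicking how meas.meas
    # is used throughout these examples
    def __init__(self, x, dx):
        self.x = np.asarray(x, dtype=float)    # central value(s)
        self.dx = np.asarray(dx, dtype=float)  # one-sigma uncertainty(ies)

    def __repr__(self):
        return 'Meas(%s +/- %s)' % (self.x, self.dx)

print(Meas(5.48, 0.02))  # e.g. an alpha energy in MeV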
Example #9
    # requires: numpy as np, scipy.optimize, scipy.stats.chi2, dill,
    # and the project-local meas module.
    def calibrate_pixels( self, peaks_to_use, actual_energies ) :

        mu_values = self.read_params( 'mu' )

        flattened_actual_energies = np.array( [ a for b in range( len( actual_energies ) )
                                                for a in actual_energies[b] ] )
        
        total_num_peaks = len( flattened_actual_energies ) 
        
        # timer = jutils.time_estimator( self.num_dets * self.xdim * self.ydim, 10 ) 

        calibrations = meas.zeros( ( 2, self.num_dets, self.xdim, self.ydim ) )
        calibrations[:] = meas.nan
        
        for det in range( self.num_dets ) :

            mu_flattened = meas.empty( ( total_num_peaks, self.xdim, self.ydim ) )

            k = 0 
            for i in range( self.num_groups ) :
                for j in peaks_to_use[i] :
                    # print( det ) 
                    # print( mu_values.shape ) 
                    mu_flattened[k] = mu_values[i][j][det]
                    k += 1

            for x in range( self.xdim ) :
                for y in range( self.ydim ) :

                    # timer.update()


                    # skip data with any nan-entries
                    if np.sum( np.isnan( mu_flattened[:,x,y].x ) ) > 0 :
                        continue

                    params_guess = [ 1.0, 0.0 ] 
                    
                    ret = scipy.optimize.leastsq( self._linear_resid,
                                                  params_guess,
                                                  args = ( flattened_actual_energies,
                                                           mu_flattened[:,x,y].x,
                                                           mu_flattened[:,x,y].dx ),
                                                  full_output = 1 )

                    params_result, cov, info, msg, status = ret

                    # leastsq signals success via status codes 1 through 4;
                    # also require that a covariance estimate was produced.
                    success = ( status >= 1 and status <= 4
                                and ( cov is not None ) )

                    if success :

                        chisqr = np.sum( info['fvec']**2 )
                        nfree = len( flattened_actual_energies ) - len( params_result )
                        redchisqr = chisqr / nfree

                        params_result_errors = np.sqrt( np.diag( cov ) * redchisqr )
                        # calibrations[ det, x, y ] = params_result[:] 

                        pvalue = 1 - chi2.cdf( chisqr, nfree )
                        
                        # print() 
                        # print( mu_flattened[:,x,y].x ) 
                        # print( params_result ) 
                        # print( params_result_errors )
                        # print( redchisqr )

                        # print( pvalue )
                        if pvalue > 0.05 :
                            calibrations[ :, det, x, y ] = meas.meas( params_result,
                                                                      params_result_errors )

        with open( self.param_paths[ 'calibration' ], 'wb' ) as f :
            dill.dump( calibrations, f )
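
The per-pixel fit above is an error-weighted linear least squares with a chi-square goodness-of-fit cut. A self-contained sketch of the same recipe on synthetic data (_linear_resid is reconstructed here as the weighted residual of mu = m*E + b, which matches how it is called above but is an assumption):

import numpy as np
import scipy.optimize
from scipy.stats import chi2

def linear_resid(params, energies, mu, mu_err):
    m, b = params
    return (mu - (m * energies + b)) / mu_err  # error-weighted residuals

energies = np.array([5.16, 5.49, 5.81])    # illustrative peak energies (MeV)
mu = np.array([2580.2, 2745.1, 2904.8])    # illustrative peak channels
mu_err = np.array([0.5, 0.4, 0.6])

ret = scipy.optimize.leastsq(linear_resid, [1.0, 0.0],
                             args=(energies, mu, mu_err), full_output=1)
params, cov, info, msg, status = ret

if 1 <= status <= 4 and cov is not None:
    chisqr = np.sum(info['fvec'] ** 2)
    nfree = len(energies) - len(params)
    pvalue = 1 - chi2.cdf(chisqr, nfree)
    param_errors = np.sqrt(np.diag(cov) * chisqr / nfree)
    print(params, param_errors, pvalue)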
Example #10
# requires: numpy as np, scipy.optimize, and the project-local meas module;
# strip_objective, compute_resid, and plot_strip are assumed defined elsewhere.
def independent_dl_regression(channels,
                              secants,
                              actual_energies,
                              show=0,
                              savename=None,
                              analysis_mgr=None,
                              dets=None,
                              fstrips=None,
                              dpi=50,
                              source_names=None):

    # dl_estimates = spec.dssd_data_container.empty( channels.num_data_per_group, channels.num_dets,
    # channels.dimx, channels.dimy )
    dl_estimates = [[[
        meas.empty(channels.dimx) for d in range(channels.num_dets)
    ] for j in range(channels.num_data_per_group[i])]
                    for i in range(channels.num_groups)]

    for i in range(channels.num_groups):
        for j in range(channels.num_data_per_group[i]):
            for d in range(channels.num_dets):
                dl_estimates[i][j][d][:] = meas.nan

    # handle each det separately.
    if dets is None:
        dets = range(channels.num_dets)

    if fstrips is None:
        fstrips = range(channels.dimx)

    for d in dets:
        for x in fstrips:

            print(d, x)

            # flattened_data_nan_counts = np.array( [ [ np.isnan( channels[i][j][d][x].x )
            #                                           for j in range( channels.num_data_per_group[i] ) ]
            #                                         for i in range( channels.num_groups ) ] )

            # flattened_secants = np.array( [ secants[i][d][x]
            #                                 for i in range( channels.num_groups ) ] )
            sec_mask = [
                np.sum(~np.isnan(secants[i][d][x])) < 3
                for i in range(channels.num_groups)
            ]
            # sec_mask = ~ np.isnan( secants[:,d,x] )

            print(sec_mask)
            if any(sec_mask):
                print('not enough data')
                continue

            # ndata = np.sum( ~( np.isnan( flattened_data )
            #                    | np.isnan( flattened_secants ) ) )

            # if ndata < 2 :
            #     print( 'not enough data' )
            #     continue

            params_guess = [2.0, -100.0] + [40] * channels.num_groups
            args = (channels, secants, actual_energies, d, x)
            ret = scipy.optimize.basinhopping(strip_objective,
                                              params_guess,
                                              minimizer_kwargs={'args': args},
                                              niter=int(1e2))

            resid = compute_resid(ret.x, channels, secants, actual_energies,
                                  d, x)
            ndata = len(resid)
            print('ndata: ', ndata)

            params_result = ret.x
            a = params_result[0]
            chisqr = ret.fun
            nfree = ndata - len(params_result)
            redchisqr = chisqr / nfree
            print(chisqr)
            print(redchisqr)

            if ret.fun / ndata < 2.1 and 1.8 < a < 2.3:
                cov = ret.lowest_optimization_result.hess_inv

                print(params_result)

                params_result_errors = np.sqrt(np.diag(cov) * redchisqr)
                print(params_result_errors)

                for i in range(channels.num_groups):
                    for j in range(channels.num_data_per_group[i]):
                        dl_estimates[i][j][d][x] = meas.meas(
                            params_result[2 + i], params_result_errors[2 + i])

            else:
                print('fit failed')
                params_result = None
                params_result_errors = None

            if show or savename is not None:
                plot_strip(channels,
                           secants,
                           actual_energies,
                           params_result,
                           params_result_errors,
                           redchisqr,
                           d,
                           x,
                           show=show,
                           savename=savename,
                           analysis_mgr=analysis_mgr,
                           dpi=dpi,
                           source_names=source_names)

            print('\n\n\n')
    analysis_mgr.save_dill(dl_estimates, 'dl_estimates_indep')
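
The parameter errors above come from the inverse Hessian that the default BFGS local minimizer stores on the best basinhopping result, scaled by the reduced chi-square. A stripped-down sketch of that error recipe on a toy chi-square objective (all data synthetic):

import numpy as np
import scipy.optimize

rng = np.random.default_rng(0)
xdata = np.linspace(0, 10, 20)
ydata = 2.0 * xdata - 5.0 + rng.normal(0, 0.5, 20)
yerr = np.full(20, 0.5)

def objective(p):
    # chi-square of a linear model; basinhopping minimizes it globally
    return np.sum(((ydata - (p[0] * xdata + p[1])) / yerr) ** 2)

ret = scipy.optimize.basinhopping(objective, [1.0, 0.0], niter=100)
cov = ret.lowest_optimization_result.hess_inv  # BFGS inverse Hessian
redchisqr = ret.fun / (len(xdata) - 2)
errors = np.sqrt(np.diag(cov) * redchisqr)     # mirrors the recipe above
print(ret.x, errors)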
Example #11
# requires: numpy as np and the project-local meas module; relies on
# module-level constants from Example #13 (_MM_PER_INCH, USE_MARY_DISPLACEMENTS,
# mary_first_pixel_displacements) and geometry helpers defined elsewhere.
def _populate_sectheta_grid( secant_matrices, all_coords, source_data,
                             average_over_source = False ):
       
    det_coords = all_coords.loc['detector']

    pu_238_angled_normal = meas.meas.from_list(
        np.array( [ - ( meas.meas( pu_238_angled_data[ 'upper_height' ],
                                   _source_data_delta )
                        - meas.meas( pu_238_angled_data[ 'lower_height' ],
                                     _source_data_delta ) ),
                    meas.meas( 0, _source_data_delta ),
                    meas.meas(
                        source_data.loc[ 'pu_238_angled', 'diameter' ],
                        _source_data_delta ).mean() ] ) )

    # print( pu_238_angled_normal )
    
    pu_238_angled_normal *= _MM_PER_INCH

    # print( pu_238_angled_normal ) 
    
    for sourcenum, source in enumerate( sources ) :

        # print( str(sourcenum) + ' / ' + str(len(sources)) + '...' )

        # extract coords and angles
        source_coords = all_coords.loc[ source ]

        # rename matrices in order to enhance readability 
        det_sectheta_grid = secant_matrices[ source ][ 0 ]
        source_sectheta_grid = secant_matrices[ source ][ 1 ]

        first_pixel_coords = det_coords - source_coords

        # debug: replace the first pixel coords with mary's
        if USE_MARY_DISPLACEMENTS : 
            first_pixel_coords = mary_first_pixel_displacements[ source ] 

        print( source + ': ' + str( first_pixel_coords ) )
        print( 'mary: ' + str( mary_first_pixel_displacements[ source ]  ) )
        
        # keep shifting by 2 mm to get next coord 
        for i in range(32):
            for j in range(32):

                # this works since all the pixels are separated by 2 mm.
                #pixel_displacement = 2.0 * rotate_3d( 1, 6, np.array([ -j, i, 0 ] ), deg=1 )
                pixel_displacement = 2.0 * np.array( [ -j, i, 0 ] )
                displacement = first_pixel_coords + pixel_displacement

                # get the angle relative to the detector pixel; apply_nd
                # propagates the uncertainty through cos(theta)
                costheta = displacement.apply_nd( costheta_from_3d_f,
                                                  costheta_from_3d_fprime_tuple )
                
                sectheta = 1 / costheta

                det_sectheta_grid[i][j] = sectheta

                if not average_over_source : 

                    if source != 'pu_238_angled' :
                        source_sectheta_grid[i][j] = sectheta

                    else:
                        # rotated_displacement = rotate_3d_meas( 1, -theta, displacement ) ) 
                        # source_costheta_grid[i][j] = costheta_from_3d( rotated_displacement )

                        # todo: proper error analysis.
                        tmp = ( np.linalg.norm( pu_238_angled_normal.x ) *
                                np.linalg.norm( displacement.x ) )

                        tmp /= meas.dot( pu_238_angled_normal, displacement )

                        source_sectheta_grid[i][j] = tmp        

                else:

                    # todo: haven't figured out how to do this calculation yet.
                    if source == 'pu_238_angled' :
                        tmp = ( np.linalg.norm( pu_238_angled_normal.x ) *
                                np.linalg.norm( displacement.x ) )

                        tmp /= meas.dot( pu_238_angled_normal, displacement )

                        source_sectheta_grid[i][j] = tmp

                    else:
                        source_radius = source_data.loc[ source, 'source_diameter' ] / 2 
                        ave_sectheta = (
                            compute_average_sectheta_over_source( displacement.x,
                                                                  np.array( [0.0, 0.0, 1.0] ),
                                                                  source_radius ) )

                        source_sectheta_grid[i][j] = meas.meas( ave_sectheta, 0 ) 
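
The geometry in the pixel loop reduces to: the displacement to pixel (i, j) is the first-pixel displacement plus a 2 mm pitch offset, and sec(theta) relative to the detector normal (the z axis, matching the [0, 0, 1] normal used above) is |r| / r_z. A plain-numpy sketch of the central values, without the meas error propagation (the pu_240 first-pixel vector is one of Mary's values quoted in Example #13):

import numpy as np

first_pixel = np.array([87.10, -9.31, 58.35])  # pu_240 displacement, mm

sectheta = np.empty((32, 32))
for i in range(32):
    for j in range(32):
        r = first_pixel + 2.0 * np.array([-j, i, 0])  # 2 mm pixel pitch
        sectheta[i, j] = np.linalg.norm(r) / r[2]     # 1 / cos(theta)

print(sectheta[0, 0], sectheta[31, 31])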
Example #12
# requires: numpy as np and the project-local meas module; also uses the
# module-level measurement tables (detector_data, enclosure_data, ceramic_data)
# and constants from Example #13.
def _populate_all_coords( all_coords, source_data ):

    # first handle the detector separately

    # option 1 : "ceramic L/R" to plate edge in the logbook means
    # measured from the left
    # results:

    det_coords = meas.meas.from_list( [ meas.meas( detector_data['x_offset'],
                                                   _source_data_delta ).mean(),
                                        meas.meas( detector_data['y_offset'],
                                                   _source_data_delta ).mean(),
                                        meas.meas( enclosure_data['total_z'],
                                                   _source_data_delta ).mean()
                                        - meas.meas( enclosure_data['top_offset'],
                                                   _source_data_delta ).mean()
                                        - meas.meas( enclosure_data['bottom_offset'],
                                                   _source_data_delta ).mean() ] )

    x_measurement_inverted = 0
    y_measurement_inverted = 0

    if x_measurement_inverted :
        total_x = ( np.mean( source_data.loc[ 'pu_240', 'left' ] )
                    + np.mean( source_data.loc[ 'pu_240', 'diameter' ] )
                    + np.mean( source_data.loc[ 'pu_240', 'right' ] ) )

        det_coords[0] = total_x - det_coords[0]

        
    if y_measurement_inverted :
        total_y = ( np.mean( source_data.loc[ 'pu_240', 'bottom' ] )
                    + np.mean( source_data.loc[ 'pu_240', 'diameter' ] )
                    + np.mean( source_data.loc[ 'pu_240', 'top' ] ) )
        
        det_coords[1] = total_y - det_coords[1]

    det_coords *= _MM_PER_INCH


    if x_measurement_inverted :
        shift = - ( ceramic_data[ 'total_x' ]
                    - detector_data[ 'total_width' ] ) / 2 
        det_coords += np.array( [ shift, 0, 0 ] )

    else:
        shift = ( ceramic_data[ 'total_x' ]
                  + detector_data[ 'total_width' ] ) / 2 
        det_coords += np.array( [ shift, 0, 0 ] )

        
    # if y_measurement_inverted :
    #     shift = - detector_data[ 'total_width' ]
    #     det_coords += np.array( [0, shift, 0 ] )

    # else:
    #     shift = ( ceramic_data[ 'total_y' ]
    #               - detector_data[ 'total_width' ] )
    #     det_coords += np.array( [0, shift, 0 ] )

    # the 1 mm additions center the pixel.
    det_coords += np.array( [ -1, 1, 0 ] )
    all_coords.loc['detector'] = det_coords
    # now populate all the source indices
    for source in sources_index:
        
        # first get x and y from the diameters / distance to edges.
        #  configurations = [ ['left','right','diameter'], ['top','bottom','diameter'] ]
        # for i in range(len(configurations)):
        
        # start off using only the left and bottom measurements. TODO: use value of total_x
        x, y = [ meas.meas( source_data.loc[ source, col ],
                            _source_data_delta ).mean()
                 + meas.meas( source_data.loc[ source, 'diameter' ],
                              _source_data_delta ).mean() / 2 
                 for col in [ 'left', 'bottom' ]  ]

        # the height measurements are referenced to the bottom of the enclosure.

        if source != 'pu_238_angled' : 
            z = meas.sum( [ meas.meas( source_data.loc[ source, 'height' ],
                                       _source_data_delta ).mean(),
                            meas.meas( source_data.loc[ source, 'wafer' ],
                                       _source_data_delta ).mean() ] )

        else:
            z = ( meas.meas( pu_238_angled_data.loc[ 'upper_height' ],
                             _source_data_delta ) +
                  meas.meas( pu_238_angled_data.loc[ 'lower_height' ],
                             _source_data_delta ) ) / 2

        xyz = meas.meas.from_list( [ x, y, z ] )
        xyz *= _MM_PER_INCH
        all_coords.loc[source] = xyz
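
Each source (x, y) center above is an edge-to-center conversion, center = edge + diameter / 2, computed in inches and then scaled to millimeters. For example (illustrative numbers):

left, diameter = 3.25, 1.00               # inches
x_mm = (left + diameter / 2) * 25.4       # 25.4 = _MM_PER_INCH
print(x_mm)                               # 95.25 mm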
Example #13

import os

import numpy as np
import meas   # project-local measurement module (assumed)

_current_abs_path = os.path.dirname( __file__ ) + '/'

_CM_PER_INCH = 2.54
_MM_PER_INCH = 25.4


USE_MARY_DISPLACEMENTS = 1

# these are the first pixel displacements as computed by Mary
mary_first_pixel_displacements = { 'pu_240' : meas.meas( [87.10, -9.31, 58.35], np.zeros(3) ),
                                   'cf_249' : meas.meas( [85.35, -46.06, 57.74], np.zeros(3) ),
                                   'pu_238_centered': meas.meas( [32.95, -31.45, 57.88], np.zeros(3) ),
                                   'pu_238_flat': meas.meas( [32.95, -31.45, 57.88], np.zeros(3) ),
                                   'pu_238_angled': meas.meas( [32.95, -31.45, 58.72], np.zeros(3) ),
                                   'pu_238_moved' : meas.meas( [-44.59, -28.58, 57.88], np.zeros(3) ) } 

Example #14
        # fragment excerpted from a loop over detectors d and strip indices k;
        # requires the project's dl_estimator, meas, and analysis_mgr objects.
        source_sectheta_tmp = det_sectheta_tmp

        channels_tmp = [[[
            channels[i][j][d] for j in range(num_peaks_per_source[i])
        ] for i in range(num_sources)]]

        savepath = (analysis_mgr.storage_path + '/dl_regression/%d/%d/' %
                    (analysis_mgr.detidx_to_detnum(d), k))

        dl_result = dl_estimator.estimate_deadlayers(
            model_params,
            channels_tmp,
            actual_energies,
            det_sectheta_tmp,
            source_sectheta_tmp,
            source_stopping_power_interps,
            source_deadlayer_guesses,
            det_stopping_power_interp,
            det_deadlayer_guess,
            calibration_coefs_guess,
            names=data_names,
            strip_coords=strip_coords,
            savepath=savepath)
        tmp1 = dl_result.params['source_constant_0_0']
        tmp2 = dl_result.params['source_constant_1_0']

        ret[:, k, d] = meas.meas([tmp1.value, tmp2.value],
                                 [tmp1.stderr, tmp2.stderr])

analysis_mgr.save_dill(ret, 'agg_dl_sums')