Example #1
def handle_diff(args):
    #handler for subcommand 'diff'
    if args.step_size <= 0:
        print "invalid step size!"
        sys.exit(1)
    #factor of 1e-12 to convert from pW to W
    pSat_control = 1e-12 * filehandler.IV_data_to_arr(args.iv0, "Bias_Power")
    pSat_heater = 1e-12 * filehandler.IV_data_to_arr(args.ivN, "Bias_Power")
    if pSat_control.shape != pSat_heater.shape:
        print "Dimensions of data in IV .out files don't match!"
        sys.exit(1)
    diff_power = pSat_control - pSat_heater
    diff_power_per_step = diff_power / float(args.step_size)
    #If an element in either pSat_control or pSat_heater is invalid (0),
    #corresponding entry in diff_power_per_step should also be invalid (0)
    for row in range(pSat_control.shape[0]):
        for col in range(pSat_control.shape[1]):
            if pSat_control[row][col] == 0 or pSat_heater[row][col] == 0:
                diff_power_per_step[row][col] = 0
    #Now write result to file
    if args.filename is None:
        filehandler.printout("Power_per_Dac(W/Dac)", diff_power_per_step,
                             sys.stdout)
    else:
        with open(args.filename, 'w') as f:
            filehandler.printout("Power_per_Dac(W/Dac)", diff_power_per_step,
                                 f)
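
The element-wise loop above that zeroes out entries where either saturation power is zero (the same masking pattern appears again in handle_cal in Example #2) can also be written as a single vectorized step. A minimal sketch, using toy arrays in place of the IV-file data read above:

import numpy as np

#Toy arrays standing in for the saturation-power arrays used above
pSat_control = np.array([[1.0e-12, 0.0], [2.0e-12, 3.0e-12]])
pSat_heater = np.array([[0.5e-12, 1.0e-12], [0.0, 2.0e-12]])
diff_power_per_step = (pSat_control - pSat_heater) / 2.0

#Mark an entry invalid (0) wherever either input is invalid (0)
invalid = (pSat_control == 0) | (pSat_heater == 0)
diff_power_per_step = np.where(invalid, 0.0, diff_power_per_step)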
Example #2
def handle_cal(args):
    #handler for subcommand "calibrate"
    #read in data; make sure it's a 3D array
    data = filehandler.get_mce_data(args.filename, row_col=True)
    if len(data.shape) != 3:
        print "Data is not three dimensional!"
        sys.exit(1)
    nrows = data.shape[0]
    ncols = data.shape[1]
    #Check which mode the program is in; that is, check whether user gave
    #STEPSIZE CALFILE DATAFILE, or IV0 IV_HEATER DATAFILE. Respond
    #appropriately.
    try:
        stepsize = float(args.arg1)
        calfile = args.arg2
        diff_biases = filehandler.IV_data_to_arr(calfile, "Power_per_Dac")
        if len(diff_biases) == 0:
            print calfile, "is invalid!"
            sys.exit(1)
        if data.shape[0:2] != diff_biases.shape:
            print "Data is not of same dimension as calibration file", calfile
            sys.exit(1)
        diff_biases *= stepsize
    except ValueError:
        #means 1st argument is not a number
        iv0 = args.arg1
        iv_heater = args.arg2
        pSat_control = 1e-12 * filehandler.IV_data_to_arr(iv0, "Bias_Power")
        if len(pSat_control) == 0:
            print iv0, "is invalid!"
            sys.exit(1)
        pSat_heater = 1e-12 * filehandler.IV_data_to_arr(
            iv_heater, "Bias_Power")
        if len(pSat_heater) == 0:
            print iv_heater, "is invalid!"
            sys.exit(1)
        if data.shape[0:2] != pSat_control.shape:
            print "Data is not of the same dimension as bias powers in", iv0
            sys.exit(1)
        if data.shape[0:2] != pSat_heater.shape:
            print "Data is not of the same dimension as bias powers"\
                " in",iv_heater
            sys.exit(1)
        diff_biases = pSat_control - pSat_heater
        #Traverse pSat_control and pSat_heater; if a 0 entry exists in either
        #file, set corresponding entry in diff_biases to 0 (invalid)
        for row in range(nrows):
            for col in range(ncols):
                if pSat_control[row][col] == 0 or pSat_heater[row][col] == 0:
                    diff_biases[row][col] = 0
    amplitudes = np.zeros((nrows, ncols))
    quartile_ranges = np.zeros((nrows, ncols))
    data_qualities = np.zeros((nrows, ncols))
    responsivities = np.zeros((nrows, ncols))
    periods = np.zeros((nrows, ncols))
    num_amplitudes = np.zeros((nrows, ncols))
    #printout("step_quality",data_qualities)
    print "User input read-in successful. Beginning computation."
    for col in range(ncols):
        print "On col", col
        for row in range(nrows):
            single_data = data[row][col]
            x = StepAnalyzer(single_data, verbose=False)
            amplitude = x.get_med_amplitude()
            quartile_range = x.get_quartile_amplitude()
            method_used = x.get_method_used()
            data_quality = 0
            if method_used in ('ramping', 'noise', 'off'):
                data_quality = 0
            elif method_used == 'std':
                data_quality = 1
            elif method_used == 'cross-corr' and x.crosscorr_error > 0:
                data_quality = 2
            elif method_used == 'cross-corr' and x.crosscorr_error == 0:
                data_quality = 3
            elif method_used == 'best':
                data_quality = 4
            data_qualities[row][col] = data_quality
            if amplitude is not None:
                amplitudes[row][col] = amplitude
                if diff_biases[row][col] > 0:
                    responsivities[row][col] = diff_biases[row][col] / amplitude
            if quartile_range is not None and amplitude is not None:
                quartile_ranges[row][col] = quartile_range / amplitude
            if method_used in ('best', 'cross-corr') and x.period is not None:
                periods[row][col] = x.period
                num_amplitudes[row][col] = len(x.amplitudes)
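    #to_amps converts a step amplitude from filtered DAC feedback units to
    #amperes (dividing out the filter gain, the DAC resolution 2**dac_bits,
    #the (1./50)/(1./Rfb + 1./50) divider factor, and M_ratio*Rfb), while
    #to_dac only removes the filter gain. responsivities above are in W per
    #filtered DAC unit, so dividing by to_dac or to_amps below gives W/Dac
    #or W/A respectively.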
    to_amps = 1. / filtgain * 1. / 2**dac_bits * (1. / 50) / (
        1. / Rfb + 1. / 50) / (M_ratio * Rfb)
    to_dac = 1. / filtgain
    f = args.filename  #destination file
    filehandler.printout("step_quality", data_qualities, f)
    filehandler.printout("step_amplitude(A)", amplitudes * to_amps, f)
    filehandler.printout("amplitude_quartile_range(fraction)", quartile_ranges,
                         f)
    filehandler.printout("responsivity(W/Dac)", responsivities / to_dac, f)
    filehandler.printout("responsivity(W/A)", responsivities / to_amps, f)
    filehandler.printout("period(indices)", periods, f)
    filehandler.printout("num_amplitudes", num_amplitudes, f)
Example #3
    def __init__(self,
                 data,
                 filtgain=1218,
                 dac_bits=14,
                 M_ratio=8.5,
                 Rfb=7084,
                 sampling_freq=399.,
                 NFFT=256,
                 ivfile=None,
                 mapfile='mce_pod_map.txt',
                 use_mapfile=True,
                 col_pos_in_file=2,
                 row_pos_in_file=3,
                 bad_cols=None,
                 subtract_mean=False,
                 filter_params=None):
        """Initializes the object. datafile is the file containing the data,
        and will be read with mce_data. filtgain, dac_bits, M_ratio, and
        sampling_freq are parameters used to scale the power spectrum to units
        of A^2/Hz. If use_mapfile is True, the class will use mapfile to figure
        out which MCE coordinates have detectors, and ignore those that do
        not. col_pos_in_file and row_pos_in_file indicate the columns in
        mapfile in which MCE column and the first set of row data can be
        found. A second set of row data is assumed to exist in the column
        immediately to the right. Counting starts at 0, not 1. subtract_mean
        indicates whether the mean should be subtracted from the data. Note
        that this doesn't affect any PSD plot, and only affects timestream
        plots.  filter_params are the parameters of the integrated Butterworth
        filter, to be used to correct the power spectrum. It must be either
        None, in which case no correction is done, or a dictionary containing
        all of the following parameters: type, n_points, real_samp_freq, b11,
        b12, b21, b22, k1, k2. Refer to power_filter module for explanations of
        these parameters."""
        self.data = data
        self.nrows = self.data.shape[0]
        self.ncols = self.data.shape[1]
        if NFFT % 2 != 0:
            raise ValueError("NFFT must be divisible by 2!")
        self.NFFT = NFFT
        self.filter_params = filter_params
        self.all_psds = []
        self.dark_squids = []
        self.dark_detectors = []
        self.ramping_detectors = []
        self.pos_in_formatset = 0
        if self.filter_params is not None:
            args = self.filter_params  #args to be passed to power_filter
            required = ('type', 'real_samp_freq', 'b11', 'b12',
                        'b21', 'b22', 'k1', 'k2')
            if any(key not in args for key in required):
                raise KeyError("filter parameters missing!")

            args['n_points'] = NFFT / 2 + 1
            args['eff_samp_freq'] = sampling_freq
            self.pow_response = power_filter.get_pow_response(**args)

        if subtract_mean:
            for i in range(0, self.nrows):
                for j in range(0, self.ncols):
                    self.data[i][j] -= np.mean(self.data[i][j])
        self.filtgain = filtgain
        self.dac_bits = dac_bits
        self.M_ratio = M_ratio
        self.Rfb = Rfb
        self.sampling_freq = sampling_freq
        if use_mapfile:
            #initialize all to False
            self.valid_coors = np.zeros((self.nrows, self.ncols)) > 1
            self.build_valid_coordinates_array(mapfile, col_pos_in_file,
                                               row_pos_in_file)
        else:
            #initialize all to True
            self.valid_coors = np.ones((self.nrows, self.ncols)) > 0
        #initialize all to False
        self.is_col_invalid = np.zeros(self.ncols) > 1
        if bad_cols is not None:
            for col in bad_cols:
                self.is_col_invalid[col] = True
        if ivfile is not None:
            self.use_responsivities = True
            responsivities = filehandler.IV_data_to_arr(ivfile, "Responsivity")
            self.dark_squid_thres = math.sqrt(self.dark_squid_thres)
            self.dark_squid_thres *= 2**dac_bits * Rfb * M_ratio * 1e-16
            self.ramping_med_thres = math.sqrt(self.ramping_med_thres)
            self.ramping_med_thres *= 2**dac_bits * Rfb * M_ratio * 1e-16
        else:
            self.use_responsivities = False
        for i in range(0, self.nrows):  #initialize all PSDs
            row_psds = []
            for j in range(0, self.ncols):
                psd = self.get_psd(self.data[i][j])
                if ivfile is not None:
                    watts_conver = (2**dac_bits * Rfb * M_ratio
                                    * responsivities[i][j])
                    psd = np.sqrt(psd) * watts_conver
                row_psds.append(psd)
            self.all_psds.append(row_psds)
        self.all_psds = np.array(self.all_psds)

        self.dark_squids = self.get_dark_squids()
        self.dark_detectors = self.get_dark_detectors()
        self.ramping_detectors = self.get_ramping_detectors()
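
The __init__ docstring above lists the keys that filter_params must contain; n_points and eff_samp_freq are added by __init__ itself from NFFT and sampling_freq. A minimal sketch of such a dictionary, in which every value is a placeholder rather than a real filter coefficient:

#Placeholder filter_params; the keys match what __init__ checks for, but all
#values here are made up for illustration (see the power_filter module for
#their actual meanings).
filter_params = {
    'type': 'butterworth',  #placeholder filter type
    'real_samp_freq': 399.,
    'b11': 0.0, 'b12': 0.0,
    'b21': 0.0, 'b22': 0.0,
    'k1': 0.0, 'k2': 0.0,
}
#__init__ then fills in the rest:
#    filter_params['n_points'] = NFFT / 2 + 1
#    filter_params['eff_samp_freq'] = sampling_freq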
Example #4
                    '--bins',
                    type=int,
                    default=10,
                    help='the number of bins to use in the histograms.')
parser.add_argument('-p',
                    '--plot',
                    type=int,
                    default=0,
                    help='set to 1 to plot histograms')
parser.add_argument('--exclude-cols',
                    type=int,
                    nargs='*',
                    metavar='COL',
                    help='The columns to exclude. If no argument is given,'
                         ' excludes all columns from 4 to 15 inclusive.')
args = parser.parse_args()

#process arguments
data = fh.IV_data_to_arr(args.filename, args.search_string)
if data is None or len(data) == 0:
    print "Your search_string returned no data!"
    sys.exit(1)
if args.save is not None:
    save_dir = args.save
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    results_path = os.path.join(save_dir, 'stats_results')
    f = open(results_path, 'w')
else:
    #print to standard out by default
    f = sys.stdout
bad_cols = []
if args.exclude_cols is not None and len(args.exclude_cols) == 0:
    #user specified --exclude-cols without any arguments;
    #per the help text, exclude columns 4 through 15 inclusive
    bad_cols = range(4, 16)
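
The check above relies on argparse's behaviour with nargs='*': the attribute is None when the flag is absent, and an empty list when the flag is given without values. A minimal standalone illustration (the parser here is a throwaway, not the script's parser):

import argparse

p = argparse.ArgumentParser()
p.add_argument('--exclude-cols', type=int, nargs='*', metavar='COL')

assert p.parse_args([]).exclude_cols is None                #flag absent
assert p.parse_args(['--exclude-cols']).exclude_cols == []  #flag, no values
assert p.parse_args(['--exclude-cols', '4', '5']).exclude_cols == [4, 5]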
Example #5
 def draw_ellipses(self,
                   raw_data_file,
                   search_string,
                   A_or_B,
                   major_axis=20,
                   minor_axis=6,
                   angle=None,
                   min_val=0,
                   max_val=1,
                   convert='nochange',
                   function=0,
                   title=None,
                   fontsize=12,
                   fontweight='normal',
                   cbar_label=None,
                   new_figure=True,
                   legend=None,
                   legend_name=None,
                   colorbar_lim=None,
                   max_color_range=True,
                   combine_colorbar=False,
                   fig_num=1):
     """Draws ellipses for probe A_or_B.  Uses data from raw_data_file,
     searched for using search_string. Ellipses parameters given by
     major_axis, minor_axis, and angle (counterclockwise from horizontal).
     If angle is None, uses self.angle_file; if that's also None, throws
     error.  Data restricted according to convert, min_val, and max_val.
     The title of the graph is title (with properties fontsize and
     fontweight), while the label of the color bar is cbar_label. new_figure
     indicates whether a new figure should be created for plotting. A legend
     is made according to dictionary legend, with keys being names and
     values representing colors.  colorbar_lim sets range of numbers
     represented by colors.  If max_color_range is True, color bar
     represents entire possible range of colors; else, it represents range
     of colors in the data.  combine_colorbar is only relevant if multiple
     sets of data are being plotted on the same figure. In that case, 1
     colorbar is drawn if it is True, and a colorbar is drawn for each set
     of data if it is False. Returns None."""
     if angle is None and self.angle_file is None:
         raise ValueError("No angle or angle file specified!")
     if not new_figure and len(plt.get_fignums()) == 0:
         print "You are stupid.  Here is a new figure anyways."
         print "new_figure is set to True."
         new_figure = True
     if cbar_label is None and not combine_colorbar:
         cbar_label = search_string
     elif cbar_label is None and combine_colorbar:
         #we can't make a reasonable colorbar label, so don't try
         cbar_label = ""
     data_array = filehandler.IV_data_to_arr(raw_data_file, search_string)
     if len(data_array) == 0:
         print "Yo homeboy! Your search_string returned no valid data!"
         print "You are so dumb. You are really dumb. For real."
         sys.exit(1)
     if A_or_B == 'A':
         try:
             formatted_data = filehandler.data_to_pod_feed_fmt(
                 data_array,
                 col_location=2,
                 row_location=3,
                 map_file=self.map_file)
         except IndexError:
             print "Your search_string did not return enough valid data!"
             sys.exit(1)
     elif A_or_B == 'B':
         try:
             formatted_data = filehandler.data_to_pod_feed_fmt(
                 data_array,
                 col_location=2,
                 row_location=4,
                 map_file=self.map_file)
         except IndexError:
             print "Your search_string did not return enough valid data!"
             sys.exit(1)
     else:
         raise ValueError("Probe should be either A or B!")
     modified_data = self.modify_data(formatted_data, min_val, max_val,
                                      convert, function)
     #        plt.figure(fig_num)
     if new_figure:
         plt.figure()
         plt.gcf().graph_title = ''
     ec = self.TransDisplay(modified_data, A_or_B, major_axis, minor_axis,
                            angle, cbar_label, colorbar_lim,
                            max_color_range, combine_colorbar)
     if legend is not None:
         self.set_legend(legend, ec, legend_name)
     self.set_title(raw_data_file, search_string, A_or_B, title, new_figure)
     drawer.set_size_weight(fontsize, fontweight)
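
As the docstring above describes, the legend argument is a plain dictionary whose keys are display names and whose values are colors; legend_name is forwarded to set_legend. A minimal sketch of what a caller might build, with purely illustrative names and colors:

#Illustrative legend: display names -> matplotlib colors
legend = {'probe A feeds': 'blue', 'dark detectors': 'black'}
legend_name = 'Detector type'  #illustrative label passed on to set_legend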