def pulsePlots():

    print "\nStart producing PULSE plots, batches:", md.batchNumbers, "\n"

    for batchNumber in md.batchNumbers:

        p_main.defineNameOfProperties()

        runNumbers = md.getAllRunNumbers(batchNumber)

        numpy_arrays = [
            np.empty(0, dtype=dm.getDTYPE(batchNumber))
            for _ in range(len(p_main.var_names))
        ]

        for runNumber in runNumbers:

            md.defineRunInfo(md.getRowForRunNumber(runNumber))

            if runNumber not in md.getRunsWithSensor():
                continue

            for index in range(0, len(p_main.var_names)):
                numpy_arrays[index] = np.concatenate(
                    (numpy_arrays[index],
                     dm.exportImportROOTData("pulse",
                                             p_main.var_names[index])),
                    axis=0)

        if len(numpy_arrays[0]) != 0:
            producePulsePlots(numpy_arrays)

    print "Done with producing PULSE plots.\n"
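
# --- Hedged sketch (illustration, not part of the module) ---
# The accumulation pattern used in pulsePlots() above: one structured numpy array
# per variable, with every run's exported data concatenated onto it. The dtype
# below is an assumption standing in for dm.getDTYPE(batchNumber).
import numpy as np

example_dtype = [("chan0", "<f8"), ("chan1", "<f8")]    # assumed channel layout
accumulated = np.empty(0, dtype=example_dtype)          # one such array per variable

for run_chunk in (np.zeros(3, dtype=example_dtype),     # stand-ins for per-run exports
                  np.zeros(2, dtype=example_dtype)):
    accumulated = np.concatenate((accumulated, run_chunk), axis=0)

assert len(accumulated) == 5                            # runs stacked end to end
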
def pulseAnalysis():

    defineNameOfProperties()
    startTime = dm.getTime()
    
    print "\nStart PULSE analysis, batches:", md.batchNumbers
    
    for batchNumber in md.batchNumbers:
        
        runNumbers = md.getAllRunNumbers(batchNumber)
        
        startTimeBatch = dm.getTime()
    
        print "Batch:", batchNumber, len(runNumbers), "run files.\n"
      
        for runNumber in runNumbers:
            
            md.defineRunInfo(md.getRowForRunNumber(runNumber))
            
            if not dm.checkIfFileAvailable("pulse"):
                continue
            
            pulseAnalysisPerRun()
            
        print "Done with batch", batchNumber, "Time analysing: "+str(dm.getTime()-startTimeBatch)+"\n"
    
    print "Done with PULSE analysis. Time analysing: "+str(dm.getTime()-startTime)+"\n"
def timingPlots():

    print "\nStart producing TIMING RESOLUTION plots, batches:", md.batchNumbers, "\n"

    global var_names
    
    for batchNumber in md.batchNumbers:

        runNumbers = md.getAllRunNumbers(batchNumber)
        
        # Create numpy arrays for linear time difference (one element per "channel")
        numpy_arrays = [np.empty(0, dtype = dm.getDTYPE(batchNumber)) for _ in range(2)]

        # Create numpy arrays for system of equations (three elements per "channel")
        numpy_arrays.append(np.empty(0, dtype = dm.getDTYPESysEq()))
        numpy_arrays.append(np.empty(0, dtype = dm.getDTYPESysEq()))

        var_names = ["normal_peak", "normal_cfd", "system_peak", "system_cfd"]
       
        for runNumber in runNumbers:
        
            md.defineRunInfo(md.getRowForRunNumber(runNumber))
            
            if not dm.checkIfROOTDataFileExists("timing", "normal_peak"):
                t_calc.createTimingFiles(batchNumber)
            
            # Skip runs which are not in synch
            if runNumber not in md.getRunsWithSensor() or runNumber in [3697, 3698, 3701]:
                continue
        
            for index in range(len(var_names)):

                # Omit batch 60X for solving the system of equations
                if var_names[index].find("system") != -1 and md.getBatchNumber()/100 == 6:
                    continue

                numpy_arrays[index] = np.concatenate(
                    (numpy_arrays[index],
                     dm.exportImportROOTData("timing", var_names[index])),
                    axis=0)

        if numpy_arrays[0].size != 0:
            
            for index in range(len(var_names)):

                # Omit batch 60X for solving the system of equations
                if var_names[index].find("system") != -1 and md.getBatchNumber()/100 == 6:
                    continue

                produceTimingDistributionPlots(numpy_arrays[index], var_names[index])


    print "\nDone with producing TIMING RESOLUTION plots.\n"
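
# --- Hedged sketch (illustration, not part of the module) ---
# The "omit batch 60X" checks above rely on Python 2 integer division:
# md.getBatchNumber()/100 == 6 holds exactly for batches 600-699. The same test
# written version-independently with floor division:
def is_batch_60X(batch_number):
    return batch_number // 100 == 6

assert is_batch_60X(606) and not is_batch_60X(702)
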
def createTimingFiles(batchNumber):

    runNumbers = md.getAllRunNumbers(batchNumber)

    startTimeBatch = dm.getTime()

    print "\nBatch:", batchNumber, len(runNumbers), "run files.\n"
    
    for runNumber in runNumbers:
        
        md.defineRunInfo(md.getRowForRunNumber(runNumber))
        
        if not dm.checkIfFileAvailable("timing"):
            continue
    
        print "Run", runNumber, "\n"
        
        # Import files per run
        peak_time = dm.exportImportROOTData("pulse", "peak_time")
        cfd = dm.exportImportROOTData("pulse", "cfd")
        
        # Perform linear calculations
        time_diff_peak = getTimeDifferencePerRun(peak_time)
        time_diff_cfd = getTimeDifferencePerRun(cfd)
        
        # Export the linear time differences per run number
        dm.exportImportROOTData("timing", "normal_peak", time_diff_peak)
        dm.exportImportROOTData("timing", "normal_cfd", time_diff_cfd)
        
        if batchNumber/100 != 6:
            # Perform the system-of-equations calculations
            time_diff_peak_sys_eq = getTimeDifferencePerRunSysEq(peak_time)
            time_diff_cfd_sys_eq = getTimeDifferencePerRunSysEq(cfd)

            # Export the system-of-equations time differences per run number
            dm.exportImportROOTData("timing", "system_peak", time_diff_peak_sys_eq)
            dm.exportImportROOTData("timing", "system_cfd", time_diff_cfd_sys_eq)

        print "Done with run", runNumber, "\n"
    
    print "Done with batch", batchNumber, "Time analysing: "+str(dm.getTime()-startTimeBatch)+"\n"
def importResultsValues(sensor_data, category_subcategory):

    global oneSensorInLegend

    if category_subcategory.endswith('gain'):
        category_subcategory = category_subcategory[:-5]
        gain_category = True

    else:
        gain_category = False

    # Import all files, i.e. one per pad, temperature and bias voltage
    for batchNumber in md.getAllBatchNumbers():
        for chan in md.getAllChannelsForSensor(batchNumber, processed_sensor):

            if batchNumber not in md.getAllBatchNumberForSensor(
                    processed_sensor) or omitBadData(batchNumber,
                                                     category_subcategory):
                continue

            md.defineRunInfo(
                md.getRowForRunNumber(md.getAllRunNumbers(batchNumber)[0]))

            md.setChannelName(chan)

            if md.getDUTPos() in ["3_0", "3_1", "3_3", "8_1", "7_2", "7_3"]:
                continue

            # Define the name for the histogram, depending on type
            if category_subcategory.find(
                    "pulse_amplitude") == -1 and category_subcategory.find(
                        "charge") == -1:

                group = "timing"
                chan2 = ""
                parameter_number = 2

                if category_subcategory.find("system") != -1:
                    if md.chan_name not in [
                            "chan0", "chan1", "chan2", "chan3"
                    ]:
                        continue

                    category = "system"
                    # Pair each channel with the next one: chan0->chan1, ..., chan3->chan0
                    chan2 = "chan" + str((int(md.chan_name[-1]) + 1) % 4)

                else:
                    category = "normal"

                if category_subcategory.endswith('cfd'):

                    subcategory = "cfd"

                else:

                    subcategory = "peak"

                if category_subcategory.find(
                        "rise_time") != -1 or category_subcategory.find(
                            "noise") != -1:
                    group = "pulse"
                    category = category_subcategory
                    subcategory = ""
                    chan2 = ""

                # Import the histogram which contains the results
                histogram = dm.exportImportROOTHistogram(
                    group, category, subcategory, chan2)

                if histogram:
                    fit_function = histogram.GetFunction("gaus")

                    if category_subcategory.find(
                            "noise") != -1 or category_subcategory.find(
                                "rise_time") != -1:
                        parameter_number = 1

                    results = [
                        fit_function.GetParameter(parameter_number),
                        fit_function.GetParError(parameter_number)
                    ]

                else:
                    continue

            # pulse and gain
            else:

                histogram = dm.exportImportROOTHistogram(
                    "pulse", category_subcategory)
                if histogram:
                    th_name = "_" + str(
                        md.getBatchNumber()) + "_" + md.chan_name
                    function_name = "Fitfcn_" + category_subcategory + th_name
                    fit_function = histogram.GetFunction(function_name)
                    try:
                        fit_function.GetTitle()
                    except:
                        continue
                    results = [
                        fit_function.GetParameter(1),
                        fit_function.GetParError(1)
                    ]
                else:
                    continue

            # For timing categories, subtract the SiPM reference contribution in quadrature
            if category_subcategory.find(
                    "normal") != -1 or category_subcategory.find(
                        "system") != -1:

                results[0] = np.sqrt(
                    np.power(results[0], 2) - np.power(md.getSigmaSiPM(), 2))

            value_error = [results[0], results[1]]
            voltage = md.getBiasVoltage()

            # For the timing resolution vs gain, replace the bias voltage with gain
            if (category_subcategory.find("system") != -1
                    or category_subcategory.find("normal") != -1
                ) and gain_category:

                histogram = dm.exportImportROOTHistogram("pulse", "charge")
                th_name = "_" + str(md.getBatchNumber()) + "_" + md.chan_name
                function_name = "Fitfcn_" + "charge" + th_name
                fit_function = histogram.GetFunction(function_name)

                gain = fit_function.GetParameter(
                    1) / md.getChargeWithoutGainLayer()

                voltage = int(gain)  # round the gain down to a whole number to group similar gain values

            temperature = str(md.getTemperature())

            DUT_pos = md.getDUTPos()

            omitRun = False

            # Among all batches, choose the one with the smallest error.
            for index in range(0, len(sensor_data[temperature][DUT_pos])):
                sensor_results = sensor_data[temperature][DUT_pos][index]

                # Check whether this bias voltage has already been filled; otherwise it is appended below
                if voltage == sensor_results[0]:

                    omitRun = True

                    # For the same voltage, choose the one with smallest error.
                    if value_error[1] < sensor_results[1][1]:

                        sensor_data[temperature][DUT_pos][index] = [
                            voltage, value_error
                        ]

            if not omitRun:
                sensor_data[temperature][DUT_pos].append(
                    [voltage, value_error])

    oneSensorInLegend = True
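
# --- Hedged sketch (illustration, not part of the module) ---
# The subtraction in importResultsValues() removes the SiPM reference contribution
# from the fitted width in quadrature: sigma_DUT = sqrt(sigma_meas^2 - sigma_SiPM^2).
# The numbers below are made up for illustration; md.getSigmaSiPM() supplies the
# real reference value.
import numpy as np

sigma_measured = 45.0   # fitted Gaussian width [ps], assumed value
sigma_sipm = 15.0       # stand-in for md.getSigmaSiPM()
sigma_dut = np.sqrt(np.power(sigma_measured, 2) - np.power(sigma_sipm, 2))   # ~42.4 ps
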
def produceResults():

    global canvas
    global processed_sensor
    global bias_voltage_max

    bias_voltage_max = 350

    categories = [
        "noise", "pulse_amplitude", "charge", "rise_time", "normal_peak",
        "system_peak", "normal_cfd", "system_cfd", "normal_peak_gain",
        "system_peak_gain", "normal_cfd_gain", "system_cfd_gain",
        "normal_peak_gain_zoom", "system_peak_gain_zoom",
        "normal_cfd_gain_zoom", "system_cfd_gain_zoom"
    ]

    canvas = ROOT.TCanvas("Results", "Results")

    sensorNames = md.getAvailableSensors()
    sensorNames.remove("SiPM-AFP")

    sensorNames.sort()

    if md.sensor != "":
        sensorNames = [md.sensor]

    resultsDict = dict()
    resultGraphs = dict()
    legend = dict()

    print "\nStart RESULTS"

    zoom = False

    # loop through each category
    for category in categories:

        print "\n", category, "\n"

        if category.endswith("zoom"):
            zoom = True
            category = category[:-5]

        category_graph = ROOT.TMultiGraph()
        legend_graph = ROOT.TLegend(0.7, 0.9, 0.9, 0.6)

        graph = dict()

        doOnce = True

        for processed_sensor in sensorNames:

            print processed_sensor

            md.defineRunInfo(
                md.getRowForRunNumber(
                    md.getRunsWithSensor(processed_sensor)[0]))
            md.setChannelName(md.getChannelNameForSensor(processed_sensor))

            graph[processed_sensor] = dict()
            sensor_data = dict()

            # Create TGraphErrors for each sensor, temperature and position (in the case of array pads)
            for temperature in md.getAvailableTemperatures():

                graph[processed_sensor][temperature] = dict()
                sensor_data[temperature] = dict()

                if processed_sensor == "W4-S204_6e14" and doOnce:
                    graph["W4-S204_6e14"]["22"] = dict()
                    graph["W4-S204_6e14"]["22"]["7_0"] = ROOT.TGraphErrors()
                    r_plot.setMarkerType(graph["W4-S204_6e14"]["22"]["7_0"],
                                         "7_0", temperature)
                    doOnce = False

                for DUT_pos in availableDUTPositions(processed_sensor):
                    graph[processed_sensor][temperature][
                        DUT_pos] = ROOT.TGraphErrors()
                    sensor_data[temperature][DUT_pos] = []

                    # Change each marker type and color
                    r_plot.setMarkerType(
                        graph[processed_sensor][temperature][DUT_pos], DUT_pos,
                        temperature)

            importResultsValues(sensor_data, category)

            r_plot.addValuesToGraph(
                [sensor_data, category, legend_graph, graph, category_graph])

        r_plot.drawAndExportResults(category, category_graph, legend_graph,
                                    zoom)
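
# --- Hedged sketch (illustration, not part of the module) ---
# The bookkeeping in produceResults(): one ROOT.TGraphErrors per combination of
# sensor, temperature and DUT position, all collected into a TMultiGraph for the
# category. The sensor name, voltage and result below are illustrative only.
import ROOT

category_graph = ROOT.TMultiGraph()
graph = {"sensorA": {"22": {"3_0": ROOT.TGraphErrors()}}}

g = graph["sensorA"]["22"]["3_0"]
g.SetPoint(0, 200.0, 35.0)      # x: bias voltage [V], y: result (e.g. time resolution [ps])
g.SetPointError(0, 0.0, 1.5)    # error bar on the result
category_graph.Add(g)
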
def printWaveform(runNumber, sensor, event = 0):
    
    # Define global variables
    md.defineRunInfo(md.getRowForRunNumber(runNumber))
    dm.defineDataFolderPath()
    chan = md.getChannelNameForSensor(sensor)
    md.setChannelName(chan)
    
    # Create TMultigraph and define underlying graphs
    multi_graph = ROOT.TMultiGraph()
    canvas = ROOT.TCanvas("Waveforms","Waveforms")
    legend = ROOT.TLegend(0.65, 0.9, 0.9, 0.6)

    # Import the event from the oscilloscope file
    data_import = dm.getOscilloscopeData(event, event+1)
    data = -data_import[chan][0]
    
    # Find the noise and pedestal, and define the threshold for finding signals
    timeScope = 0.1     # sampling interval in ns per point
    N = 4.27            # multiplier of the noise level used for the signal threshold
    noise, pedestal = p_calc.calculateNoiseAndPedestal(data)
    threshold = N * noise + pedestal

    # Define point difference for second degree fit and maximum signal limit (saturated signals)
    signal_limit_DUT = 0.3547959
    point_difference = 2
    
    # Calculate pulse characteristics (based on the methods from pulse_calculations.py)
    peak_value, peak_time, poly_fit = p_calc.calculatePulseAmplitude(data, pedestal, signal_limit_DUT, True)
    rise_time, cfd, linear_fit, linear_fit_indices = p_calc.calculateRiseTime(data, pedestal, True)
    charge = p_calc.calculateCharge(data, pedestal)
    point_count = p_calc.calculatePoints(data, threshold)
    max_sample = np.amax(data) - pedestal


    # Define ROOT objects for each type of graph
    graph_waveform = ROOT.TGraph(len(data))
    graph_threshold = ROOT.TGraph(2)
    graph_pulse_amplitude = ROOT.TGraph(2)
    graph_max_sample = ROOT.TGraph(2)
    graph_cfd = ROOT.TGraph(2)
    graph_peak_time = ROOT.TGraph(2)
    graph_10 = ROOT.TGraph(2)
    graph_90 = ROOT.TGraph(2)
    graph_pedestal = ROOT.TGraph(2)
    graph_noise = ROOT.TGraph(2)
    graph_linear_fit = ROOT.TGraph(len(linear_fit_indices))
    graph_2nd_deg_fit = ROOT.TGraph(point_difference*2+1)

   
    # Find points to draw the shade showing the charge
    pedestal_points = p_calc.getConsecutiveSeries(data, np.argwhere(data > pedestal).flatten())
    n = len(pedestal_points)+1
    charge_fill = ROOT.TGraph(2*n)
    fillOnce = True

    # Draw the waveform and the charge fill
    for index in range(0, len(data)):
        
        graph_waveform.SetPoint(index, index*0.1, data[index]*1000)

        if index > pedestal_points[0]-1 and fillOnce:

            for i in range(0, n):

                charge_fill.SetPoint(i,   0.1 * (i+index),     data[i+index] * 1000)
                charge_fill.SetPoint(n+i, 0.1 * (n-i+index-1), pedestal * 1000)

            fillOnce = False


    # Draw the second degree fit
    first_index = np.argmax(data) - point_difference
    last_index = np.argmax(data) + point_difference
    poly_fit_range = np.arange(first_index, last_index, 0.1)

    for index in range(len(poly_fit_range)):
        time = poly_fit_range[index]*timeScope
        value = poly_fit[0] * np.power(time, 2) + poly_fit[1] * time + poly_fit[2] + pedestal
        graph_2nd_deg_fit.SetPoint(index, time, value*1000)
    
    # Draw the linear fit
    for index in range(len(linear_fit_indices)):
        time = linear_fit_indices[index]*timeScope
        value = linear_fit[0]*time + linear_fit[1]
        graph_linear_fit.SetPoint(index, time, value*1000)

    # Draw lines (by setting two points at the beginning and the end)
    graph_threshold.SetPoint(0,0, threshold*1000)
    graph_threshold.SetPoint(1,1002, threshold*1000)

    graph_noise.SetPoint(0,0, (noise+pedestal)*1000)
    graph_noise.SetPoint(1,1002, (noise+pedestal)*1000)

    graph_pedestal.SetPoint(0,0, pedestal*1000)
    graph_pedestal.SetPoint(1,1002, pedestal*1000)
    
    graph_pulse_amplitude.SetPoint(0,0, peak_value*1000)
    graph_pulse_amplitude.SetPoint(1,1002, peak_value*1000)
    
    graph_max_sample.SetPoint(0,0, max_sample*1000)
    graph_max_sample.SetPoint(1,1002, max_sample*1000)
    
    graph_cfd.SetPoint(0, cfd, -30)
    graph_cfd.SetPoint(1, cfd, 500)

    graph_peak_time.SetPoint(0, peak_time, -30)
    graph_peak_time.SetPoint(1, peak_time, 500)

    graph_10.SetPoint(0,0, peak_value*0.1*1000)
    graph_10.SetPoint(1,1002, peak_value*0.1*1000)
    graph_90.SetPoint(0,0, peak_value*0.9*1000)
    graph_90.SetPoint(1,1002, peak_value*0.9*1000)


    # Define line and marker attributes
    graph_waveform.SetLineWidth(2)
    graph_waveform.SetMarkerStyle(6)
    graph_waveform.SetLineColor(2)

    graph_linear_fit.SetLineWidth(3)
    graph_linear_fit.SetLineColorAlpha(1, 0.75)
    graph_linear_fit.SetMarkerColorAlpha(1, 0.0)
    
    graph_2nd_deg_fit.SetLineWidth(3)
    graph_2nd_deg_fit.SetLineColorAlpha(3, 0.75)
    graph_2nd_deg_fit.SetMarkerColorAlpha(1, 0.0)

    graph_cfd.SetLineStyle(7)
    graph_cfd.SetLineColor(8)
   
    graph_pulse_amplitude.SetLineColor(4)

    graph_peak_time.SetLineColor(8)
    graph_pedestal.SetLineColor(6)
    graph_noise.SetLineColor(7)
    graph_threshold.SetLineColor(1)
    graph_max_sample.SetLineColor(2)
    graph_10.SetLineColor(7)
    graph_90.SetLineColor(7)
    charge_fill.SetFillStyle(3013)
    charge_fill.SetFillColor(4)
    

    # Add the graphs to multigraph
    multi_graph.Add(graph_waveform)
    multi_graph.Add(graph_noise)
    multi_graph.Add(graph_threshold)
    multi_graph.Add(graph_2nd_deg_fit)
    multi_graph.Add(graph_linear_fit)
    multi_graph.Add(graph_pulse_amplitude)
    multi_graph.Add(graph_max_sample)
    multi_graph.Add(graph_10)
    multi_graph.Add(graph_90)
    multi_graph.Add(graph_cfd)
    multi_graph.Add(graph_peak_time)
    multi_graph.Add(graph_pedestal)
    multi_graph.Add(charge_fill, "f")

    
    # Add the information to a legend box
    legend.AddEntry(graph_waveform, "Waveform " + md.getSensor(), "l")
    legend.AddEntry(graph_noise, "Noise: "+str(noise*1000)[:4]+" mV", "l")
    legend.AddEntry(graph_pedestal, "Pedestal: "+str(pedestal*1000)[:4]+" mV", "l")
    legend.AddEntry(graph_threshold, "Threshold: "+str(threshold*1000)[:5]+" mV", "l")
    legend.AddEntry(graph_max_sample, "Max sample: "+str(max_sample*1000)[:5]+" mV", "l")
    legend.AddEntry(graph_waveform, "Points above threshold: "+str(point_count), "l")
    legend.AddEntry(graph_pulse_amplitude, "Pulse amplitude: "+str(peak_value[0]*1000)[:5]+" mV", "l")
    legend.AddEntry(graph_peak_time, "Time at peak: " + str(peak_time[0])[0:5] + " ns", "l")
    legend.AddEntry(graph_linear_fit, "Rise time: "+str(rise_time*1000)[:5]+" ps", "l")
    legend.AddEntry(graph_90, "10% and 90% limit", "l")
    legend.AddEntry(graph_cfd, "CFD 0.5: " + str(cfd)[0:5] + " ns", "l")
    legend.AddEntry(charge_fill, "Charge: "+str(charge*10**15)[:5]+" fC", "f")


    # Define the titles and draw the graph
    xAxisTitle = "Time [ns]"
    yAxisTitle = "Voltage [mV]"
    headTitle = "Waveform " + md.getSensor()
    multi_graph.Draw("ALP")
    legend.Draw()
    multi_graph.SetTitle(headTitle)
    multi_graph.GetXaxis().SetTitle(xAxisTitle)
    multi_graph.GetYaxis().SetTitle(yAxisTitle)

    # Set ranges on axes
    multi_graph.GetYaxis().SetRangeUser(-30,350)
    multi_graph.GetXaxis().SetRangeUser(cfd-5,cfd+5)


    # Export the PDF file
    fileName = dm.getPlotsSourceFolder()+"/waveforms/waveform"+"_"+str(md.getBatchNumber())+"_"+str(runNumber)+"_event_"+str(event)+"_"+str(sensor)+".pdf"

    canvas.Print(fileName)

    print "PDF produced at", fileName+"."
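
# --- Hedged sketch (illustration, not part of the module) ---
# The signal threshold in printWaveform() is threshold = N*noise + pedestal with
# N = 4.27. The toy waveform and the baseline estimate below are assumptions; the
# module obtains noise and pedestal from p_calc.calculateNoiseAndPedestal().
import numpy as np

toy_waveform = np.array([0.002, 0.001, 0.003, 0.002, 0.150, 0.002])   # V, made up
pedestal = np.mean(toy_waveform[:4])    # baseline estimate (assumed method)
noise = np.std(toy_waveform[:4])        # noise estimate (assumed method)
threshold = 4.27 * noise + pedestal
points_above = np.argwhere(toy_waveform > threshold).flatten()   # here: only the 0.150 V sample
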
def trackingPlots():
    
    global var_names
    
    startTime = dm.getTime()
    
    print "\nStart TRACKING analysis, batches:", md.batchNumbers, "\n"
    
    for batchNumber in md.batchNumbers:
        
        startTimeBatch = dm.getTime()
        runNumbers = md.getAllRunNumbers(batchNumber)
        
        # Omit batches with fewer than 3 runs
        if len(runNumbers) < 3:
            
            print "Batch", batchNumber, "omitted, < 3 runs.\n"
            continue
        
        print "BATCH", batchNumber, "\n"
    
        var_names = [["pulse", "pulse_amplitude"], ["pulse", "charge"], ["pulse", "rise_time"], ["timing", "normal_peak"], ["timing", "normal_cfd"]]
        
        numpy_arrays = [np.empty(0, dtype = dm.getDTYPE(batchNumber)) for _ in range(len(var_names))]
        numpy_arrays.append(np.empty(0, dtype = dm.getDTYPETracking()))
        
        max_sample = np.empty(0, dtype = dm.getDTYPE(batchNumber))
        
        for runNumber in runNumbers:
            
            md.defineRunInfo(md.getRowForRunNumber(runNumber))
            
            # Produce timing resolution files if they do not exist
            if not dm.checkIfROOTDataFileExists("timing", "normal_peak"):
                t_calc.createTimingFiles(batchNumber)
            
            if not dm.checkIfFileAvailable("tracking"):
                continue
        
            tracking_run = dm.exportImportROOTData("tracking", "tracking")
            
            # Truncate each imported array to the number of tracking events so that
            # the entries match the tracking data. This assumes that the tracking
            # file contains fewer events than the oscilloscope file.
            for index in range(len(var_names)):
                numpy_arrays[index] = np.concatenate(
                    (numpy_arrays[index],
                     np.take(dm.exportImportROOTData(var_names[index][0], var_names[index][1]),
                             np.arange(0, len(tracking_run)))),
                    axis=0)

            max_sample = np.concatenate(
                (max_sample,
                 np.take(dm.exportImportROOTData("pulse", "max_sample"),
                         np.arange(0, len(tracking_run)))),
                axis=0)
            
            
            # Concatenate tracking arrays
            numpy_arrays[-1] = np.concatenate((numpy_arrays[-1], tracking_run), axis=0)
    
    
        pulse_amplitude, gain, rise_time, time_difference_peak, time_difference_cfd, tracking = numpy_arrays
        
        # Check if the position file exists, otherwise create it
        if not dm.checkIfROOTDataFileExists("tracking", "position"):
            t_calc.calculateCenterOfSensorPerBatch(pulse_amplitude, tracking)
    
        declareTCanvas()
        defineBinSizes()

        if md.getBatchNumber()/100 == 7:
            updateBinSize(1.5)

        t_calc.setArrayPadExportBool(False)

        createSinglePadGraphs(numpy_arrays, max_sample)
        
        createArrayPadGraphs(distance_x, distance_y)
        
        print "\nDone with batch", batchNumber, "Time analysing: "+str(dm.getTime()-startTimeBatch)+"\n"

    print "\nDone with TRACKING analysis. Time analysing: "+str(dm.getTime()-startTime)+"\n"
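
# --- Hedged sketch (illustration, not part of the module) ---
# The event-matching step in trackingPlots(): oscilloscope-based arrays are
# truncated with np.take to the number of tracking events, assuming the tracking
# file always holds fewer events than the oscilloscope file.
import numpy as np

oscilloscope_values = np.arange(10)   # stand-in for an exported pulse array
tracking_run = np.zeros(7)            # stand-in for the imported tracking array
matched = np.take(oscilloscope_values, np.arange(0, len(tracking_run)))
assert len(matched) == len(tracking_run)   # entries now line up one-to-one
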