def get_data_objects(contains, is_remote_lt3_measurement, **kw):
    """Locate the measurement, filtering and SSRO-calibration data objects.

    Parameters
    ----------
    contains : str
        Substring identifying the measurement folder (passed to tb.latest_data).
    is_remote_lt3_measurement : bool
        If True, the analysis file is taken from the remote lt3 data share and
        the local file is used only for filtering.
    Keyword arguments
    -----------------
    analysis_computer : str ('lt4' or 'lt3_analysis', default 'lt4')
        Which machine this runs on; selects the local/remote data roots.
    ssro_calib_contains : str (default 'SSROCalib')
        Substring identifying the SSRO calibration folder.
    Remaining kwargs are forwarded to tb.latest_data for the primary folder.

    Returns
    -------
    (analysis_file, filtering_file, ssro_calib_folder, trans)
        trans is 'msp1'/'msm1' for remote measurements, None otherwise.
    """
    analysis_computer = kw.pop('analysis_computer', 'lt4')
    ssro_calib_contains = kw.pop('ssro_calib_contains', 'SSROCalib')

    # Map the analysis computer onto its local data root (empty dict = use
    # tb.latest_data's default) and the network share holding the lt3 data.
    if analysis_computer == 'lt4':
        local_folder_kw = {}
        remote_root = r'Z:\data'
    elif analysis_computer == 'lt3_analysis':
        local_folder_kw = {'folder': r'X:\data'}
        remote_root = r'Y:\data'
    else:
        # BUGFIX: an unknown value used to fall through both if-blocks and
        # crash at the return with an opaque NameError on analysis_file.
        raise ValueError('unknown analysis_computer: %r' % (analysis_computer,))

    folder = tb.latest_data(contains, **dict(local_folder_kw, **kw))
    a = ppq.purifyPQAnalysis(folder, hdf5_mode='r')

    if is_remote_lt3_measurement:
        lt3_folder = tb.latest_data(contains, folder=remote_root)
        b = ppq.purifyPQAnalysis(lt3_folder, hdf5_mode='r')
        analysis_file = b
        filtering_file = a
        ssro_calib_folder = tb.latest_data(ssro_calib_contains,
                                           folder=remote_root)
        # The electron transition attribute encodes the ms = +/-1 transition;
        # a 'p' in the string marks the +1 transition.
        if 'p' in a.g.attrs['electron_transition']:
            trans = 'msp1'
        else:
            trans = 'msm1'
    else:
        analysis_file = a
        filtering_file = a
        ssro_calib_folder = tb.latest_data(ssro_calib_contains,
                                           **local_folder_kw)
        trans = None

    return analysis_file, filtering_file, ssro_calib_folder, trans
Example #2
0
def get_coincidences_and_adwin_data_from_folder(folder_primary,use_invalid_data_marker = False, folder_secondary = None,syncs_per_CR_check = 1, index = 1,force_coincidence_evaluation = False,contains = '', save = True,Verbose=False):

    sync_num_name = 'PQ_sync_number-' + str(index)
    # print 'this is the save!', save
    filepaths_primary = tb.get_all_msmt_filepaths(folder_primary, pattern = contains)

    if folder_secondary != None:
        load_secondary = True
        filepaths_secondary = tb.get_all_msmt_filepaths(folder_secondary, pattern = contains) 
    else:
        load_secondary = False

    first_data = True 

    # print filepaths
    for i,(fp,fp2) in enumerate(zip(filepaths_primary,filepaths_secondary)):
        if Verbose:
            print fp
            print fp2
        if abs(int(os.path.split(fp)[1][:6]) - int(os.path.split(fp2)[1][:6])) > 100:
            print fp
            print fp2
            raise Exception('Time difference too great!')
           
        
        pqf = pq_tools.pqf_from_fp(fp)
        if load_secondary:
            fs = os.path.split(fp2)[0]
            purifyPQ_s = purify_pq.purifyPQAnalysis(fs)

        if sync_num_name in pqf.keys():

            coinc = pq_tools.get_coincidences(pqf,force_coincidence_evaluation =force_coincidence_evaluation,save = save)

            if coinc.size:

                new_vars = np.array([])

                if use_invalid_data_marker:
                    # Fixed number of LDE attempts per repetition, so straightforward to pull CR check value index
                    ind = (np.floor(coinc[:,3]/syncs_per_CR_check)).astype(int)

                    # print cr_check_ind
                    invalid_data_marker = (purifyPQ_s.agrp['invalid_data_markers'].value)[ind]
                    new_vars = np.hstack((new_vars, invalid_data_marker[:, np.newaxis])) if new_vars.size else invalid_data_marker[:, np.newaxis]

                if first_data:
            
                    co = np.hstack((coinc,new_vars)) if new_vars.size else coinc
                    first_data = False

                else:
                       
                    co = np.vstack((co, np.hstack((coinc,new_vars)))) if new_vars.size else np.vstack((co, coinc))

        pqf.close()
        if load_secondary:
            purifyPQ_s.finish()

    
    return co
Example #3
0
    def load_raw_data(self, lt3_timestamps, lt4_timestamps):
        """Load and adwin-prefilter raw PQ data for paired lt3/lt4 timestamps.

        For each (lt3, lt4) timestamp pair this opens the corresponding
        purification PQ analysis objects, skips pairs whose PQ (timeharp)
        data contain no adwin readouts, and appends to self.lt3_dict /
        self.lt4_dict: the adwin results ('ssro_results',
        'counted_awg_reps', 'CR_after'), the timestamps, the raw analysis
        objects, and the sync-filtered PQ arrays for every key in
        self.key_list_pq. A duty-cycle estimate is printed per file.

        Parameters
        ----------
        lt3_timestamps, lt4_timestamps : lists of timestamp strings,
            matched element-wise (iterated with zip).
        """
        length = len(lt3_timestamps)
        print 'loading the data, total number of files ', length

        # i counts only the files that survive the sync filter below.
        i = 0

        for t_lt3, t_lt4 in zip(lt3_timestamps, lt4_timestamps):

            # print 'tstamps', t_lt3,t_lt4
            # print tb.latest_data(t_lt4,folder = self.lt4_folder)

            # Open both setups' measurement files read-only.
            a_lt3 = ppq.purifyPQAnalysis(tb.latest_data(
                t_lt3, folder=self.lt3_folder),
                                         hdf5_mode='r')
            a_lt4 = ppq.purifyPQAnalysis(tb.latest_data(
                t_lt4, folder=self.lt4_folder),
                                         hdf5_mode='r')
            # print a_lt3.agrp
            ### filter the timeharp data according to adwin events / syncs

            sync_filter = a_lt4.filter_pq_data_from_adwin_syncs(
            )  ### this syncfilter erases all data where from the PQ data where the adwin did NOT read out
            # print sync_filter
            if len(sync_filter) == 0:  # empty list --> no read outs.
                # print 'file empty, skipping these time stamps:',t_lt3,t_lt4
                # print
                continue

            ### store relevant adwin results
            self.lt3_dict['ssro_results'].append(
                np.array(a_lt3.agrp['ssro_results'].value))
            self.lt3_dict['counted_awg_reps'].append(
                np.array(a_lt3.agrp['counted_awg_reps'].value))
            self.lt3_dict['CR_after'].append(
                np.array(a_lt3.agrp['CR_after'].value))
            self.lt4_dict['ssro_results'].append(
                np.array(a_lt4.agrp['ssro_results'].value))
            self.lt4_dict['counted_awg_reps'].append(
                np.array(a_lt4.agrp['counted_awg_reps'].value))
            self.lt4_dict['CR_after'].append(
                np.array(a_lt4.agrp['CR_after'].value))
            self.lt3_dict['tstamp'].append(t_lt3)
            self.lt4_dict['tstamp'].append(t_lt4)
            self.lt3_dict['raw_data'].append(a_lt3)
            self.lt4_dict['raw_data'].append(a_lt4)

            # Keep only the PQ events that passed the adwin sync filter.
            for key in self.key_list_pq:
                self.lt4_dict[key].append(
                    np.array(a_lt4.pqf[key].value[sync_filter]))

            #### calculate the duty cycle for that specific file.
            # print 'lde length',a_lt3.joint_grp.attrs['LDE_element_length']
            # print'first and last time',a_lt4.pqf['/PQ_time-1'].value[0],a_lt4.pqf['/PQ_time-1'][-1]
            # print 'last elapsed time in sequence vs total elapsed time',
            # print self.lt4_dict['/PQ_sync_number-1'][-1][0]
            # Duty cycle = (sync count * LDE element length) / elapsed time;
            # PQ times are in picoseconds, hence the 1e-12 factor.
            time_in_LDE_sequence = a_lt3.joint_grp.attrs[
                'LDE_element_length'] * a_lt4.pqf['/PQ_sync_number-1'][-1]
            total_elapsed_time = (a_lt4.pqf['/PQ_time-1'].value[-1] -
                                  a_lt4.pqf['/PQ_time-1'][0]) * 1e-12
            no_of_syncs = a_lt4.pqf['/PQ_sync_number-1'][-1]
            print i + 1, ' dc: ', round(
                100 * time_in_LDE_sequence / total_elapsed_time,
                1), ' %   syncs ', no_of_syncs
            i += 1
Example #4
0
def return_phase_stab(contains, start_rep_no=3, mode='only_meas', **kw):
    """Fit a Gaussian to the phase histogram of each matching measurement file.

    Parameters
    ----------
    contains : str
        Subfolder of base_folder_lt4 holding the measurement files.
    start_rep_no : int (default 3)
        Number of initial repetitions to discard from the count traces.
    mode : 'only_meas' or 'only_stab'
        Whether to analyze the sampling (measurement) counts or the PID
        (stabilization) counts.
    filename_str : str kwarg (default 'XsweepY')
        Filename pattern passed to tb.latest_data.

    Returns
    -------
    (x0s, sigmas) : lists of fitted Gaussian centers and widths (degrees),
        one entry per measurement file.
    """
    base_folder_lt4 = analysis_params.data_settings['base_folder_lt4']
    folder = os.path.join(base_folder_lt4, contains)
    filename_str = kw.pop('filename_str', 'XsweepY')
    measfiles = tb.latest_data(contains=filename_str, folder=folder, return_all=True)

    x0s = []
    sigmas = []
    for measfile in measfiles:
        a = ppq.purifyPQAnalysis(measfile, hdf5_mode='r')

        # Integration times for the stabilization and measurement phases.
        # (plain float: np.float was just an alias and is removed in new numpy)
        delay_stable = float(a.g.attrs['count_int_time_stab'])
        delay_meas = float(a.g.attrs['count_int_time_meas'])

        # Both modes extract the same pair of count traces, only from a
        # different adwin dataset and with a different integration time.
        if mode == 'only_meas':
            dataset_prefix, delay = 'sampling', delay_meas
        elif mode == 'only_stab':
            dataset_prefix, delay = 'pid', delay_stable
        else:
            # BUGFIX: an unknown mode used to fall through both branches and
            # crash later with a NameError on v_1; fail fast instead.
            raise ValueError('unknown mode: %r' % (mode,))

        v_1 = a.g['adwindata'][dataset_prefix + '_counts_1'].value[start_rep_no:]
        v_2 = a.g['adwindata'][dataset_prefix + '_counts_2'].value[start_rep_no:]

        g_0 = a.g.attrs['Phase_Msmt_g_0']
        visibility = a.g.attrs['Phase_Msmt_Vis']

        # Convert each photon-count pair to a phase angle (degrees); clip the
        # cosine estimate to [-1, 1] so arccos stays defined.
        cosvals = [2*(float(n0)/(float(n0)+float(n1)*g_0)-0.5)*visibility for n0, n1 in zip(v_1, v_2)]
        cosvals = [c if np.abs(c) < 1 else (1.0 * np.sign(c)) for c in cosvals]
        angle = 180*np.arccos(cosvals)/np.pi

        hist, bins = np.histogram(angle, bins=100, normed=True)
        center = (bins[:-1] + bins[1:]) / 2

        # Initial fit guesses: flat offset 0, centered at 90 degrees,
        # sigma 45 degrees, amplitude normalized to unit area.
        g_a = 0.0
        g_x0 = 90
        g_sigma = 45
        g_A = 1/(np.sqrt(2 * np.pi) * g_sigma)

        p0, fitfunc, fitfunc_str = common.fit_gauss(g_a, g_A, g_x0, g_sigma)
        fit_result = fit.fit1d(center, hist, None, p0=p0, fitfunc=fitfunc,
                               ret=True, fixed=[])

        x0s.append(fit_result['params_dict']['x0'])
        sigmas.append(fit_result['params_dict']['sigma'])
    return x0s, sigmas
Example #5
0
    def __init__(self, folder_a, folder_b):
        """Open read-only purification PQ analyses for the two data folders."""
        self.a, self.b = (ppq.purifyPQAnalysis(folder, hdf5_mode='r')
                          for folder in (folder_a, folder_b))
Example #6
0
def analyze_phase(contains, mode, plot_zoomed = [], start_rep_no = 1,**kw):
    # Import
    lt3_analysis = kw.pop('lt3_analysis', False)

    if not(lt3_analysis):
        measfile= tb.latest_data(contains)
    else:
        base_folder_lt4 = analysis_params.data_settings['base_folder_lt4']
        folder = os.path.join(base_folder_lt4,contains)
        filename_str = kw.pop('filename_str','XsweepY')
        measfile=tb.latest_data(contains = filename_str,folder =folder,return_all=False)

    a = ppq.purifyPQAnalysis(measfile, hdf5_mode='r')

    # general params
    

    delay_stable = np.float(a.g.attrs['count_int_time_stab'])
    delay_meas = np.float(a.g.attrs['count_int_time_meas'])
    pid_cycles = a.g.attrs['pid_points_to_store']
    if a.g.attrs['do_post_ent_phase_msmt'] and mode == 'do_only_meas':
        sample_cycles = 1
        max_repetitions = a.g['adwindata']['completed_reps'].value
    else:
        sample_cycles = a.g.attrs['sample_points']
        max_repetitions = a.g['adwindata']['store_index_stab'].value/a.g.attrs['pid_points_to_store']

    

    g_0 = a.g.attrs['Phase_Msmt_g_0']
    visibility = a.g.attrs['Phase_Msmt_Vis']

    
   
    if mode == 'only_meas':
    
        sample_counts_1 = a.g['adwindata']['sampling_counts_1'].value
        sample_counts_2 = a.g['adwindata']['sampling_counts_2'].value
        sample_counts_1 = sample_counts_1[(start_rep_no - 1)*sample_cycles:]
        sample_counts_2 = sample_counts_2[(start_rep_no - 1)*sample_cycles:]

    
        delay = delay_meas
        total_cycles = sample_cycles

        v_1 = sample_counts_1
        v_2 = sample_counts_2
        t = np.arange(0, (len(v_1)*delay/1000), (float(delay)/1000))



    elif mode == 'only_stab':

        pid_counts_1 = a.g['adwindata']['pid_counts_1'].value
        pid_counts_2 = a.g['adwindata']['pid_counts_2'].value
        pid_counts_1 = pid_counts_1[(start_rep_no - 1)*pid_cycles:]
        pid_counts_2 = pid_counts_2[(start_rep_no - 1)*pid_cycles:]

        delay = delay_stable
        total_cycles = pid_cycles

        v_1 = pid_counts_1
        v_2 = pid_counts_2
        t = np.arange(0, (len(v_1)*delay/1000), (float(delay)/1000))



    else:
        pid_counts_1 = a.g['adwindata']['pid_counts_1'].value
        pid_counts_2 = a.g['adwindata']['pid_counts_2'].value
        sample_counts_1 = a.g['adwindata']['sampling_counts_1'].value
        sample_counts_2 = a.g['adwindata']['sampling_counts_2'].value
        sample_counts_1 = sample_counts_1[(start_rep_no - 1)*sample_cycles:]
        sample_counts_2 = sample_counts_2[(start_rep_no - 1)*sample_cycles:]
        pid_counts_1 = pid_counts_1[(start_rep_no - 1)*pid_cycles:]
        pid_counts_2 = pid_counts_2[(start_rep_no - 1)*pid_cycles:]

        v_1 = []
        v_2 = []
        t = []
        angle = []

        for i in xrange(len(pid_counts_1)/pid_cycles):
            
            v_1.extend(pid_counts_1[i*pid_cycles:((i+1)*pid_cycles)])
            v_2.extend(pid_counts_2[i*pid_cycles:((i+1)*pid_cycles)])
            if len(t) == 0:
                t.extend(delay_stable * (1 + np.arange(pid_cycles)))
            else:
                t.extend(t[-1] + delay_stable * (1 + np.arange(pid_cycles)))

            v_1.extend(sample_counts_1[i*sample_cycles:((i+1)*sample_cycles)])
            v_2.extend(sample_counts_2[i*sample_cycles:((i+1)*sample_cycles)])
            if len(t) == 0:
                t.extend(delay_meas * (1 + np.arange(sample_cycles)))
            else:
                t.extend(t[-1] + delay_meas * (1 + np.arange(sample_cycles)))
        t = np.array(t)
        total_cycles = pid_cycles + sample_cycles

    cosvals = [2*(float(n0)/(float(n0)+float(n1)*g_0)-0.5)*visibility for n0,n1 in zip(v_1,v_2)]
    cosvals = [cosval if np.abs(cosval) < 1 else (1.0 * np.sign(cosval)) for cosval in cosvals]
    angle = 180*np.arccos(cosvals)/np.pi

   


    # counts
    fig = plt.figure(figsize=(17,6))
    ax = plt.subplot(211)
    plt.plot(t, v_1, 'b')
    plt.title('Counts ZPL detector 1 {0}'.format(a.folder))
    ax.set_xlabel('elapsed time (ms)')
    ax.set_ylabel('counts')
    ax2 = plt.subplot(212)
    plt.plot(t, v_2, 'b')
    plt.title('Counts ZPL detector 2 {0}'.format(a.folder))
    ax2.set_xlabel('elapsed time (ms)')
    ax2.set_ylabel('counts')
    plt.tight_layout()  
    fig.savefig(os.path.join(a.folder, 'trace_counts.png'))

    if len(plot_zoomed):
        
        fig = plt.figure(figsize=(17,6))
        ax = plt.subplot(111)
        plt.plot(t[total_cycles*plot_zoomed[0]:total_cycles*plot_zoomed[1]], angle[total_cycles*plot_zoomed[0]:total_cycles*plot_zoomed[1]])
        plt.title('Zoomed trace {0}'.format(a.folder))
        ax.set_xlabel('elapsed time (milliseconds)')
        ax.set_ylabel('Phase')
        plt.tight_layout()
        fig.savefig(os.path.join(a.folder, 'trace_zoomed.png'))

    # phase
    fig = plt.figure(figsize=(17,6))
    ax = plt.subplot(111)
    plt.plot(t, angle, 'r')
    plt.title('Phase of ZPL photons {0}'.format(a.folder))
    plt.ylim([0,180])
    ax.set_xlabel('elapsed time (ms)')
    ax.set_ylabel('angle ($^o$)')
    fig.savefig(os.path.join(a.folder, 'trace_angle.png'))


    # fft

    if mode == 'only_meas' or mode == 'only_stab':

        yf = np.abs(scipy.fftpack.fft(angle))
        xf = np.linspace(0, 1.0/(2*delay*1e-6), len(angle)/2)

        fig, ax = plt.subplots()
        ymax = 1.2*np.max(yf[15:-15])
        plt.ylim([0,ymax])
        ax.plot(xf[:len(yf)], yf[:len(yf)/2])
        xlim = plt.xlim()
        if (xlim[1]>1000):
            plt.xlim(0,1000)
        plt.title('FFT {0}'.format(a.folder))
        ax.set_xlabel('frequency (Hz)')
        ax.set_ylabel('Amplitude (a.u.)')
        fig.savefig(os.path.join(a.folder, 'fft.png'))

    # histogram
    fig = plt.figure()
    ax = plt.subplot(111)
    hist, bins = np.histogram(angle,bins= 100,normed = True)
    width = np.diff(bins)
    center = (bins[:-1] + bins[1:]) / 2
    ax.bar(center, hist, align='center', width=width)
    g_a = 0.0
    g_x0 = 90
    g_sigma = 45
    g_A = 1/(np.sqrt(2 * np.pi) * g_sigma)

    p0, fitfunc,fitfunc_str = common.fit_gauss(g_a, g_A, g_x0, g_sigma)
    fit_result = fit.fit1d(center,hist, None, p0=p0, fitfunc=fitfunc,
                         ret=True,fixed=[])
    plot.plot_fit1d(fit_result, np.linspace(center[0],center[-1],201), ax=ax, 
                        plot_data=False,print_info = True)

    ax.set_xlabel('Phase')
    fig.savefig(os.path.join(a.folder, 'histogram.png'))
    if len(fit_result['params_dict']):
        print 'x0, sigma ', fit_result['params_dict']['x0'] , fit_result['params_dict']['sigma'] 

    # standard dev
    fig = plt.figure()
    ax = plt.subplot(111)
    angle_reshape = (np.reshape(angle,[total_cycles,-1]))
    binsize = 1.0
    var_array = np.zeros([int(np.floor(total_cycles/binsize)),1])
    for x in range(int(np.floor(total_cycles/binsize))):
        var_array[x] = np.sqrt(np.var(angle_reshape[binsize*x:binsize*(x+1),:]))


    plt.plot(binsize*t[0:(int(np.floor(total_cycles/binsize)))], var_array)
    plt.title('Standard deviation of Phase {0}'.format(a.folder))
    ax.set_xlabel('time (ms)')
    ax.set_ylabel('std dev ($^o$)')