Code Example #1
def extract_and_pickle(nc_filename):
    basedir = os.path.split(nc_filename)[0] # should make this more robust, currently assumes all nc files are in top
    #  level of /data/<machine>/*.nc
    try:
        print("Processing {}".format(nc_filename))
        snms = []
        rnc = ReadoutNetCDF(nc_filename)
        if len(rnc.sweeps) != len(rnc.timestreams):
            raise ValueError("The number of sweeps does not match the number of timestreams in {}".format(nc_filename))
        for index, (sweep, timestream) in enumerate(zip(rnc.sweeps, rnc.timestreams)):
            for resonator_index in set(sweep.index):
                snm = SweepNoiseMeasurement(nc_filename, sweep_group_index=index, timestream_group_index=index,
                                            resonator_index=resonator_index)
                try:
                    snm.zbd_voltage = timestream.zbd_voltage[0]
                except AttributeError:
                    pass
                snms.append(snm)
        rnc.close()
        # We decided to keep the .pkl files in /home/data regardless of origin.
        pkl_filename = os.path.join(basedir,'pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, snms)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        pass
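
A minimal driver sketch for this example: it maps extract_and_pickle over a directory of .nc files with a process pool. The glob pattern, pool size, and the main guard are illustrative assumptions, not part of the original code; per the comment above, the real files are expected under /data/<machine>/.

# Hypothetical driver for extract_and_pickle; the path and pool size are
# assumptions for illustration only.
import glob
import multiprocessing

if __name__ == '__main__':
    nc_filenames = sorted(glob.glob('/data/example_machine/*.nc'))  # illustrative path
    pool = multiprocessing.Pool(processes=4)
    pool.map(extract_and_pickle, nc_filenames)
    pool.close()
    pool.join()
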
Code Example #2
def extract_and_pickle(nc_filename):
    """
    Ignore the coarse sweeps and create two SweepNoiseMeasurements that both use the fine sweep.
    """
    print("Processing {}".format(nc_filename))
    snms = []
    rnc = readoutnc.ReadoutNetCDF(nc_filename)
    if len(rnc.sweeps) != len(rnc.timestreams):
        raise ValueError("The number of sweeps does not match the number of timestreams in {}".format(nc_filename))
    for fine_index in range(1, len(rnc.sweeps), 2):
        fine_sweep = rnc.sweeps[fine_index]
        off_index = fine_index - 1
        on_index = fine_index
        for resonator_index in set(fine_sweep.index):
            off_snm = SweepNoiseMeasurement(nc_filename, sweep_group_index=fine_index,
                                            timestream_group_index=off_index, resonator_index=resonator_index)
            snms.append(off_snm)
            on_snm = SweepNoiseMeasurement(nc_filename, sweep_group_index=fine_index,
                                            timestream_group_index=on_index, resonator_index=resonator_index)
            snms.append(on_snm)
    rnc.close()
    # We decided to keep the .pkl files in /home/data regardless of origin.
    pkl_filename = os.path.join('/home/data/pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
    save_noise_pkl(pkl_filename, snms)
    print("Saved {}".format(pkl_filename))
Code Example #3
def extract_and_pickle(nc_filename):
    basedir = os.path.split(
        nc_filename
    )[0]  # should make this more robust, currently assumes all nc files are in top
    #  level of /data/<machine>/*.nc
    try:
        print("Processing {}".format(nc_filename))
        snms = []
        rnc = ReadoutNetCDF(nc_filename)
        if len(rnc.sweeps) != len(rnc.timestreams):
            raise ValueError(
                "The number of sweeps does not match the number of timestreams in {}"
                .format(nc_filename))
        for index, (sweep,
                    timestream) in enumerate(zip(rnc.sweeps, rnc.timestreams)):
            for resonator_index in set(sweep.index):
                snm = SweepNoiseMeasurement(nc_filename,
                                            sweep_group_index=index,
                                            timestream_group_index=index,
                                            resonator_index=resonator_index)
                try:
                    snm.zbd_voltage = timestream.zbd_voltage[0]
                except AttributeError:
                    pass
                snms.append(snm)
        rnc.close()
        # We decided to keep the .pkl files in /home/data regardless of origin.
        pkl_filename = os.path.join(
            basedir, 'pkl',
            os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, snms)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        pass
Code Example #4
def extract_and_pickle(nc_filename):
    basedir = os.path.split(nc_filename)[0] # should make this more robust, currently assumes all nc files are in top
    #  level of /data/<machine>/*.nc
    machine = os.path.split(basedir)[1]
    cryostat = cryostats[machine]
    try:
        print("Processing {}".format(nc_filename))
        snms = []
        rnc = ReadoutNetCDF(nc_filename)
        for timestream_index,timestream in enumerate(rnc.timestreams):
            if timestream.epoch.shape[0] == 0:
                print "no timestreams in", nc_filename
                return
            start_epoch = timestream.epoch.min()
            sweep_index = find_closest_sweep(timestream,rnc.sweeps)
            sweep = rnc.sweeps[sweep_index]
            sweep_epoch = sweep.end_epoch
            resonator_indexes = np.array(list(set(sweep.index)))
            resonator_indexes.sort()
            print "%s: timestream[%d] at %s, associated sweep[%d] at %s, %d resonators" % (nc_filename,timestream_index,
                                                                                       time.ctime(start_epoch),
                                                                                       sweep_index,
                                                                                       time.ctime(sweep_epoch),
                                                                                       len(resonator_indexes))
            for resonator_index in resonator_indexes:
                snm = SweepNoiseMeasurement(rnc, sweep_group_index=sweep_index,
                                            timestream_group_index=timestream_index,
                                            resonator_index=resonator_index, cryostat=cryostat)
                if nc_filename in atten_map:
                    atten = atten_map[nc_filename][timestream_index]
                    ntone_correction = ntone_power_correction(16)
                    print "overriding attenuation",atten
                    snm.atten = atten
                    snm.total_dac_atten = atten + ntone_correction
                    snm.power_dbm = snm.dac_chain_gain - snm.total_dac_atten

                try:
                    snm.zbd_voltage = timestream.zbd_voltage[0]
                except AttributeError:
                    pass
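                # Pickle round-trip: append a detached copy of the measurement,
                # presumably so it no longer holds references to the open netCDF file.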
                pkld = cPickle.dumps(snm,cPickle.HIGHEST_PROTOCOL)
                del snm
                snm = cPickle.loads(pkld)
                snms.append(snm)
        rnc.close()
        pkl_filename = os.path.join(basedir,'pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, snms)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        pass
    except Exception as e:
        print "failed on",nc_filename,e
Code Example #5
def process_file(filename):
    print(filename)
    try:
        rnc = kid_readout.measurement.io.readoutnc.ReadoutNetCDF(filename)
        num_timestreams = len(rnc.timestreams)
        num_sweeps = len(rnc.sweeps)
        if num_timestreams == num_sweeps:
            has_source_off_timestream = False
            num_power_steps = num_timestreams - 1  # last time stream is modulated measurement
        elif num_timestreams == num_sweeps + 1:
            has_source_off_timestream = True
            num_power_steps = num_timestreams - 2
        else:
            raise Exception(
                "Found unexpected number of timestreams %d and number of sweeps %d for file %s"
                % (num_timestreams, num_sweeps, filename)
            )
        resonator_ids = np.unique(rnc.sweeps[0].index)
        noise_on_measurements = []
        noise_modulated_measurements = []
        noise_off_sweep_params = []
        for resonator_id in resonator_ids:
            power_steps_mmw_on = []
            for idx in range(num_power_steps):
                noise_on_measurement = SweepNoiseMeasurement(
                    sweep_filename=filename,
                    sweep_group_index=idx,
                    timestream_group_index=idx,
                    resonator_index=resonator_id,
                )
                power_steps_mmw_on.append(noise_on_measurement)
                noise_on_measurement._close_files()
            noise_modulated_measurement = SweepNoiseMeasurement(
                sweep_filename=filename,
                sweep_group_index=0,
                timestream_group_index=num_power_steps,
                resonator_index=resonator_id,
                deglitch_threshold=None,
            )
            if has_source_off_timestream:
                noise_off_measurement = SweepNoiseMeasurement(
                    sweep_filename=filename,
                    sweep_group_index=num_sweeps - 1,
                    timestream_group_index=num_timestreams - 1,
                    resonator_index=resonator_id,
                )

            # all the zbd_voltages are the same, so we can grab any of them
            noise_modulated_measurement.zbd_voltage = rnc.timestreams[num_power_steps].zbd_voltage[0]
            for noise_on_measurement in power_steps_mmw_on:
                noise_on_measurement.zbd_voltage = rnc.timestreams[num_power_steps].zbd_voltage[0]
            if noise_modulated_measurement.timestream_modulation_period_samples != 0:
                noise_modulated_measurement.folded_projected_timeseries = noise_modulated_measurement.projected_timeseries.reshape(
                    (-1, noise_modulated_measurement.timestream_modulation_period_samples)
                )
                folded = noise_modulated_measurement.folded_projected_timeseries.mean(0)
                high, low, rising_edge = kid_readout.analysis.fit_pulses.find_high_low(folded)
                noise_modulated_measurement.folded_projected_timeseries = np.roll(
                    noise_modulated_measurement.folded_projected_timeseries, -rising_edge, axis=1
                )
                noise_modulated_measurement.folded_normalized_timeseries = np.roll(
                    noise_modulated_measurement.normalized_timeseries.reshape(
                        (-1, noise_modulated_measurement.timestream_modulation_period_samples)
                    ),
                    -rising_edge,
                    axis=1,
                )
            else:
                noise_modulated_measurement.folded_projected_timeseries = None

            if not has_source_off_timestream:
                fr, s21, err = rnc.sweeps[-1].select_by_index(resonator_id)
                noise_off_sweep = kid_readout.analysis.resonator.fit_best_resonator(fr, s21, errors=err)
                noise_off_sweep_params.append(noise_off_sweep.result.params)
            else:
                noise_off_measurement.zbd_voltage = rnc.timestreams[num_power_steps].zbd_voltage[0]
                noise_off_sweep_params.append(noise_off_measurement)

            noise_on_measurements.extend(power_steps_mmw_on)
            noise_modulated_measurements.append(noise_modulated_measurement)
            noise_modulated_measurement._close_files()
        rnc.close()
        data = dict(
            noise_on_measurements=noise_on_measurements,
            noise_modulated_measurements=noise_modulated_measurements,
            noise_off_sweeps=noise_off_sweep_params,
        )
        blah, fbase = os.path.split(filename)
        fbase, ext = os.path.splitext(fbase)
        pklname = os.path.join("/home/data/pkl", fbase + ".pkl")
        save_noise_pkl(pklname, data)
        return data
    except KeyboardInterrupt:
        return None
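
The fold-and-align step above (reshape by the modulation period, average across periods, then roll so each period starts at the rising edge) can be illustrated with synthetic data. This is a minimal sketch: a simple midpoint threshold stands in for kid_readout.analysis.fit_pulses.find_high_low, whose implementation is not shown here.

# Minimal, self-contained sketch of folding a modulated timeseries (illustrative).
import numpy as np

period_samples = 8
timeseries = np.tile(np.array([0., 0., 0., 1., 1., 1., 1., 0.]), 10)
timeseries += 0.01 * np.random.randn(timeseries.size)

folded = timeseries.reshape((-1, period_samples))    # one row per modulation period
template = folded.mean(0)                            # average over periods
threshold = 0.5 * (template.max() + template.min())  # stand-in for find_high_low
rising_edge = int(np.argmax(template > threshold))   # first sample above threshold
aligned = np.roll(folded, -rising_edge, axis=1)      # each period now starts at the edge
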
Code Example #6
def extract_and_pickle(nc_filename, deglitch_threshold=5):
    basedir = os.path.split(nc_filename)[0] # should make this more robust, currently assumes all nc files are in top
    #  level of /data/<machine>/*.nc
    machine = os.path.split(basedir)[1]
    cryostat = cryostats[machine]
    print(cryostat)
    try:
        print("Processing {}".format(nc_filename))
        snms = []
        rnc = ReadoutNetCDF(nc_filename)
        for timestream_index,timestream in enumerate(rnc.timestreams):
            if timestream.epoch.shape[0] == 0:
                print "no timestreams in", nc_filename
                return
            start_epoch = timestream.epoch.min()
            sweep_index = find_closest_sweep(timestream,rnc.sweeps)
            sweep = rnc.sweeps[sweep_index]
            sweep_epoch = sweep.end_epoch
            modulation_state,modulation_frequency = rnc.get_modulation_state_at(start_epoch)
            try:
                manual_modulation_frequency = timestream.mmw_source_modulation_frequency[0]
            except AttributeError:
                manual_modulation_frequency = 0
            if modulation_state == 2 or manual_modulation_frequency > 0:
                this_deglitch_threshold = None
            else:
                this_deglitch_threshold = deglitch_threshold
            resonator_indexes = np.array(list(set(sweep.index)))
            resonator_indexes.sort()
            print "%s: timestream[%d] at %s, associated sweep[%d] at %s, %d resonators" % (nc_filename,timestream_index,
                                                                                       time.ctime(start_epoch),
                                                                                       sweep_index,
                                                                                       time.ctime(sweep_epoch),
                                                                                       len(resonator_indexes))
            if this_deglitch_threshold is None:
                print "Found modulation, not deglitching"
            for resonator_index in resonator_indexes:
                tic = time.time()
                snm = SweepNoiseMeasurement(rnc, sweep_group_index=sweep_index,
                                            timestream_group_index=timestream_index,
                                            resonator_index=resonator_index, cryostat=cryostat,
                                            deglitch_threshold=this_deglitch_threshold,
                                            )
                print "created snm for",rnc.filename,timestream_index,resonator_index,deglitch_threshold,"in",\
                    (time.time()-tic)
                try:
                    snm.zbd_voltage = timestream.zbd_voltage[0]
                except AttributeError:
                    pass
                tic = time.time()
                if snm.timestream_modulation_period_samples != 0:
                    snm.folded_projected_timeseries = snm.projected_timeseries.reshape((-1, snm.timestream_modulation_period_samples))
                    folded = snm.folded_projected_timeseries.mean(0)
                    high, low, rising_edge = kid_readout.analysis.fit_pulses.find_high_low(folded)
                    snm.folded_projected_timeseries = np.roll(snm.folded_projected_timeseries,-rising_edge,
                                                              axis=1).mean(0)
                    snm.folded_normalized_timeseries = np.roll(
                        snm.normalized_timeseries.reshape((-1,snm.timestream_modulation_period_samples)),
                        -rising_edge, axis=1).mean(0)
                    print "folded time series in ",(time.time()-tic)

                pkld = cPickle.dumps(snm,cPickle.HIGHEST_PROTOCOL)
                del snm
                snm = cPickle.loads(pkld)
                snms.append(snm)
        rnc.close()
        pkl_filename = os.path.join(basedir,'pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, snms)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        pass
    except Exception as e:
        print "failed on",nc_filename,e
Code Example #7
def process_file(filename):
    print(filename)
    try:
        rnc = kid_readout.measurement.io.readoutnc.ReadoutNetCDF(filename)
        num_timestreams = len(rnc.timestreams)
        num_sweeps = len(rnc.sweeps)
        if num_timestreams == num_sweeps:
            has_source_off_timestream = False
            num_power_steps = num_timestreams - 1  # last time stream is modulated measurement
        elif num_timestreams == num_sweeps + 1:
            has_source_off_timestream = True
            num_power_steps = num_timestreams - 2
        else:
            raise Exception(
                "Found unexpected number of timestreams %d and number of sweeps %d for file %s"
                % (num_timestreams, num_sweeps, filename))
        resonator_ids = np.unique(rnc.sweeps[0].index)
        noise_on_measurements = []
        noise_modulated_measurements = []
        noise_off_sweep_params = []
        for resonator_id in resonator_ids:
            power_steps_mmw_on = []
            for idx in range(num_power_steps):
                noise_on_measurement = SweepNoiseMeasurement(
                    sweep_filename=filename,
                    sweep_group_index=idx,
                    timestream_group_index=idx,
                    resonator_index=resonator_id,
                )
                power_steps_mmw_on.append(noise_on_measurement)
                noise_on_measurement._close_files()
            noise_modulated_measurement = SweepNoiseMeasurement(
                sweep_filename=filename,
                sweep_group_index=0,
                timestream_group_index=num_power_steps,
                resonator_index=resonator_id,
                deglitch_threshold=None,
            )
            if has_source_off_timestream:
                noise_off_measurement = SweepNoiseMeasurement(
                    sweep_filename=filename,
                    sweep_group_index=num_sweeps - 1,
                    timestream_group_index=num_timestreams - 1,
                    resonator_index=resonator_id)

            # all the zbd_voltages are the same, so we can grab any of them
            noise_modulated_measurement.zbd_voltage = rnc.timestreams[
                num_power_steps].zbd_voltage[0]
            for noise_on_measurement in power_steps_mmw_on:
                noise_on_measurement.zbd_voltage = rnc.timestreams[
                    num_power_steps].zbd_voltage[0]
            if noise_modulated_measurement.timestream_modulation_period_samples != 0:
                noise_modulated_measurement.folded_projected_timeseries = noise_modulated_measurement.projected_timeseries.reshape(
                    (-1, noise_modulated_measurement.
                     timestream_modulation_period_samples))
                folded = noise_modulated_measurement.folded_projected_timeseries.mean(
                    0)
                high, low, rising_edge = kid_readout.analysis.fit_pulses.find_high_low(
                    folded)
                noise_modulated_measurement.folded_projected_timeseries = np.roll(
                    noise_modulated_measurement.folded_projected_timeseries,
                    -rising_edge,
                    axis=1)
                noise_modulated_measurement.folded_normalized_timeseries = np.roll(
                    noise_modulated_measurement.normalized_timeseries.reshape(
                        (-1, noise_modulated_measurement.
                         timestream_modulation_period_samples)),
                    -rising_edge,
                    axis=1)
            else:
                noise_modulated_measurement.folded_projected_timeseries = None

            if not has_source_off_timestream:
                fr, s21, err = rnc.sweeps[-1].select_by_index(resonator_id)
                noise_off_sweep = kid_readout.analysis.resonator.fit_best_resonator(
                    fr, s21, errors=err)
                noise_off_sweep_params.append(noise_off_sweep.result.params)
            else:
                noise_off_measurement.zbd_voltage = rnc.timestreams[
                    num_power_steps].zbd_voltage[0]
                noise_off_sweep_params.append(noise_off_measurement)

            noise_on_measurements.extend(power_steps_mmw_on)
            noise_modulated_measurements.append(noise_modulated_measurement)
            noise_modulated_measurement._close_files()
        rnc.close()
        data = dict(noise_on_measurements=noise_on_measurements,
                    noise_modulated_measurements=noise_modulated_measurements,
                    noise_off_sweeps=noise_off_sweep_params)
        blah, fbase = os.path.split(filename)
        fbase, ext = os.path.splitext(fbase)
        pklname = os.path.join('/home/data/pkl', fbase + '.pkl')
        save_noise_pkl(pklname, data)
        return data
    except KeyboardInterrupt:
        return None
Code Example #8
def extract_and_pickle(nc_filename):
    """
    The format is that the file contains equal numbers of sweeps and timestreams. The first sweep is used to locate
    the resonances and is taken with the source off at the lowest power level, i.e. the maximum attenuation. The
    first timestream is taken under the same conditions except that the source is modulated. Subsequent sweeps and
    timestreams are paired.

    :param nc_filename: the file name of the netCDF4 file with the above format.
    :return: a dictionary
    """
    try:
        all_noise_on = []
        all_noise_off = []
        all_noise_modulated = []
        all_coarse_sweep_params = []
        coarse_sweep_index = 0
        modulated_timestream_index = 0

        print("Processing {}".format(nc_filename))
        rnc = ReadoutNetCDF(nc_filename)
        resonator_indices = sorted(set(rnc.sweeps[0].index))
        n_attenuations = len(rnc.sweeps) - 1

        for resonator_index in resonator_indices:
            noise_on = []
            for on_index in range(1, n_attenuations, 2):
                noise_on.append(SweepNoiseMeasurement(nc_filename, resonator_index=resonator_index,
                                                      sweep_group_index=on_index, timestream_group_index=on_index))
            all_noise_on.extend(noise_on)

            noise_off = []
            for off_index in range(2, n_attenuations + 1, 2):
                noise_off.append(SweepNoiseMeasurement(nc_filename, resonator_index=resonator_index,
                                                       sweep_group_index=off_index, timestream_group_index=off_index))
            all_noise_off.extend(noise_off)

            # Create the modulated measurement from the modulated timestream and the noise off sweep at the same power.
            # Skip deglitching.
            attenuations = [snm.atten for snm in noise_off]
            off_max_attenuation_index = 1 + 2 * attenuations.index(max(attenuations)) + 1
            noise_modulated = SweepNoiseMeasurement(nc_filename, resonator_index=resonator_index,
                                                    sweep_group_index=off_max_attenuation_index,
                                                    timestream_group_index=modulated_timestream_index,
                                                    deglitch_threshold=None)
            noise_modulated.folded_projected_timeseries = noise_modulated.projected_timeseries.reshape(
                (-1, noise_modulated.timestream_modulation_period_samples))
            folded = noise_modulated.folded_projected_timeseries.mean(0)
            high, low, rising_edge = find_high_low(folded)
            noise_modulated.folded_projected_timeseries = np.roll(noise_modulated.folded_projected_timeseries,
                                                                  -rising_edge, axis=1)
            noise_modulated.folded_normalized_timeseries = np.roll(
                noise_modulated.normalized_timeseries.reshape((-1,
                                                               noise_modulated.timestream_modulation_period_samples)),
                -rising_edge, axis=1)
            all_noise_modulated.append(noise_modulated)

            # Add the ZBD voltage from the modulated timestream to the modulated and static on measurements:
            zbd_voltage = rnc.timestreams[modulated_timestream_index].zbd_voltage[0]
            noise_modulated.zbd_voltage = zbd_voltage
            for snm in noise_on:
                snm.zbd_voltage = zbd_voltage

            # Save only the Parameters object from a fit to the coarse sweep.
            freq, s21, err = rnc.sweeps[coarse_sweep_index].select_by_index(resonator_index)
            coarse_resonator = fit_best_resonator(freq, s21, errors=err)
            all_coarse_sweep_params.append(coarse_resonator.result.params)

        rnc.close()
        data = {'noise_on_measurements': all_noise_on,
                'noise_off_measurements': all_noise_off,
                'noise_modulated_measurements': all_noise_modulated,
                'coarse_sweep_params': all_coarse_sweep_params}
        # We decided to keep the .pkl files in /home/data regardless of origin.
        pkl_filename = os.path.join('/home/data/pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, data)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        print("Aborting {}".format(nc_filename))
Code Example #9
def extract_and_pickle(nc_filename, deglitch_threshold=5):
    basedir = os.path.split(
        nc_filename
    )[0]  # should make this more robust, currently assumes all nc files are in top
    #  level of /data/<machine>/*.nc
    machine = os.path.split(basedir)[1]
    cryostat = cryostats[machine]
    print(cryostat)
    try:
        print("Processing {}".format(nc_filename))
        snms = []
        rnc = ReadoutNetCDF(nc_filename)
        for timestream_index, timestream in enumerate(rnc.timestreams):
            if timestream.epoch.shape[0] == 0:
                print "no timestreams in", nc_filename
                return
            start_epoch = timestream.epoch.min()
            sweep_index = find_closest_sweep(timestream, rnc.sweeps)
            sweep = rnc.sweeps[sweep_index]
            sweep_epoch = sweep.end_epoch
            modulation_state, modulation_frequency = rnc.get_modulation_state_at(
                start_epoch)
            try:
                manual_modulation_frequency = timestream.mmw_source_modulation_frequency[
                    0]
            except AttributeError:
                manual_modulation_frequency = 0
            if modulation_state == 2 or manual_modulation_frequency > 0:
                this_deglitch_threshold = None
            else:
                this_deglitch_threshold = deglitch_threshold
            resonator_indexes = np.array(list(set(sweep.index)))
            resonator_indexes.sort()
            print "%s: timestream[%d] at %s, associated sweep[%d] at %s, %d resonators" % (
                nc_filename, timestream_index, time.ctime(start_epoch),
                sweep_index, time.ctime(sweep_epoch), len(resonator_indexes))
            if this_deglitch_threshold is None:
                print "Found modulation, not deglitching"
            for resonator_index in resonator_indexes:
                tic = time.time()
                snm = SweepNoiseMeasurement(
                    rnc,
                    sweep_group_index=sweep_index,
                    timestream_group_index=timestream_index,
                    resonator_index=resonator_index,
                    cryostat=cryostat,
                    deglitch_threshold=this_deglitch_threshold,
                )
                print "created snm for",rnc.filename,timestream_index,resonator_index,deglitch_threshold,"in",\
                    (time.time()-tic)
                try:
                    snm.zbd_voltage = timestream.zbd_voltage[0]
                except AttributeError:
                    pass
                tic = time.time()
                if snm.timestream_modulation_period_samples != 0:
                    snm.folded_projected_timeseries = snm.projected_timeseries.reshape(
                        (-1, snm.timestream_modulation_period_samples))
                    folded = snm.folded_projected_timeseries.mean(0)
                    high, low, rising_edge = kid_readout.analysis.fit_pulses.find_high_low(
                        folded)
                    snm.folded_projected_timeseries = np.roll(
                        snm.folded_projected_timeseries, -rising_edge,
                        axis=1).mean(0)
                    snm.folded_normalized_timeseries = np.roll(
                        snm.normalized_timeseries.reshape(
                            (-1, snm.timestream_modulation_period_samples)),
                        -rising_edge,
                        axis=1).mean(0)
                    print "folded time series in ", (time.time() - tic)

                pkld = cPickle.dumps(snm, cPickle.HIGHEST_PROTOCOL)
                del snm
                snm = cPickle.loads(pkld)
                snms.append(snm)
        rnc.close()
        pkl_filename = os.path.join(
            basedir, 'pkl',
            os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, snms)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        pass
    except Exception as e:
        print "failed on", nc_filename, e
Code Example #10
def extract_and_pickle(nc_filename):
    """
    The format is that the file contains equal numbers of sweeps and timestreams. The first sweep is used to locate
    the resonances and is taken with the source off at the lowest power level, i.e. the maximum attenuation. The
    first timestream is taken under the same conditions except that the source is modulated. Subsequent sweeps and
    timestreams are paired.

    :param nc_filename: the file name of the netCDF4 file with the above format.
    :return: a dictionary
    """
    try:
        all_noise_on = []
        all_noise_off = []
        all_noise_modulated = []
        all_coarse_sweep_params = []
        coarse_sweep_index = 0
        modulated_timestream_index = 0

        print("Processing {}".format(nc_filename))
        rnc = ReadoutNetCDF(nc_filename)
        resonator_indices = sorted(set(rnc.sweeps[0].index))
        n_attenuations = len(rnc.sweeps) - 1

        for resonator_index in resonator_indices:
            noise_on = []
            for on_index in range(1, n_attenuations, 2):
                noise_on.append(
                    SweepNoiseMeasurement(nc_filename,
                                          resonator_index=resonator_index,
                                          sweep_group_index=on_index,
                                          timestream_group_index=on_index))
            all_noise_on.extend(noise_on)

            noise_off = []
            for off_index in range(2, n_attenuations + 1, 2):
                noise_off.append(
                    SweepNoiseMeasurement(nc_filename,
                                          resonator_index=resonator_index,
                                          sweep_group_index=off_index,
                                          timestream_group_index=off_index))
            all_noise_off.extend(noise_off)

            # Create the modulated measurement from the modulated timestream and the noise off sweep at the same power.
            # Skip deglitching.
            attenuations = [snm.atten for snm in noise_off]
            off_max_attenuation_index = 1 + 2 * attenuations.index(
                max(attenuations)) + 1
            noise_modulated = SweepNoiseMeasurement(
                nc_filename,
                resonator_index=resonator_index,
                sweep_group_index=off_max_attenuation_index,
                timestream_group_index=modulated_timestream_index,
                deglitch_threshold=None)
            noise_modulated.folded_projected_timeseries = noise_modulated.projected_timeseries.reshape(
                (-1, noise_modulated.timestream_modulation_period_samples))
            folded = noise_modulated.folded_projected_timeseries.mean(0)
            high, low, rising_edge = find_high_low(folded)
            noise_modulated.folded_projected_timeseries = np.roll(
                noise_modulated.folded_projected_timeseries,
                -rising_edge,
                axis=1)
            noise_modulated.folded_normalized_timeseries = np.roll(
                noise_modulated.normalized_timeseries.reshape(
                    (-1,
                     noise_modulated.timestream_modulation_period_samples)),
                -rising_edge,
                axis=1)
            all_noise_modulated.append(noise_modulated)

            # Add the ZBD voltage from the modulated timestream to the modulated and static on measurements:
            zbd_voltage = rnc.timestreams[
                modulated_timestream_index].zbd_voltage[0]
            noise_modulated.zbd_voltage = zbd_voltage
            for snm in noise_on:
                snm.zbd_voltage = zbd_voltage

            # Save only the Parameters object from a fit to the coarse sweep.
            freq, s21, err = rnc.sweeps[coarse_sweep_index].select_by_index(
                resonator_index)
            coarse_resonator = fit_best_resonator(freq, s21, errors=err)
            all_coarse_sweep_params.append(coarse_resonator.result.params)

        rnc.close()
        data = {
            'noise_on_measurements': all_noise_on,
            'noise_off_measurements': all_noise_off,
            'noise_modulated_measurements': all_noise_modulated,
            'coarse_sweep_params': all_coarse_sweep_params
        }
        # We decided to keep the .pkl files in /home/data regardless of origin.
        pkl_filename = os.path.join(
            '/home/data/pkl',
            os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, data)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        print("Aborting {}".format(nc_filename))