def extract_and_pickle(nc_filename):
    """
    Extract a SweepNoiseMeasurement for each resonator of each sweep/timestream
    pair in the given netCDF4 file, and save the resulting list as a .pkl file
    in a 'pkl' subdirectory next to the source file.

    :param nc_filename: path to the netCDF4 readout file.
    :raises ValueError: if the numbers of sweeps and timestreams differ.
    """
    # should make this more robust, currently assumes all nc files are in top
    # level of /data/<machine>/*.nc
    basedir = os.path.split(nc_filename)[0]
    try:
        print("Processing {}".format(nc_filename))
        snms = []
        rnc = ReadoutNetCDF(nc_filename)
        try:
            if len(rnc.sweeps) != len(rnc.timestreams):
                raise ValueError(
                    "The number of sweeps does not match the number of timestreams in {}".format(nc_filename))
            for index, (sweep, timestream) in enumerate(zip(rnc.sweeps, rnc.timestreams)):
                for resonator_index in set(sweep.index):
                    snm = SweepNoiseMeasurement(nc_filename, sweep_group_index=index, timestream_group_index=index,
                                                resonator_index=resonator_index)
                    try:
                        # Not every timestream has a ZBD voltage recorded.
                        snm.zbd_voltage = timestream.zbd_voltage[0]
                    except AttributeError:
                        pass
                    snms.append(snm)
        finally:
            # Close the netCDF file even if extraction fails partway through.
            rnc.close()
        # The .pkl file goes in a 'pkl' subdirectory next to the .nc file.
        pkl_filename = os.path.join(basedir, 'pkl',
                                    os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, snms)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        # Allow a batch run to be interrupted without a traceback.
        pass
def extract_and_pickle(nc_filename):
    """
    Extract a SweepNoiseMeasurement for each resonator of each sweep/timestream
    pair in the given netCDF4 file, and save the resulting list as a .pkl file
    in a 'pkl' subdirectory next to the source file.

    :param nc_filename: path to the netCDF4 readout file.
    :raises ValueError: if the numbers of sweeps and timestreams differ.
    """
    # should make this more robust, currently assumes all nc files are in top
    # level of /data/<machine>/*.nc
    basedir = os.path.split(nc_filename)[0]
    try:
        print("Processing {}".format(nc_filename))
        snms = []
        rnc = ReadoutNetCDF(nc_filename)
        try:
            if len(rnc.sweeps) != len(rnc.timestreams):
                raise ValueError(
                    "The number of sweeps does not match the number of timestreams in {}".format(nc_filename))
            for index, (sweep, timestream) in enumerate(zip(rnc.sweeps, rnc.timestreams)):
                for resonator_index in set(sweep.index):
                    snm = SweepNoiseMeasurement(nc_filename,
                                                sweep_group_index=index,
                                                timestream_group_index=index,
                                                resonator_index=resonator_index)
                    try:
                        # Not every timestream has a ZBD voltage recorded.
                        snm.zbd_voltage = timestream.zbd_voltage[0]
                    except AttributeError:
                        pass
                    snms.append(snm)
        finally:
            # Close the netCDF file even if extraction fails partway through.
            rnc.close()
        # The .pkl file goes in a 'pkl' subdirectory next to the .nc file.
        pkl_filename = os.path.join(
            basedir, 'pkl',
            os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, snms)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        # Allow a batch run to be interrupted without a traceback.
        pass
def extract_and_pickle(nc_filename): basedir = os.path.split(nc_filename)[0] # should make this more robust, currently assumes all nc files are in top # level of /data/<machine>/*.nc machine = os.path.split(basedir)[1] cryostat = cryostats[machine] try: print("Processing {}".format(nc_filename)) snms = [] rnc = ReadoutNetCDF(nc_filename) for timestream_index,timestream in enumerate(rnc.timestreams): if timestream.epoch.shape[0] == 0: print "no timestreams in", nc_filename return start_epoch = timestream.epoch.min() sweep_index = find_closest_sweep(timestream,rnc.sweeps) sweep = rnc.sweeps[sweep_index] sweep_epoch = sweep.end_epoch resonator_indexes = np.array(list(set(sweep.index))) resonator_indexes.sort() print "%s: timestream[%d] at %s, associated sweep[%d] at %s, %d resonators" % (nc_filename,timestream_index, time.ctime(start_epoch), sweep_index, time.ctime(sweep_epoch), len(resonator_indexes)) for resonator_index in resonator_indexes: snm = SweepNoiseMeasurement(rnc, sweep_group_index=sweep_index, timestream_group_index=timestream_index, resonator_index=resonator_index, cryostat=cryostat) if nc_filename in atten_map: atten = atten_map[nc_filename][timestream_index] ntone_correction = ntone_power_correction(16) print "overriding attenuation",atten snm.atten = atten snm.total_dac_atten = atten +ntone_correction snm.power_dbm = snm.dac_chain_gain - snm.total_dac_atten try: snm.zbd_voltage = timestream.zbd_voltage[0] except AttributeError: pass pkld = cPickle.dumps(snm,cPickle.HIGHEST_PROTOCOL) del snm snm = cPickle.loads(pkld) snms.append(snm) rnc.close() pkl_filename = os.path.join(basedir,'pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl') save_noise_pkl(pkl_filename, snms) print("Saved {}".format(pkl_filename)) except KeyboardInterrupt: pass except Exception as e: print "failed on",nc_filename,e
def extract_and_pickle(nc_filename, deglitch_threshold=5): basedir = os.path.split(nc_filename)[0] # should make this more robust, currently assumes all nc files are in top # level of /data/<machine>/*.nc machine = os.path.split(basedir)[1] cryostat = cryostats[machine] print cryostat try: print("Processing {}".format(nc_filename)) snms = [] rnc = ReadoutNetCDF(nc_filename) for timestream_index,timestream in enumerate(rnc.timestreams): if timestream.epoch.shape[0] == 0: print "no timestreams in", nc_filename return start_epoch = timestream.epoch.min() sweep_index = find_closest_sweep(timestream,rnc.sweeps) sweep = rnc.sweeps[sweep_index] sweep_epoch = sweep.end_epoch modulation_state,modulation_frequency = rnc.get_modulation_state_at(start_epoch) try: manual_modulation_frequency = timestream.mmw_source_modulation_frequency[0] except AttributeError: manual_modulation_frequency = 0 if modulation_state == 2 or manual_modulation_frequency > 0: this_deglitch_threshold = None else: this_deglitch_threshold = deglitch_threshold resonator_indexes = np.array(list(set(sweep.index))) resonator_indexes.sort() print "%s: timestream[%d] at %s, associated sweep[%d] at %s, %d resonators" % (nc_filename,timestream_index, time.ctime(start_epoch), sweep_index, time.ctime(sweep_epoch), len(resonator_indexes)) if this_deglitch_threshold is None: print "Found modulation, not deglitching" for resonator_index in resonator_indexes: tic = time.time() snm = SweepNoiseMeasurement(rnc, sweep_group_index=sweep_index, timestream_group_index=timestream_index, resonator_index=resonator_index, cryostat=cryostat, deglitch_threshold=this_deglitch_threshold, ) print "created snm for",rnc.filename,timestream_index,resonator_index,deglitch_threshold,"in",\ (time.time()-tic) try: snm.zbd_voltage = timestream.zbd_voltage[0] except AttributeError: pass tic = time.time() if snm.timestream_modulation_period_samples != 0: snm.folded_projected_timeseries = snm.projected_timeseries.reshape((-1, 
snm.timestream_modulation_period_samples)) folded = snm.folded_projected_timeseries.mean(0) high, low, rising_edge = kid_readout.analysis.fit_pulses.find_high_low(folded) snm.folded_projected_timeseries = np.roll(snm.folded_projected_timeseries,-rising_edge, axis=1).mean(0) snm.folded_normalized_timeseries = np.roll( snm.normalized_timeseries.reshape((-1,snm.timestream_modulation_period_samples)), -rising_edge, axis=1).mean(0) print "folded time series in ",(time.time()-tic) pkld = cPickle.dumps(snm,cPickle.HIGHEST_PROTOCOL) del snm snm = cPickle.loads(pkld) snms.append(snm) rnc.close() pkl_filename = os.path.join(basedir,'pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl') save_noise_pkl(pkl_filename, snms) print("Saved {}".format(pkl_filename)) except KeyboardInterrupt: pass except Exception as e: print "failed on",nc_filename,e
def extract_and_pickle(nc_filename):
    """
    Extract source-on, source-off, and source-modulated noise measurements from the given netCDF4 file and save them
    as a dictionary in a .pkl file under /home/data/pkl.

    The format is that the file contains equal numbers of sweeps and timestreams. The first sweep is used to locate
    the resonances and is taken with the source off at the lowest power level, i.e. the maximum attenuation. The
    first timestream is taken under the same conditions except that the source is modulated. Subsequent sweeps and
    timestreams are paired.

    :param nc_filename: the file name of the netCDF4 file with the above format.
    :return: None; the extracted dictionary is pickled to disk.
    """
    try:
        all_noise_on = []
        all_noise_off = []
        all_noise_modulated = []
        all_coarse_sweep_params = []
        coarse_sweep_index = 0
        modulated_timestream_index = 0
        print("Processing {}".format(nc_filename))
        rnc = ReadoutNetCDF(nc_filename)
        try:
            resonator_indices = sorted(set(rnc.sweeps[0].index))
            n_attenuations = len(rnc.sweeps) - 1
            for resonator_index in resonator_indices:
                # Odd-numbered groups were taken with the source on.
                noise_on = []
                for on_index in range(1, n_attenuations, 2):
                    noise_on.append(SweepNoiseMeasurement(nc_filename, resonator_index=resonator_index,
                                                          sweep_group_index=on_index,
                                                          timestream_group_index=on_index))
                all_noise_on.extend(noise_on)
                # Even-numbered groups were taken with the source off.
                noise_off = []
                for off_index in range(2, n_attenuations + 1, 2):
                    noise_off.append(SweepNoiseMeasurement(nc_filename, resonator_index=resonator_index,
                                                           sweep_group_index=off_index,
                                                           timestream_group_index=off_index))
                all_noise_off.extend(noise_off)
                # Create the modulated measurement from the modulated timestream and the noise off sweep at the same
                # power. Skip deglitching, which would clip the modulation waveform.
                attenuations = [snm.atten for snm in noise_off]
                off_max_attenuation_index = 1 + 2 * attenuations.index(max(attenuations)) + 1
                noise_modulated = SweepNoiseMeasurement(nc_filename, resonator_index=resonator_index,
                                                        sweep_group_index=off_max_attenuation_index,
                                                        timestream_group_index=modulated_timestream_index,
                                                        deglitch_threshold=None)
                # Fold the time series on the modulation period and roll so the rising edge starts each fold.
                noise_modulated.folded_projected_timeseries = noise_modulated.projected_timeseries.reshape(
                    (-1, noise_modulated.timestream_modulation_period_samples))
                folded = noise_modulated.folded_projected_timeseries.mean(0)
                high, low, rising_edge = find_high_low(folded)
                noise_modulated.folded_projected_timeseries = np.roll(noise_modulated.folded_projected_timeseries,
                                                                      -rising_edge, axis=1)
                noise_modulated.folded_normalized_timeseries = np.roll(
                    noise_modulated.normalized_timeseries.reshape(
                        (-1, noise_modulated.timestream_modulation_period_samples)),
                    -rising_edge, axis=1)
                all_noise_modulated.append(noise_modulated)
                # Add the ZBD voltage from the modulated timestream to the modulated and static on measurements:
                zbd_voltage = rnc.timestreams[modulated_timestream_index].zbd_voltage[0]
                noise_modulated.zbd_voltage = zbd_voltage
                for snm in noise_on:
                    snm.zbd_voltage = zbd_voltage
                # Save only the Parameters object from a fit to the coarse sweep.
                freq, s21, err = rnc.sweeps[coarse_sweep_index].select_by_index(resonator_index)
                coarse_resonator = fit_best_resonator(freq, s21, errors=err)
                all_coarse_sweep_params.append(coarse_resonator.result.params)
        finally:
            # Close the netCDF file even if extraction fails partway through.
            rnc.close()
        data = {'noise_on_measurements': all_noise_on,
                'noise_off_measurements': all_noise_off,
                'noise_modulated_measurements': all_noise_modulated,
                'coarse_sweep_params': all_coarse_sweep_params}
        # We decided to keep the .pkl files in /home/data regardless of origin.
        pkl_filename = os.path.join('/home/data/pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, data)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        print("Aborting {}".format(nc_filename))
def extract_and_pickle(nc_filename, deglitch_threshold=5): basedir = os.path.split( nc_filename )[0] # should make this more robust, currently assumes all nc files are in top # level of /data/<machine>/*.nc machine = os.path.split(basedir)[1] cryostat = cryostats[machine] print cryostat try: print("Processing {}".format(nc_filename)) snms = [] rnc = ReadoutNetCDF(nc_filename) for timestream_index, timestream in enumerate(rnc.timestreams): if timestream.epoch.shape[0] == 0: print "no timestreams in", nc_filename return start_epoch = timestream.epoch.min() sweep_index = find_closest_sweep(timestream, rnc.sweeps) sweep = rnc.sweeps[sweep_index] sweep_epoch = sweep.end_epoch modulation_state, modulation_frequency = rnc.get_modulation_state_at( start_epoch) try: manual_modulation_frequency = timestream.mmw_source_modulation_frequency[ 0] except AttributeError: manual_modulation_frequency = 0 if modulation_state == 2 or manual_modulation_frequency > 0: this_deglitch_threshold = None else: this_deglitch_threshold = deglitch_threshold resonator_indexes = np.array(list(set(sweep.index))) resonator_indexes.sort() print "%s: timestream[%d] at %s, associated sweep[%d] at %s, %d resonators" % ( nc_filename, timestream_index, time.ctime(start_epoch), sweep_index, time.ctime(sweep_epoch), len(resonator_indexes)) if this_deglitch_threshold is None: print "Found modulation, not deglitching" for resonator_index in resonator_indexes: tic = time.time() snm = SweepNoiseMeasurement( rnc, sweep_group_index=sweep_index, timestream_group_index=timestream_index, resonator_index=resonator_index, cryostat=cryostat, deglitch_threshold=this_deglitch_threshold, ) print "created snm for",rnc.filename,timestream_index,resonator_index,deglitch_threshold,"in",\ (time.time()-tic) try: snm.zbd_voltage = timestream.zbd_voltage[0] except AttributeError: pass tic = time.time() if snm.timestream_modulation_period_samples != 0: snm.folded_projected_timeseries = snm.projected_timeseries.reshape( (-1, 
snm.timestream_modulation_period_samples)) folded = snm.folded_projected_timeseries.mean(0) high, low, rising_edge = kid_readout.analysis.fit_pulses.find_high_low( folded) snm.folded_projected_timeseries = np.roll( snm.folded_projected_timeseries, -rising_edge, axis=1).mean(0) snm.folded_normalized_timeseries = np.roll( snm.normalized_timeseries.reshape( (-1, snm.timestream_modulation_period_samples)), -rising_edge, axis=1).mean(0) print "folded time series in ", (time.time() - tic) pkld = cPickle.dumps(snm, cPickle.HIGHEST_PROTOCOL) del snm snm = cPickle.loads(pkld) snms.append(snm) rnc.close() pkl_filename = os.path.join( basedir, 'pkl', os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl') save_noise_pkl(pkl_filename, snms) print("Saved {}".format(pkl_filename)) except KeyboardInterrupt: pass except Exception as e: print "failed on", nc_filename, e
def extract_and_pickle(nc_filename):
    """
    Extract source-on, source-off, and source-modulated noise measurements from the given netCDF4 file and save them
    as a dictionary in a .pkl file under /home/data/pkl.

    The format is that the file contains equal numbers of sweeps and timestreams. The first sweep is used to locate
    the resonances and is taken with the source off at the lowest power level, i.e. the maximum attenuation. The
    first timestream is taken under the same conditions except that the source is modulated. Subsequent sweeps and
    timestreams are paired.

    :param nc_filename: the file name of the netCDF4 file with the above format.
    :return: None; the extracted dictionary is pickled to disk.
    """
    try:
        all_noise_on = []
        all_noise_off = []
        all_noise_modulated = []
        all_coarse_sweep_params = []
        coarse_sweep_index = 0
        modulated_timestream_index = 0
        print("Processing {}".format(nc_filename))
        rnc = ReadoutNetCDF(nc_filename)
        try:
            resonator_indices = sorted(set(rnc.sweeps[0].index))
            n_attenuations = len(rnc.sweeps) - 1
            for resonator_index in resonator_indices:
                # Odd-numbered groups were taken with the source on.
                noise_on = []
                for on_index in range(1, n_attenuations, 2):
                    noise_on.append(
                        SweepNoiseMeasurement(nc_filename,
                                              resonator_index=resonator_index,
                                              sweep_group_index=on_index,
                                              timestream_group_index=on_index))
                all_noise_on.extend(noise_on)
                # Even-numbered groups were taken with the source off.
                noise_off = []
                for off_index in range(2, n_attenuations + 1, 2):
                    noise_off.append(
                        SweepNoiseMeasurement(nc_filename,
                                              resonator_index=resonator_index,
                                              sweep_group_index=off_index,
                                              timestream_group_index=off_index))
                all_noise_off.extend(noise_off)
                # Create the modulated measurement from the modulated timestream and the noise off sweep at the same
                # power. Skip deglitching, which would clip the modulation waveform.
                attenuations = [snm.atten for snm in noise_off]
                off_max_attenuation_index = 1 + 2 * attenuations.index(max(attenuations)) + 1
                noise_modulated = SweepNoiseMeasurement(
                    nc_filename,
                    resonator_index=resonator_index,
                    sweep_group_index=off_max_attenuation_index,
                    timestream_group_index=modulated_timestream_index,
                    deglitch_threshold=None)
                # Fold the time series on the modulation period and roll so the rising edge starts each fold.
                noise_modulated.folded_projected_timeseries = noise_modulated.projected_timeseries.reshape(
                    (-1, noise_modulated.timestream_modulation_period_samples))
                folded = noise_modulated.folded_projected_timeseries.mean(0)
                high, low, rising_edge = find_high_low(folded)
                noise_modulated.folded_projected_timeseries = np.roll(
                    noise_modulated.folded_projected_timeseries, -rising_edge, axis=1)
                noise_modulated.folded_normalized_timeseries = np.roll(
                    noise_modulated.normalized_timeseries.reshape(
                        (-1, noise_modulated.timestream_modulation_period_samples)),
                    -rising_edge, axis=1)
                all_noise_modulated.append(noise_modulated)
                # Add the ZBD voltage from the modulated timestream to the modulated and static on measurements:
                zbd_voltage = rnc.timestreams[modulated_timestream_index].zbd_voltage[0]
                noise_modulated.zbd_voltage = zbd_voltage
                for snm in noise_on:
                    snm.zbd_voltage = zbd_voltage
                # Save only the Parameters object from a fit to the coarse sweep.
                freq, s21, err = rnc.sweeps[coarse_sweep_index].select_by_index(resonator_index)
                coarse_resonator = fit_best_resonator(freq, s21, errors=err)
                all_coarse_sweep_params.append(coarse_resonator.result.params)
        finally:
            # Close the netCDF file even if extraction fails partway through.
            rnc.close()
        data = {
            'noise_on_measurements': all_noise_on,
            'noise_off_measurements': all_noise_off,
            'noise_modulated_measurements': all_noise_modulated,
            'coarse_sweep_params': all_coarse_sweep_params
        }
        # We decided to keep the .pkl files in /home/data regardless of origin.
        pkl_filename = os.path.join(
            '/home/data/pkl',
            os.path.splitext(os.path.split(nc_filename)[1])[0] + '.pkl')
        save_noise_pkl(pkl_filename, data)
        print("Saved {}".format(pkl_filename))
    except KeyboardInterrupt:
        print("Aborting {}".format(nc_filename))