def detect_meteors(
    rf_dir, id_dir, noise_dir, output_dir, t0=None, t1=None,
    rxch='zenith-l', txch='tx-h', snr_thresh=1, rmin_km=70, rmax_km=140,
    vmin_kps=7, vmax_kps=72, eps=0.5, min_samples=5,
    tscale=1, rscale=1, vscale=1, debug=False,
):
    """Detect and summarize meteor head echoes.

    Steps pulse-by-pulse through RF voltage data, matched-filters each
    pulse, detects meteor echoes, clusters detections across pulses into
    individual head echoes, and writes one summary row per echo to a CSV
    file in ``output_dir``.

    Arguments
    ---------
    rf_dir : string or list
        RF data directory or directories.
    id_dir : string
        ID code metadata directory.
    noise_dir : string
        RX noise metadata directory.
    output_dir : string
        Meteor data output directory.
    t0 : float, optional
        Start time, seconds since epoch. If None, start at beginning of data.
    t1 : float, optional
        End time, seconds since epoch. If None, end at end of data.
    rxch : string, optional
        Receiver channel to process.
    txch : string, optional
        Transmitter channel.
    snr_thresh : float, optional
        SNR threshold for detection (passed to ``mp.detect_meteors``).
    rmin_km, rmax_km : float, optional
        Range gate limits in km for the matched filter.
    vmin_kps, vmax_kps : float, optional
        Velocity window in km/s accepted by the detector.
    eps, min_samples : float, int, optional
        DBSCAN-style clustering parameters (passed to ``Clustering``).
    tscale, rscale, vscale : float, optional
        Scalings applied to time/range/velocity axes before clustering
        (passed to ``Clustering``).
    debug : bool, optional
        Enable debug output in the per-cluster summaries.
    """
    # set up reader objects for data and metadata
    rfo = drf.DigitalRFReader(rf_dir)
    ido = drf.DigitalMetadataReader(id_dir)
    no = drf.DigitalMetadataReader(noise_dir)

    # infer time window to process based on bounds of data and metadata:
    # the processable window is the intersection of all sources
    if t0 is None or t1 is None:
        bounds = []
        bounds.append(rfo.get_bounds(rxch))
        bounds.append(rfo.get_bounds(txch))
        bounds.append(ido.get_bounds())
        bounds.append(no.get_bounds())
        bounds = np.asarray(bounds)
        ss = np.max(bounds[:, 0])  # latest start across sources
        se = np.min(bounds[:, 1])  # earliest end across sources

    fs = rfo.get_digital_rf_metadata(rxch)['samples_per_second']
    # convert epoch seconds to sample indices (uint64 truncates toward zero)
    if t0 is None:
        s0 = ss
    else:
        s0 = int(np.uint64(t0 * fs))
    if t1 is None:
        s1 = se
    else:
        s1 = int(np.uint64(t1 * fs))

    # load pulse/coding information, preferring a cached copy if present
    tmm = TimingModeManager.TimingModeManager()
    if os.path.exists('/tmp/tmm.hdf5'):
        tmm.loadFromHdf5('/tmp/tmm.hdf5', skip_lowlevel=True)
    else:
        tmm.loadFromHdf5(skip_lowlevel=True)

    # initialize generator that steps through data pulse by pulse
    pulse_data = data_generator(rfo, ido, no, tmm, s0, s1, rxch, txch)

    # initialize clustering object for grouping detections
    clustering = Clustering(eps, min_samples, tscale, rscale, vscale)

    # initialize CSV file for saving meteor clusters.
    # FIX: the csv module requires a text-mode file opened with newline=''
    # (the original opened "wb", which csv.DictWriter cannot write to in
    # Python 3, and buffering=1 is only valid in text mode). Line buffering
    # keeps partial results on disk; the `with` block guarantees the file
    # is closed even if processing raises.
    csvpath = os.path.join(output_dir, 'cluster_summaries.txt')
    with open(csvpath, 'w', buffering=1, newline='') as csvfile:
        # summarize_meteor(None) returns the list of column names
        cols = mp.summarize_meteor(None)
        csvwriter = csv.DictWriter(csvfile, cols)
        csvwriter.writeheader()

        # loop that steps through data one pulse at a time
        for k, (tx, rx) in enumerate(pulse_data):
            # marching periods as status update
            if (k % 100) == 0:
                sys.stdout.write('.')
                sys.stdout.flush()

            # matched filter over the configured range gates
            mf_rx = mp.matched_filter(tx, rx, rmin_km, rmax_km)

            # meteor signal detection
            meteors = mp.detect_meteors(mf_rx, snr_thresh, vmin_kps, vmax_kps)

            # clustering of detections into single meteor head echoes
            for meteor in meteors:
                sys.stdout.write('*')
                sys.stdout.flush()
                new_clusters = clustering.addnext(pulse_num=k, **meteor)
                for c in new_clusters:
                    sys.stdout.write('{0}'.format(c.cluster.values[0]))
                    # summarize head echo and save to a data file
                    cluster_summary = mp.summarize_meteor(c, debug=debug)
                    csvwriter.writerow(cluster_summary)

        # tell clustering object that data is exhausted and to return any
        # final clusters
        new_clusters = clustering.finish()
        for c in new_clusters:
            # summarize head echo and save to a data file
            # FIX: pass debug through here too, consistent with the main loop
            # (the original silently dropped it for the final clusters)
            cluster_summary = mp.summarize_meteor(c, debug=debug)
            csvwriter.writerow(cluster_summary)