def read_fileset(fileset):
    """
    Extract required data from the sdoss fileset.

    Parameters
    ----------
    fileset : iterable of str
        Paths of the "*_feat.csv" files to read.

    Returns
    -------
    dict or None
        Dict of parallel lists keyed by output field name
        ('DATE_OBS', 'FEAT_HG_LONG_DEG', ...), one entry per feature
        row across all files, or None if any file is empty/unreadable.
    """
    feat_data = {
        'DATE_OBS': [],
        'FEAT_HG_LONG_DEG': [],
        'FEAT_HG_LAT_DEG': [],
        'FEAT_X_PIX': [],
        'FEAT_Y_PIX': [],
        'FEAT_AREA_DEG2': [],
        'FEAT_FILENAME': []
    }
    for current_file in fileset:
        current_date = get_date_obs(current_file)
        current_data = read_csv(current_file)
        # Truthiness covers both an empty list and a None return from
        # read_csv (the original len() call would crash on None).
        if not current_data:
            # Abort the whole read: partial data would be misleading.
            LOG.error("Empty file: %s!", current_file)
            return None
        for cd in current_data:
            feat_data['DATE_OBS'].append(current_date)
            feat_data['FEAT_HG_LONG_DEG'].append(float(cd['FEAT_HG_LONG_DEG']))
            feat_data['FEAT_HG_LAT_DEG'].append(float(cd['FEAT_HG_LAT_DEG']))
            feat_data['FEAT_X_PIX'].append(int(cd['FEAT_X_PIX']))
            feat_data['FEAT_Y_PIX'].append(int(cd['FEAT_Y_PIX']))
            feat_data['FEAT_AREA_DEG2'].append(float(cd['FEAT_AREA_DEG2']))
            feat_data['FEAT_FILENAME'].append(current_file)
    return feat_data
def get_date_obs(filename):
    """
    Return the observation date for a "*_feat.csv" file.

    The date is read from the DATE_OBS column of the first row of the
    companion "*_init.csv" file.

    Parameters
    ----------
    filename : str
        Path of the "*_feat.csv" file.

    Returns
    -------
    datetime.datetime
        DATE_OBS parsed with INPUT_TFORMAT.

    Exits the program with status 1 if the init file cannot be read.
    """
    init_file = filename.split("_feat.csv")[0] + "_init.csv"
    init_data = read_csv(init_file, quiet=True)
    if init_data is None:
        # Bug fix: log the file path, not the data (which is None here).
        LOG.error("Cannot read %s!", init_file)
        sys.exit(1)
    date_obs = init_data[0]['DATE_OBS']
    return datetime.strptime(date_obs, INPUT_TFORMAT)
def load_trackid(trackset, feat_data, max_track_id):
    """
    Load track ids from list of track files, updating if feature
    matching occurs.

    Parameters
    ----------
    trackset : iterable of str
        Paths of previously written track files.
    feat_data : dict
        Parallel-list feature data as returned by read_fileset
        (uses 'DATE_OBS', 'FEAT_X_PIX', 'FEAT_Y_PIX').
    max_track_id : int
        Highest track id already assigned elsewhere.

    Returns
    -------
    list
        One track id per feature in feat_data: the existing id when the
        (date, x, y) triple matches a track file entry, otherwise a new
        id above the current maximum. Empty list if no track data could
        be read.
    """
    # Map (date_obs, x_pix, y_pix) -> track id for O(1) lookup instead of
    # the original O(n) list scan per feature. setdefault keeps the FIRST
    # occurrence, matching list.index semantics on duplicates.
    known = {}
    all_tids = []
    for current_file in trackset:
        current_data = read_csv(current_file, quiet=True)
        if current_data is None:
            continue
        for td in current_data:
            key = (
                datetime.strptime(td['DATE_OBS'], INPUT_TFORMAT),
                int(td['FEAT_X_PIX']),
                int(td['FEAT_Y_PIX'])
            )
            tid_val = np.int64(td['TRACK_ID'])
            all_tids.append(tid_val)
            known.setdefault(key, tid_val)
    if not all_tids:
        return []
    # Max over ALL ids read (not just first occurrences), and over the
    # caller-supplied maximum, so new ids never collide.
    max_tid = np.max([max_track_id, np.max(all_tids)])
    track_id = []
    count = 1
    for i, current_date in enumerate(feat_data['DATE_OBS']):
        key = (
            current_date,
            feat_data['FEAT_X_PIX'][i],
            feat_data['FEAT_Y_PIX'][i]
        )
        if key in known:
            track_id.append(known[key])
        else:
            track_id.append(max_tid + count)
            count += 1
    return track_id