def process_interval_jobs(dataset, tag, job, conn):
    data = bUtil.process_common_data(dataset, tag, 'interval', job)

    sharpness = deepcopy(data)
    sharpness.extend(["sharpness", job["sharpness"]])
    bUtil.insert_benchmark(sharpness, conn)

    resolution = deepcopy(data)
    resolution.extend(["resolution", job["resolution"]])
    bUtil.insert_benchmark(resolution, conn)

    coverage = deepcopy(data)
    coverage.extend(["coverage", job["coverage"]])
    bUtil.insert_benchmark(coverage, conn)

    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)

    Q05 = deepcopy(data)
    Q05.extend(["Q05", job["Q05"]])
    bUtil.insert_benchmark(Q05, conn)

    Q25 = deepcopy(data)
    Q25.extend(["Q25", job["Q25"]])
    bUtil.insert_benchmark(Q25, conn)

    Q75 = deepcopy(data)
    Q75.extend(["Q75", job["Q75"]])
    bUtil.insert_benchmark(Q75, conn)

    Q95 = deepcopy(data)
    Q95.extend(["Q95", job["Q95"]])
    bUtil.insert_benchmark(Q95, conn)

    W05 = deepcopy(data)
    W05.extend(["winkler05", job["winkler05"]])
    bUtil.insert_benchmark(W05, conn)

    W25 = deepcopy(data)
    W25.extend(["winkler25", job["winkler25"]])
    bUtil.insert_benchmark(W25, conn)
def process_point_jobs(dataset, tag, job, conn):
    """
    Extract information from a dictionary with point benchmark results and save it on a database

    :param dataset: the benchmark dataset name
    :param tag: alias for the benchmark group being executed
    :param job: a dictionary with the benchmark results
    :param conn: a connection to a Sqlite database
    :return:
    """
    data = bUtil.process_common_data(dataset, tag, 'point', job)

    rmse = deepcopy(data)
    rmse.extend(["rmse", job["rmse"]])
    bUtil.insert_benchmark(rmse, conn)

    smape = deepcopy(data)
    smape.extend(["smape", job["smape"]])
    bUtil.insert_benchmark(smape, conn)

    u = deepcopy(data)
    u.extend(["u", job["u"]])
    bUtil.insert_benchmark(u, conn)

    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)
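# Usage sketch (not from the source): process_point_jobs() above expects an open
# sqlite3 connection and a result dictionary carrying the "rmse", "smape", "u"
# and "time" keys it reads. The dataset name, tag, metric values and database
# file below are hypothetical placeholders, and the job dict would also need
# whatever extra fields bUtil.process_common_data() consumes.
import sqlite3

conn = sqlite3.connect("benchmarks.db")          # hypothetical database file
job = {"rmse": 0.42, "smape": 3.1, "u": 0.97, "time": 1.8}
process_point_jobs("TAIEX", "example_run", job, conn)
conn.close()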
def get_time(self, ls):
    time = [str(datetime.datetime.now().year)]
    time.extend(ls)
    time = ' '.join(time)
    time = datetime.datetime.strptime(time, '%Y %b %d %H:%M:%S')
    time = datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S')
    return time.split(' ')
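# Illustrative call (assumed, not from the source): ls is expected to carry the
# month-abbreviation, day and clock fields of a log-style timestamp. The current
# year is prepended, the stamp is normalised to ISO form, and the date and time
# parts are returned separately, e.g. if the current year is 2024:
#
#   self.get_time(['Feb', '3', '14:22:07'])  ->  ['2024-02-03', '14:22:07']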
def get_time(hdu_list, time):
    '''
    Gets the time stamp for each image.

    Parameters
    ----------
    :type hdu_list : list
    :param hdu_list: content of fits file.

    :type time : 1D array
    :param time: Array of existing time stamps.

    Returns
    -------
    :return: time (1D array) - Updated time stamp array
    '''
    h, w, l = hdu_list[0].data.shape
    sec2day = 1.0 / (3600.0 * 24.0)
    step = hdu_list[0].header['FRAMTIME'] * sec2day
    t = np.linspace(hdu_list[0].header['BMJD_OBS'] + step / 2,
                    hdu_list[0].header['BMJD_OBS'] + (h - 1) * step, h)
    time.extend(t)
    return time
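# Usage sketch (assumed, not from the source): get_time() above reads FRAMTIME
# and BMJD_OBS from the primary header and the cube depth from the data shape,
# so any 3D FITS cube with those keywords works. The file name is hypothetical.
from astropy.io import fits

time = []
with fits.open('example_cube.fits') as hdu_list:
    time = get_time(hdu_list, time)
print(len(time), time[:3])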
def process_probabilistic_jobs(dataset, tag, job, conn):
    data = bUtil.process_common_data(dataset, tag, 'density', job)

    crps = deepcopy(data)
    crps.extend(["crps", job["CRPS"]])
    bUtil.insert_benchmark(crps, conn)

    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)

    brier = deepcopy(data)
    brier.extend(["brier", job["brier"]])
    bUtil.insert_benchmark(brier, conn)
def process_point_jobs(dataset, tag, job, conn):
    data = bUtil.process_common_data(dataset, tag, 'point', job)

    rmse = deepcopy(data)
    rmse.extend(["rmse", job["rmse"]])
    bUtil.insert_benchmark(rmse, conn)

    smape = deepcopy(data)
    smape.extend(["smape", job["smape"]])
    bUtil.insert_benchmark(smape, conn)

    u = deepcopy(data)
    u.extend(["u", job["u"]])
    bUtil.insert_benchmark(u, conn)

    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)
def build_input_feature(sequences):
    input_feature = []
    notes = []
    velocity = []
    time = []
    for seq in sequences:
        input, note, v, t = devide_single_sequence(seq)
        input_feature.extend(input)
        notes.extend(note)
        velocity.extend(v)
        time.extend(t)
    input_feature = np.array(input_feature)
    notes = np.array(notes)
    velocity = np.array(velocity)
    time = np.array(time)
    return input_feature, notes, velocity, time
def process_interval_jobs(dataset, tag, job, conn):
    """
    Extract information from a dictionary with interval benchmark results and save it on a database

    :param dataset: the benchmark dataset name
    :param tag: alias for the benchmark group being executed
    :param job: a dictionary with the benchmark results
    :param conn: a connection to a Sqlite database
    :return:
    """
    data = bUtil.process_common_data(dataset, tag, 'interval', job)

    sharpness = deepcopy(data)
    sharpness.extend(["sharpness", job["sharpness"]])
    bUtil.insert_benchmark(sharpness, conn)

    resolution = deepcopy(data)
    resolution.extend(["resolution", job["resolution"]])
    bUtil.insert_benchmark(resolution, conn)

    coverage = deepcopy(data)
    coverage.extend(["coverage", job["coverage"]])
    bUtil.insert_benchmark(coverage, conn)

    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)

    Q05 = deepcopy(data)
    Q05.extend(["Q05", job["Q05"]])
    bUtil.insert_benchmark(Q05, conn)

    Q25 = deepcopy(data)
    Q25.extend(["Q25", job["Q25"]])
    bUtil.insert_benchmark(Q25, conn)

    Q75 = deepcopy(data)
    Q75.extend(["Q75", job["Q75"]])
    bUtil.insert_benchmark(Q75, conn)

    Q95 = deepcopy(data)
    Q95.extend(["Q95", job["Q95"]])
    bUtil.insert_benchmark(Q95, conn)

    W05 = deepcopy(data)
    W05.extend(["winkler05", job["winkler05"]])
    bUtil.insert_benchmark(W05, conn)

    W25 = deepcopy(data)
    W25.extend(["winkler25", job["winkler25"]])
    bUtil.insert_benchmark(W25, conn)
def process_probabilistic_jobs(dataset, tag, job, conn):
    """
    Extract information from a dictionary with probabilistic benchmark results and save it on a database

    :param dataset: the benchmark dataset name
    :param tag: alias for the benchmark group being executed
    :param job: a dictionary with the benchmark results
    :param conn: a connection to a Sqlite database
    :return:
    """
    data = bUtil.process_common_data(dataset, tag, 'density', job)

    crps = deepcopy(data)
    crps.extend(["crps", job["CRPS"]])
    bUtil.insert_benchmark(crps, conn)

    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)

    brier = deepcopy(data)
    brier.extend(["brier", job["brier"]])
    bUtil.insert_benchmark(brier, conn)
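# Not part of the source: a minimal sketch of how the repeated
# deepcopy/extend/insert_benchmark pattern used by the process_*_jobs helpers
# above could be expressed as one loop. The metric-to-key pairs are taken from
# those functions; the helper name and signature are assumptions, and it relies
# on the same deepcopy and bUtil imports the surrounding module already uses.
def _insert_metrics(data, job, metrics, conn):
    for column, job_key in metrics:
        row = deepcopy(data)
        row.extend([column, job[job_key]])
        bUtil.insert_benchmark(row, conn)

# e.g. the probabilistic case would then read:
#   data = bUtil.process_common_data(dataset, tag, 'density', job)
#   _insert_metrics(data, job, [("crps", "CRPS"), ("time", "time"), ("brier", "brier")], conn)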
def time_add(self):
    import re
    driver = self.driver
    try:
        time = []
        rating = []
        thumbs = driver.find_elements_by_class_name("thumb-info__right")
        for thumb in thumbs:
            rate = thumb.find_element_by_class_name("thumb-info__text").text
            rating.append(rate)
        # convert each human-readable duration ("45 seconds", "5 minutes", "2 hours")
        # into a number of seconds
        for i in rating:
            if "seconds" in i:
                zn = re.findall(r'(\d+)', i)
                seconds = int(zn[0]) * 1
                print(seconds)
                time.extend([seconds])
            elif "minute" in i:
                zn = re.findall(r'(\d+)', i)
                seconds = int(zn[0]) * 60
                print(seconds)
                time.extend([seconds])
            elif "hour" in i:
                zn = re.findall(r'(\d+)', i)
                seconds = int(zn[0]) * 3600
                print(seconds)
                time.extend([seconds])
        assert time[0] < time[1]
        return True
    except WebDriverException:
        return False
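# Not part of the source: a standalone sketch of the same text-duration-to-seconds
# conversion done inline in time_add() above, handy for testing the parsing
# without a Selenium driver. The function name and unit table are assumptions.
import re

_UNIT_SECONDS = {"second": 1, "minute": 60, "hour": 3600}

def duration_to_seconds(text):
    """Parse strings such as '5 minutes' or '2 hours' into seconds."""
    match = re.search(r'(\d+)', text)
    if not match:
        return None
    value = int(match.group(1))
    for unit, factor in _UNIT_SECONDS.items():
        if unit in text:
            return value * factor
    return None

# duration_to_seconds("45 seconds") -> 45
# duration_to_seconds("5 minutes")  -> 300
# duration_to_seconds("2 hours")    -> 7200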
def filter(frames_files, rng, target, chunk_size=20, return_data=False, return_path=False,
           write=True, return_avgs_and_sigmas=False, no_mag_err=False, plot=True, flagged=None):
    '''
    :param frames_files: A list of files names within the current working directory/Data
    :param rng: integer or float indicating the picture range
    :param target: String name of the target star
    :return: A chart of the filtered and unfiltered data over multiple datasets
    '''
    try:
        from numpy import ndarray as a
    except ImportError:
        print("Library, 'numpy' not installed.")
        sys.exit()
    file_dir = os.path.join(cwd, 'Data')
    mjs = []
    merr_ens = []
    allraw_mag_errs = []
    all_raw_mags = []
    star_list = []
    time = []
    '''frames = read(os.path.join(file_dir,frames_files[0]), fast_reader = {'chunk_size': Mb * 1000000}, format = 'csv').group_by('StarName')'''
    #frames = frames_files[0].group_by('StarName')
    #mask = frames.groups.keys['StarName'] == target
    #target_name = frames.groups[mask]['StarName'][0]
    print('Reading File...')
    for frames in frames_files:
        star_keys = None
        print(frames)
        frames = read(os.path.join(file_dir, frames),
                      fast_reader={'chunk_size': chunk_size * 1000000}, format='csv')
        frames = calc_error_and_name_handler(frames, no_mag_err)
        frames = get_kstars2(frames, rng, target, flagged)
        '''if star_keys is None: frames = get_kstars2(frames, rng, target) star_keys = frames.groups.keys()....
        efficiency update coming soon! should double the speed of the algorithm'''
        frames, ensemble_sigs = preprocess(frames, target, flagged)
        frames = frames.group_by('StarName')
        star_list.extend(frames.groups.keys['StarName'])
        mask = frames.groups.keys['StarName'] == target
        target_data = frames.groups[mask]
        raw_mags = target_data['MAG']
        raw_mag_errs = target_data['MagError']
        frames = frames.group_by('MJD')
        keys = frames.groups.keys
        time.extend(keys['MJD'])
        all_raw_mags.extend(raw_mags)
        allraw_mag_errs.extend(raw_mag_errs)
        for i in range(len(keys)):
            key = keys[i]['MJD']
            key = keys['MJD'] == key
            frame = frames.groups[key]
            mj, merr_j = get_mj(frame)
            mjs.append(mj)
            merr_ens.append(merr_j)
    list_of_lists = [mjs, merr_ens, allraw_mag_errs, all_raw_mags, time]
    list_of_lists = list_to_array(list_of_lists)
    mjs, merr_ens, allraw_mag_errs, all_raw_mags, time = flatten_arrays(list_of_lists)
    corerr = [sqrt(merr_i * merr_i + merens * merens)
              for merr_i, merens in zip(allraw_mag_errs, merr_ens)]
    num_found = len(set(star_list))
    num_frames = len(time)
    if num_frames == 0:
        print('Error: No stars were found for the ensemble, this star might not appear in all plates.')
        return None
    M = sum(mjs) / num_frames
    mcor = [mag - (mj - M) for mag, mj in zip(all_raw_mags, mjs)]
    avg_cor_mag = np.mean(mcor)
    avg_raw_mag = np.mean(all_raw_mags)
    raw_std, cor_std = list(map(np.std, [all_raw_mags, mcor]))
    num_frames = len(time)
    target_name = target
    print("number of stars found", num_found)
    '''return {'raw_sigma': raw_std, 'corrected_sigma': cor_std, 'num_found': num_found, 'time': time}'''
    data = [time, all_raw_mags, mcor, rng, target_name, num_found, raw_std, cor_std]
    tabled_data = build_table(data)
    path = None
    if write:
        path = write_handler(tabled_data, target_name, 'LcUnplotted', return_path=True)
    if return_data:
        return [time, all_raw_mags, mcor, rng, target_name, num_found, raw_std, cor_std]
    if return_path:
        if path is None:
            print('Warning: path returns only when param: write = True')
        return path
    if return_avgs_and_sigmas:
        return avg_cor_mag, avg_raw_mag, raw_std, cor_std
    if plot:
        if path is None:
            print("Can't plot because there is no path, make sure write = True")
            sys.exit()
        plot_data4(path)
for i in range(0, len(at_a_glance)):
    # split the text
    aux = at_a_glance[i].split('\n')  # separate blocks using \n
    # filter information
    # greater than zero drops empty obs, and not_needed list extra text
    aux = [x for x in aux if len(x) > 0 and not any(y in aux for y in scope_tax)]
    # get the text
    text_panel.extend(aux)
    # get the year
    time.extend([year[i]] * len(aux))
    # get the country (and get rid of \n in the country name)
    country_name[i] = country_name[i].replace('\n', '').lower()
    countryname.extend([country_name[i]] * len(aux))

# some cleaning
symbols = ['(%)', '(a)', '(b)', '(c)', '(d)', '(e)', '(f)', '(g)', '(h)', '(i)',
           '(j)', '(k)', '(l)', '(m)', '(n)', '*', '%']
for i in range(0, len(text_panel)):
    for s in symbols:
        text_panel[i] = text_panel[i].replace(s, '')

VAT = 'Standard'
def change_lane(self, current_lane, next_lane, current_index, meters_of_merging):
    current_lane_traj = []
    if current_lane == self.RIGHT_LANE_STRING:
        current_lane_traj = self.right_lane_traj
    elif current_lane == self.CENTER_LANE_STRING:
        current_lane_traj = self.center_lane_traj
    elif current_lane == self.LEFT_LANE_STRING:
        current_lane_traj = self.left_lane_traj
    else:
        print("In PlatooningVehicle change_lane(): current_lane not found!")

    next_lane_traj = []
    if next_lane == self.RIGHT_LANE_STRING:
        next_lane_traj = self.right_lane_traj
    elif next_lane == self.CENTER_LANE_STRING:
        next_lane_traj = self.center_lane_traj
    elif next_lane == self.LEFT_LANE_STRING:
        next_lane_traj = self.left_lane_traj
    else:
        print("In PlatooningVehicle change_lane(): next_lane not found!")

    print("len( current_lane_traj ) = " + str(len(current_lane_traj)))
    print("len( current_lane_traj[0] ) = " + str(len(current_lane_traj[0])))
    print("len( next_lane_traj ) = " + str(len(next_lane_traj)))
    print("len( next_lane_traj[0] ) = " + str(len(next_lane_traj[0])))

    current_lane_traj_len = len(current_lane_traj[0])

    # walk forward along the current lane until meters_of_merging have been covered
    travelled_distance = 0
    final_merge_id_offset = 0
    while travelled_distance < meters_of_merging:
        temp_id = (current_index + final_merge_id_offset) % current_lane_traj_len
        temp_next_id = (temp_id + 1) % current_lane_traj_len
        travelled_distance += math.hypot(
            current_lane_traj[0][temp_next_id] - current_lane_traj[0][temp_id],
            current_lane_traj[1][temp_next_id] - current_lane_traj[1][temp_id])
        final_merge_id_offset += 1

    final_merge_id = (current_index + final_merge_id_offset) % current_lane_traj_len
    final_merge_point = [current_lane_traj[0][final_merge_id], current_lane_traj[1][final_merge_id]]
    current_lane_final_merge_id = final_merge_id

    whole_search_range = range(len(self.traj[0]))
    next_lane_traj_len = len(next_lane_traj[0])
    temp_distance = [math.hypot(final_merge_point[0] - next_lane_traj[0][i % next_lane_traj_len],
                                final_merge_point[1] - next_lane_traj[1][i % next_lane_traj_len])
                     for i in whole_search_range]
    # Find the closest trajectory point that matches my desired speed and current heading
    best_distance = min(temp_distance)
    best_idx = temp_distance.index(best_distance)
    next_lane_final_merge_id = best_idx

    initial_merge_point = [current_lane_traj[0][current_index], current_lane_traj[1][current_index]]
    temp_distance = [math.hypot(initial_merge_point[0] - self.traj[0][i % next_lane_traj_len],
                                initial_merge_point[1] - self.traj[1][i % next_lane_traj_len])
                     for i in whole_search_range]
    # Find the closest trajectory point that matches my desired speed and current heading
    best_distance = min(temp_distance)
    best_idx = temp_distance.index(best_distance)
    current_lane_start_merge_id = current_index
    next_lane_start_merge_id = best_idx

    print("current_lane_start_merge_id = " + str(current_lane_start_merge_id))
    print("current_lane_final_merge_id = " + str(current_lane_final_merge_id))
    print("next_lane_start_merge_id = " + str(next_lane_start_merge_id))
    print("next_lane_final_merge_id = " + str(next_lane_final_merge_id))

    num_points_next_traj_merge = next_lane_final_merge_id - next_lane_start_merge_id
    if num_points_next_traj_merge < 0:
        num_points_next_traj_merge += next_lane_traj_len
    num_points_current_traj_merge = current_lane_final_merge_id - current_lane_start_merge_id
    if num_points_current_traj_merge < 0:
        num_points_current_traj_merge += current_lane_traj_len

    current_lane_merging_traj = []
    next_lane_merging_traj = []
    if next_lane_final_merge_id > next_lane_start_merge_id:
        next_lane_merging_traj = [
            next_lane_traj[0][next_lane_start_merge_id:next_lane_final_merge_id],
            next_lane_traj[1][next_lane_start_merge_id:next_lane_final_merge_id],
            next_lane_traj[2][next_lane_start_merge_id:next_lane_final_merge_id],
            next_lane_traj[3][next_lane_start_merge_id:next_lane_final_merge_id]]
    else:
        next_lane_merging_traj = [
            next_lane_traj[0][next_lane_start_merge_id:], next_lane_traj[0][:next_lane_final_merge_id],
            next_lane_traj[1][next_lane_start_merge_id:], next_lane_traj[1][:next_lane_final_merge_id],
            next_lane_traj[2][next_lane_start_merge_id:], next_lane_traj[2][:next_lane_final_merge_id],
            next_lane_traj[3][next_lane_start_merge_id:], next_lane_traj[3][:next_lane_final_merge_id]]

    if current_lane_final_merge_id > current_lane_start_merge_id:
        current_lane_merging_traj = [
            current_lane_traj[0][current_lane_start_merge_id:current_lane_final_merge_id],
            current_lane_traj[1][current_lane_start_merge_id:current_lane_final_merge_id],
            current_lane_traj[2][current_lane_start_merge_id:current_lane_final_merge_id],
            current_lane_traj[3][current_lane_start_merge_id:current_lane_final_merge_id]]
    else:
        current_lane_merging_traj = [
            current_lane_traj[0][current_lane_start_merge_id:], current_lane_traj[0][:current_lane_final_merge_id],
            current_lane_traj[1][current_lane_start_merge_id:], current_lane_traj[1][:current_lane_final_merge_id],
            current_lane_traj[2][current_lane_start_merge_id:], current_lane_traj[2][:current_lane_final_merge_id],
            current_lane_traj[3][current_lane_start_merge_id:], current_lane_traj[3][:current_lane_final_merge_id]]

    merging_traj = []
    print("num_points_next_traj_merge = " + str(num_points_next_traj_merge))
    print("num_points_current_traj_merge = " + str(num_points_current_traj_merge))
    print("num_points_next_traj_merge > num_points_current_traj_merge = " +
          str(num_points_next_traj_merge > num_points_current_traj_merge))

    if num_points_next_traj_merge > num_points_current_traj_merge:
        # cumulative arc length along the current-lane and next-lane merge sections
        current_traj_merge_dist = [0]
        for temp_idx in range(1, len(current_lane_merging_traj[0])):
            current_traj_merge_dist.append(current_traj_merge_dist[-1] + math.hypot(
                current_lane_merging_traj[0][temp_idx] - current_lane_merging_traj[0][temp_idx - 1],
                current_lane_merging_traj[1][temp_idx] - current_lane_merging_traj[1][temp_idx - 1]))
        next_traj_merge_dist = [0]
        for temp_idx in range(1, len(next_lane_merging_traj[0])):
            next_traj_merge_dist.append(next_traj_merge_dist[-1] + math.hypot(
                next_lane_merging_traj[0][temp_idx] - next_lane_merging_traj[0][temp_idx - 1],
                next_lane_merging_traj[1][temp_idx] - next_lane_merging_traj[1][temp_idx - 1]))

        for temp_idx in range(len(next_lane_merging_traj[0])):
            current_next_lane_point = [next_lane_merging_traj[0][temp_idx],
                                       next_lane_merging_traj[1][temp_idx],
                                       next_lane_merging_traj[2][temp_idx],
                                       next_lane_merging_traj[3][temp_idx]]
            # current_next_lane_point = next_lane_merging_traj[:][temp_idx]

        normalized_next_traj_dist = [next_traj_merge_dist_value / next_traj_merge_dist[-1]
                                     for next_traj_merge_dist_value in next_traj_merge_dist]
        normalized_current_traj_dist = [current_traj_merge_dist_value / current_traj_merge_dist[-1]
                                        for current_traj_merge_dist_value in current_traj_merge_dist]

        for temp_idx in range(len(next_lane_merging_traj[0])):
            point_a = [normalized_next_traj_dist[temp_idx] * next_lane_merging_traj[0][temp_idx],
                       normalized_next_traj_dist[temp_idx] * next_lane_merging_traj[1][temp_idx],
                       normalized_next_traj_dist[temp_idx] * next_lane_merging_traj[2][temp_idx],
                       normalized_next_traj_dist[temp_idx] * next_lane_merging_traj[3][temp_idx]]
            point_b = []
            if temp_idx == 0:
                point_b = [current_lane_merging_traj[0][temp_idx],
                           current_lane_merging_traj[1][temp_idx],
                           current_lane_merging_traj[2][temp_idx],
                           current_lane_merging_traj[3][temp_idx]]
            elif temp_idx == len(next_lane_merging_traj[0]) - 1:
                point_b = [0, 0, 0, 0]
            else:
                desired_dist = normalized_next_traj_dist[temp_idx]
                best_below_index = -1
                best_above_index = 1000000
                for temp_j in range(len(normalized_current_traj_dist)):
                    if normalized_current_traj_dist[temp_j] < desired_dist:
                        best_below_index = temp_j
                    if normalized_current_traj_dist[-temp_j - 1] > desired_dist:
                        best_above_index = temp_j
                dist_to_below = desired_dist - normalized_current_traj_dist[best_below_index]
                dist_to_above = normalized_current_traj_dist[best_above_index] - desired_dist
                below_weight = dist_to_above / (dist_to_below + dist_to_above)
                above_weight = dist_to_below / (dist_to_below + dist_to_above)
                point_b = [current_lane_merging_traj[0][best_below_index] * below_weight + current_lane_merging_traj[0][best_above_index] * above_weight,
                           current_lane_merging_traj[1][best_below_index] * below_weight + current_lane_merging_traj[1][best_above_index] * above_weight,
                           current_lane_merging_traj[2][best_below_index] * below_weight + current_lane_merging_traj[2][best_above_index] * above_weight,
                           current_lane_merging_traj[3][best_below_index] * below_weight + current_lane_merging_traj[3][best_above_index] * above_weight]
            point_b = [point_b[0] * (1. - normalized_next_traj_dist[temp_idx]),
                       point_b[1] * (1. - normalized_next_traj_dist[temp_idx]),
                       point_b[2] * (1. - normalized_next_traj_dist[temp_idx]),
                       point_b[3] * (1. - normalized_next_traj_dist[temp_idx])]
            merging_traj.append([point_a[0] + point_b[0],
                                 point_a[1] + point_b[1],
                                 point_a[2] + point_b[2],
                                 point_a[3] + point_b[3]])
    else:
        print("NOT IMPLEMENTED YET")

    # merging_traj is the section of the trajectory in which the merge occurs
    # now we must append the remainder of the next_lane_traj
    next_lane_traj_to_append = []
    print("next_lane_final_merge_id = " + str(next_lane_final_merge_id))
    print("next_lane_start_merge_id = " + str(next_lane_start_merge_id))
    if next_lane_final_merge_id > next_lane_start_merge_id:
        x = []
        print("len(x) = " + str(len(x)))
        x.extend(next_lane_traj[0][next_lane_final_merge_id:])
        print("len(x) = " + str(len(x)))
        x.extend(next_lane_traj[0][:next_lane_start_merge_id])
        print("len(x) = " + str(len(x)))
        y = []
        y.extend(next_lane_traj[1][next_lane_final_merge_id:])
        y.extend(next_lane_traj[1][:next_lane_start_merge_id])
        theta = []
        theta.extend(next_lane_traj[2][next_lane_final_merge_id:])
        theta.extend(next_lane_traj[2][:next_lane_start_merge_id])
        time = []
        time.extend(next_lane_traj[3][next_lane_final_merge_id:])
        time.extend(next_lane_traj[3][:next_lane_start_merge_id])
        next_lane_traj_to_append = [x, y, theta, time]
    else:
        next_lane_traj_to_append = [
            next_lane_traj[0][next_lane_final_merge_id:next_lane_start_merge_id],
            next_lane_traj[1][next_lane_final_merge_id:next_lane_start_merge_id],
            next_lane_traj[2][next_lane_final_merge_id:next_lane_start_merge_id],
            next_lane_traj[3][next_lane_final_merge_id:next_lane_start_merge_id]]

    print("len( next_lane_traj_to_append ) = " + str(len(next_lane_traj_to_append)))
    print("len( next_lane_traj_to_append[0] ) = " + str(len(next_lane_traj_to_append[0])))
    print("merging_traj = " + str(merging_traj))

    # transpose merging_traj (a list of [x, y, theta, t] points) into per-coordinate lists
    transposed_merging_traj = []
    for i in range(len(merging_traj[0])):
        temp_list = []
        for j in range(len(merging_traj)):
            temp_list.append(merging_traj[j][i])
        print("len(temp_list) = " + str(len(temp_list)))
        transposed_merging_traj.append(temp_list)

    new_traj = [merging_traj, next_lane_traj_to_append]
    print("len( merging_traj ) = " + str(len(merging_traj)))
    print("len( merging_traj[0] ) = " + str(len(merging_traj[0])))
    print("len( next_lane_traj_to_append ) = " + str(len(next_lane_traj_to_append)))
    print("len( next_lane_traj_to_append[0] ) = " + str(len(next_lane_traj_to_append[0])))
    self.traj = new_traj
bookdate = bookdate + " " + "and" + " " + str(todayday + datetime.timedelta(days=i))
dateflag = 1
forflag = 1
break

if (dateflag == 1) and (forflag == 1):
    bookdate = bookdate.lower().strip().split(' ', 1)[1]
    time = [i for i in time if i not in stop]
    for x in range(len(time)):
        timestr = timestr + " " + ''.join(time[x]).lower().strip()
    for x in range(0, len(time)):
        pos = dictword[''.join(time[x]).lower().strip()]
        if str1.split()[pos] == "pm":
            time.pop(x)
            time.pop(x)
            time.extend([str(text2num(str1.split()[pos - 1])) + str(str1.split()[pos])])
            break
    for x in range(0, len(time)):
        if ("pm" in ''.join(time[x]).lower().strip()) or ("am" in ''.join(time[x]).lower().strip()):
            pos = ''.join(time[x]).lower().strip().find("am")
            pos1 = ''.join(time[x]).lower().strip().find("pm")
            if (''.join(time[x]).lower().strip()[pos - 1]).isdigit() or (''.join(time[x]).lower().strip()[pos1 - 1]).isdigit():
                booktime = ''.join(time[x]).lower().strip()
                timeflag = 1
        else:
            posi = dictword[''.join(time[x]).lower().strip()]
            if str1.split()[posi - 1] != "at":
                if ((''.join(time[x]).strip()).isdigit()) and (seatflag == 0) and (dateflag == 0):
# Look to sync data with the following time:
# print("Initial audio sample: ", start_time)
# 07/12/2017 - 16:33:00

###################################################################
##   audio samples are not continuous, extract time stamps       ##
###################################################################
dates = []
for t in tdms_file_list:
    # transform name into date:
    splitted = str.split(t[23:-5])
    # ignore date and seconds
    time = splitted[1:-1]
    # add fixed date
    time.extend(['00', '07', '12', '2018'])  # 07/12/2017 ------ Error in time stamp from source, it is actually 2018
    date_obj = datetime.datetime.strptime(' '.join(time), '%H %M %S %d %m %Y')
    date_string = date_obj.strftime('%Y-%m-%d %H:%M:%S')
    dates.append(date_string)

# remove duplicates
dates = list(set(dates))
# print("Date: ", dates)
print("Unique sampled dates: ", len(dates))

###################################################################
##        Process audio_csv to create features                   ##
###################################################################
# Create two different training sets, each
features = pd.DataFrame()