def evaluation_measures(self):
    df_path = []
    df_match = []
    for i, j in tenumerate(self.select_rows()):
        a = converting_path_to_xy(j[1])
        b = converting_path_to_xy(j[0])
        df_path.append(a)
        df_match.append(b)
    dist_frech_cut = []
    dist_frech_full = []
    arc_length_diff_cut = []
    arc_length_diff_full = []
    tracked_vehicle = []
    mode = []
    for i, j in tenumerate(df_path):
        tracked_vehicle.append(j['Tracked Vehicle'].values[0])
        mode.append(j['Type'].values[0])
        p = j.loc[:, ['x', 'y']]
        q = df_match[i].loc[:, ['x', 'y']]
        if len(j) < 3:
            # path too short to trim endpoints: cut metrics default to 0,
            # full metrics are still computed
            dist_frech_cut.append(0)
            arc_length_diff_cut.append(0)
            d2 = similaritymeasures.frechet_dist(p.values, q.values)
            dist_frech_full.append(d2)
            l_p_f = similaritymeasures.get_arc_length(p.values)
            l_p2 = l_p_f[0]
            l_m_f = similaritymeasures.get_arc_length(q.values)
            l_m2 = l_m_f[0]
            arc_length_diff_full.append(round(abs(l_p2 - l_m2), 3))
            continue
        # the "cut" variants drop the first and last point of each path
        d1 = similaritymeasures.frechet_dist(p.values[1:-1], q.values[1:-1])
        d2 = similaritymeasures.frechet_dist(p.values, q.values)
        l_p = similaritymeasures.get_arc_length(p.values[1:-1])
        l_p1 = l_p[0]
        l_m = similaritymeasures.get_arc_length(q.values[1:-1])
        l_m1 = l_m[0]
        l_p_f = similaritymeasures.get_arc_length(p.values)
        l_p2 = l_p_f[0]
        l_m_f = similaritymeasures.get_arc_length(q.values)
        l_m2 = l_m_f[0]
        dist_frech_full.append(d2)
        dist_frech_cut.append(d1)
        arc_length_diff_cut.append(round(abs(l_p1 - l_m1), 3))
        arc_length_diff_full.append(round(abs(l_p2 - l_m2), 3))
    evaluation = {
        'ID': tracked_vehicle,
        'Type': mode,
        'Frechet_distance': dist_frech_full,
        'Frechet_distance_cut': dist_frech_cut,
        'Length_difference': arc_length_diff_full,
        'Length_difference_cut': arc_length_diff_cut
    }
    evaluation = pd.DataFrame(evaluation)
    return evaluation
def calc_frechet_distance(ast_trial):
    """ Calculate the Frechet distance between self.poses and a target path.
    Uses the Frechet distance calculation from the asterisk_calculations object.
    """
    o_x, o_y, o_path_ang = ast_trial.get_poses(use_filtered=False)
    o_path_t = np.column_stack((o_x, o_y))

    t_fd = sm.frechet_dist(o_path_t, ast_trial.target_line)
    # NOTE: frechet_dist expects (n, d) arrays of points; if o_path_ang and
    # target_rotation are 1-D they may need reshaping before this call.
    r_fd = sm.frechet_dist(o_path_ang, ast_trial.target_rotation)  # just max error right now

    return t_fd, r_fd
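# A minimal usage sketch of the translation half of calc_frechet_distance,
# assuming numpy and similaritymeasures are importable; the observed and
# target curves below are made-up stand-ins for ast_trial's poses.
# similaritymeasures.frechet_dist expects (n, d) arrays of points, which is
# why the x/y coordinates are column-stacked.
import numpy as np
import similaritymeasures as sm

t = np.linspace(0, 1, 50)
observed = np.column_stack((t, t ** 2))   # a curved attempt at the target
target = np.column_stack((t, t))          # straight-line target path
print(sm.frechet_dist(observed, target))  # Frechet distance between attempt and target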
def prediction(A, B, reference):
    """
    The prediction algorithm:

    1. Compute the log-transformed curves for the gates.
    2. Compute An and Bn, the normalised Hill functions for A and B.
    3. Compute the Frechet distance between An and Bn.
    4. Obtain the upper and lower bound curves for the log-transformed
       normalised reference curve.
    5. Scale the bounds back to linear unnormalised space by renormalising
       to a scaled ymin and ymax interval.
    """
    An = A.log().normal(lower=(1.0, 1.0), upper=(2.0, 2.0))
    Bn = B.log().normal(lower=(1.0, 1.0), upper=(2.0, 2.0))
    refn = reference.log().normal(lower=(1.0, 1.0), upper=(2.0, 2.0))
    leash = frechet_dist(An.points, Bn.points)
    scaled_ymin = ln(reference.ymin * B.ymin / A.ymin)
    scaled_ymax = ln(reference.ymax * B.ymax / A.ymax)
    upper_curve, lower_curve = boundary_curves(refn, leash)
    upper_curve = array([
        upper_curve[:, 0],
        normalise(upper_curve[:, 1], scaled_ymin, scaled_ymax)
    ]).T
    lower_curve = array([
        lower_curve[:, 0],
        normalise(lower_curve[:, 1], scaled_ymin, scaled_ymax)
    ]).T
    # map the y values back from log space to linear space
    upper_curve = array([upper_curve[:, 0], exp(upper_curve[:, 1])]).T
    lower_curve = array([lower_curve[:, 0], exp(lower_curve[:, 1])]).T
    return upper_curve, lower_curve
def similarity(sketch, other_sketch):
    """
    sketch = sketchpad json output
    other_sketch = saved json output from Django
    """
    result = {}
    if isinstance(sketch, dict) and isinstance(other_sketch, dict):
        x, y = create_xy_coords('search', sketch)
        P = np.array([x, y]).T
        x1, y1 = create_xy_coords('other', other_sketch)
        Q = np.array([x1, y1]).T
        dh, ind1, ind2 = directed_hausdorff(P, Q)
        df = similaritymeasures.frechet_dist(P, Q)
        dtw, d = similaritymeasures.dtw(P, Q)
        pcm = similaritymeasures.pcm(P, Q)
        area = similaritymeasures.area_between_two_curves(P, Q)
        cl = similaritymeasures.curve_length_measure(P, Q)
        result = {
            "dh": dh,
            "df": df,
            "dtw": dtw,
            "pcm": pcm,
            "cl": cl,
            "area": area
        }
        return result
    else:
        return {"dh": 0}
def xySwitch(self):
    with open('csv/teststk_pick.pkl', 'rb') as file:
        test_stroke = pickle.load(file)
    # remap the stroke so x stays on axis 0 and y moves to axis 2
    query_stroke = np.zeros((len(test_stroke), 3))
    query_stroke[:, 0] = test_stroke[:, 0]
    query_stroke[:, 2] = test_stroke[:, 1]
    for i in range(len(self.pkl_stroke)):
        # compare the axis-switched stroke against each stored stroke
        df = similaritymeasures.frechet_dist(query_stroke, self.pkl_stroke[i])
        print(df)

# def keyPressEvent(self, event: QKeyEvent):
#     if event.key() == Qt.Key_Escape:
#         self.parent().quit()
#     elif event.key() == Qt.Key_S:
#         self.isPlaying = not self.isPlaying
#     elif event.key() == Qt.Key_F:
#         self.fastRatio *= 2.0
#     elif event.key() == Qt.Key_D:
#         self.fastRatio /= 2.0
#     elif event.key() == Qt.Key_Right:
#         self.frameCount += 1
#     elif event.key() == Qt.Key_Left:
#         self.frameCount -= 1
#     else:
#         None
#     self.update()
def _filter_spikes(self, waveforms, labels):
    unit_IDs = np.zeros_like(labels)
    check = np.unique(labels)
    for label in check:
        # --- compute the features ----------------
        index = [i for i, x in enumerate(labels == label) if x]
        x = np.arange(1, 49)
        y = waveforms[labels == label].mean(axis=0)
        # pick the reference waveform with the highest Spearman correlation
        which, ccorr = 0, 0
        for it, reference in enumerate(self.references):
            corr, _ = spearmanr(reference, y)
            if ccorr < corr:
                ccorr = corr
                which = it
        # flag the cluster as noise if an anti-reference correlates better
        is_noise = False
        for it, antireference in enumerate(self.antireferences):
            corr, _ = spearmanr(antireference, y)
            if ccorr < corr:
                is_noise = True
        reference = self.references[which]
        z = np.polyfit(x, y, 1)
        p = np.poly1d(z)
        df = similaritymeasures.frechet_dist(
            np.vstack((zscore(x), zscore(reference))),
            np.vstack((zscore(x), zscore(y))))
        yerr_zsc = StandardScaler().fit_transform(waveforms[labels == label].T).T.std(axis=0)
        std = np.sum(yerr_zsc) / 48
        # -- plot the figures ------------------------
        if (label != -1 and not is_noise and ccorr > .7
                and p[1] * 100 > -100 and p[1] * 100 < 150
                and df < 5 and std < .6):
            unit_IDs[index] = 1
    return unit_IDs
def mean_frechet_error(ref_trj_l, pred_tr_l):
    n_trj = len(pred_tr_l)
    error = 0
    for pred_tr, ref_trj in zip(pred_tr_l, ref_trj_l):
        frechet_d = similaritymeasures.frechet_dist(ref_trj, pred_tr)
        error += frechet_d
    error_n = error / n_trj
    return error_n
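# A minimal usage sketch for mean_frechet_error, assuming numpy and
# similaritymeasures are available in the module. The two toy trajectory
# lists are hypothetical; each trajectory is an (n, 2) array of points.
import numpy as np

t = np.linspace(0, 1, 20)
refs = [np.column_stack((t, t)), np.column_stack((t, t ** 2))]
preds = [np.column_stack((t, t + 0.1)), np.column_stack((t, t ** 2 + 0.2))]
print(mean_frechet_error(refs, preds))  # (0.1 + 0.2) / 2 = 0.15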
def pathDistance(path1, path2):
    # the edge-based Jaccard distance below is kept for reference but unused
    edges1 = []
    for i in range(len(path1) - 1):
        edges1.append((path1[i], path1[i + 1]))
    edges2 = []
    for i in range(len(path2) - 1):
        edges2.append((path2[i], path2[i + 1]))
    # return 1 - (len(set(edges1).intersection(edges2)) / len(set(edges1).union(edges2)))
    return similaritymeasures.frechet_dist(np.array(path1), np.array(path2))
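# A minimal usage sketch for pathDistance, assuming paths are sequences of
# (x, y) waypoints; the coordinates are made up for illustration.
path_a = [(0, 0), (1, 0), (2, 1)]
path_b = [(0, 0), (1, 1), (2, 1)]
print(pathDistance(path_a, path_b))  # Frechet distance between the polylines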
def _detect_similarUnits(self, units, spikes):
    # group unit labels whose mean spike waveforms lie within a Frechet
    # distance of 10 of each other
    means = []
    for label in np.unique(units):
        positions = [idx for idx, unit in enumerate(units) if unit == label]
        means.append(spikes[positions].mean(axis=0))
    mylist = []
    my_index = list(range(len(means)))
    aux_means = means[1:]
    aux_myindex = my_index[1:]
    index = 0
    for _ in range(len(means)):
        main_mean = means[index]
        equal, distinct = [], []
        for aux, idx in zip(aux_means, aux_myindex):
            dist = sm.frechet_dist(main_mean, aux)
            print('distances ', dist)
            if dist < 10:
                equal.append(idx)
            else:
                distinct.append(idx)
        if equal:
            equal.append(index)
            mylist.append(equal)
        elif not equal and len(distinct) >= 1:
            mylist.append([my_index[index]])
        if not distinct:
            break
        elif len(distinct) == 1:
            mylist.append(distinct)
            break
        else:
            aux_means = [means[val] for val in distinct[1:]]
            aux_myindex = [my_index[val] for val in distinct[1:]]
            index = distinct[0]
    return mylist
def find_fdist(paths, save=False, show=False, player_list=None):
    # route sample contains all the sumo routes the vehicle passes
    # print(paths)
    N = len(paths)
    print(f"Generating fdist path for {N} routes....")
    # pairwise Frechet distances; only the upper triangle is filled
    fd = np.zeros((N, N))
    for i in range(N - 1):
        for j in range(i + 1, N):
            fd[i, j] = sm.frechet_dist(paths[i], paths[j])
    # print("fdmatrix is ", fd)
    # compute diversities: a route far (in Frechet distance) from all
    # others receives a higher diversity score
    diversities = np.zeros(N)
    # alpha = 0.5
    alpha = 5 / np.max(fd[:])
    print("alpha value is ", alpha)
    for i in range(N):
        # print(f'shape {i} is {paths[i].shape}')
        diversity = 0
        for j in range(N):
            # fd only holds the upper triangle, so index by (min, max)
            if j >= i:
                diversity = diversity + np.exp(-alpha * fd[i, j])
            else:
                diversity = diversity + np.exp(-alpha * fd[j, i])
        diversities[i] = 1 / diversity
    print("Diversity Array is ", diversities)
    # diversity = 1/singma
    # for i in range(len(paths)):
    #     plt.plot(paths[i][:, 0], paths[i][:, 1], label=str(i))
    # ax = sns.heatmap(df)
    plt.legend()
    if show:
        plt.show()
    if save:
        plt.savefig(os.path.join("./", f'{"heatmap"}.eps'), dpi=300)
        plt.savefig(os.path.join("./", f'{"heatmap"}.png'))
    allrouteobj = []
    for i, path in enumerate(paths):
        # allrouteobj.append(RouteObj(i, path, diversities[i], route_sample[i], node_hit[i]))
        if player_list:
            allrouteobj.append(
                RouteObj(i, path, diversities[i], player_list[i].start,
                         player_list[i].destination))
        else:
            allrouteobj.append(
                RouteObj(i, path, diversities[i], path[0], path[-1]))
    return allrouteobj
def ds_frechet(point1, point2):
    # unpack each flattened 256-vector (first 128 entries are x coordinates,
    # last 128 are y) into an (128, 2) array of points
    vectore1 = np.zeros((128, 2))
    vectore2 = np.zeros((128, 2))
    vectore1[:, 0] = point1[:128]
    vectore1[:, 1] = point1[128:]
    vectore2[:, 0] = point2[:128]
    vectore2[:, 1] = point2[128:]
    distance = sm.frechet_dist(vectore1, vectore2)
    return distance
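# A minimal usage sketch for ds_frechet, assuming numpy is imported as np and
# similaritymeasures as sm, as in the function above. The two flattened
# vectors are hypothetical: x coordinates in the first 128 slots, y in the
# last 128.
xs = np.linspace(0, 1, 128)
p1 = np.concatenate((xs, np.sin(xs)))
p2 = np.concatenate((xs, np.sin(xs) + 0.5))
print(ds_frechet(p1, p2))  # 0.5: the curves are vertical translates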
def zySwitch(self):
    with open('csv/teststk_pick.pkl', 'rb') as file:
        test_stroke = pickle.load(file)
    # remap the stroke so x moves to axis 1 and y to axis 2
    query_stroke = np.zeros((len(test_stroke), 3))
    query_stroke[:, 1] = test_stroke[:, 0]
    query_stroke[:, 2] = test_stroke[:, 1]
    for i in range(len(self.pkl_stroke)):
        # compare the axis-switched stroke against each stored stroke
        df = similaritymeasures.frechet_dist(query_stroke, self.pkl_stroke[i])
        print(df)
def calc_frechet_distance_all(ast_trial):
    """ TODO: NOT TESTED YET
    Calculate the Frechet distance between self.poses and a target path,
    combining both translation and rotation.
    Uses the Frechet distance calculation from the asterisk_calculations object.
    """
    o_x, o_y, o_path_ang = ast_trial.get_poses(use_filtered=False)
    o_path = np.column_stack((o_x, o_y, o_path_ang))

    # repeat the target rotation once per target point so the lengths match
    # for column_stack
    t_rots = [ast_trial.target_rotation] * len(ast_trial.target_line)
    combined_target = np.column_stack((ast_trial.target_line, t_rots))

    fd = sm.frechet_dist(o_path, combined_target)
    return fd
def shape_similarity_measures_all_to_all_unoptimized(data_dict: dict):
    """ Calculates different predefined shape similarity measures from
    https://pypi.org/project/similaritymeasures/
    Requires the roads to be saved as polylines.

    :param data_dict: Dict containing all the test data
    :return: None
    """
    import similaritymeasures
    import time
    start_time = time.time()
    current_ops = 0
    total_ops = len(data_dict)
    print("In total", total_ops * total_ops, "comparison passes and", total_ops,
          "loop iterations will have to be completed for shape based input.")
    for name1 in data_dict:
        road1 = data_dict.get(name1)
        road1_coords = road1.get(RoadDicConst.COORD_TUPLE_REP_ALLIGNED.value, None)
        if road1_coords is None:
            add_coord_tuple_representation(data_dict=data_dict)
            road1_coords = road1.get(RoadDicConst.COORD_TUPLE_REP_ALLIGNED.value, None)
        # TODO more
        dicc_dtw = {}
        dicc_frechet = {}
        for name2 in data_dict:
            # TODO optimize
            road2 = data_dict.get(name2)
            road2_coords = road2.get(RoadDicConst.COORD_TUPLE_REP_ALLIGNED.value, None)
            d_dtw, _ = similaritymeasures.dtw(road1_coords, road2_coords)
            dicc_dtw[name2] = d_dtw
            d_frechet = similaritymeasures.frechet_dist(road1_coords, road2_coords)
            dicc_frechet[name2] = d_frechet
        road1[BehaviorDicConst.COORD_DTW_DIST.value] = dicc_dtw
        road1[BehaviorDicConst.COORD_FRECHET_DIST.value] = dicc_frechet
        current_ops += 1
        print_remaining_time(start_time=start_time,
                             completed_operations=current_ops,
                             total_operations=total_ops)
def get_clustered_chains(new_chains):
    sim_array = np.zeros([len(new_chains), len(new_chains)])
    dist_array = np.zeros([len(new_chains), len(new_chains)])
    for i in range(len(new_chains)):
        for j in range(i + 1, len(new_chains)):
            # translate chain i so its first point coincides with chain j's
            # first point before comparing shapes
            v = np.array(new_chains[j][0]) - np.array(new_chains[i][0])
            x_trans = np.array(new_chains[i]) + v
            sim_array[i][j] = frechet_dist(x_trans, new_chains[j])
            # sim_array[i][j] = area_between_two_curves(new_chains[i], new_chains[j])
            # sim_array[i][j] = curve_length_measure(new_chains[i], new_chains[j])
            dist_array[i][j] = lines_dist(new_chains[i], new_chains[j])
    # mirror the upper triangles to obtain full symmetric matrices
    sim_array = sim_array + sim_array.T
    dist_array = dist_array + dist_array.T
    return sim_array, dist_array
def similarity_measures(LOOKBACK=MS_IN_A_DAY, **kwargs):
    """ Calculate all similarity measures between two trajectories """
    # returned whenever either trajectory has no GPS data
    null_result = {'timestamp': kwargs['start'],
                   'frechet_distance': None,
                   'area_between': None,
                   'partial_curve_mapping': None,
                   'curve_length_similarity': None,
                   'fastDTW_score': None}

    log.info('Loading GPS data for 1st trajectory...')
    gps1 = gps(**kwargs)
    if not gps1:
        return null_result
    arr1 = pd.DataFrame(gps1)[['latitude', 'longitude']].to_numpy()

    log.info('Loading GPS data for 2nd trajectory...')
    start2 = kwargs['start'] - LOOKBACK
    end2 = kwargs['end'] - LOOKBACK
    gps2 = gps(id=kwargs['id'], start=start2, end=end2)
    if not gps2:
        return null_result
    arr2 = pd.DataFrame(gps2)[['latitude', 'longitude']].to_numpy()

    log.info('Calculating all similarity measures...')
    log.info('Calculating Frechet...')
    discrete_frechet = similaritymeasures.frechet_dist(arr1, arr2)
    log.info('Calculating Area between...')
    area_between = similaritymeasures.area_between_two_curves(arr1, arr2)
    log.info('Calculating PCM...')
    pcm = similaritymeasures.pcm(arr1, arr2)
    log.info('Calculating curve length...')
    curve_length = similaritymeasures.curve_length_measure(arr1, arr2)
    log.info('Calculating FastDTW...')
    fastDTW_score, _ = fastdtw(arr1, arr2, dist=euclidean)

    return {'timestamp': kwargs['start'],
            'frechet_distance': discrete_frechet,
            'area_between': area_between,
            'partial_curve_mapping': pcm,
            'curve_length_similarity': curve_length,
            'fastDTW_score': fastDTW_score}
def similarity(gateA, gateB):
    a_range = log(gateA.params["ymax"]) - log(gateA.params["ymin"])
    b_range = log(gateB.params["ymax"]) - log(gateB.params["ymin"])
    print(a_range)
    print(b_range)
    # normalise each gate's log-transformed response by its log range
    a_log_normal_ys = map(lambda y: y / a_range, map(log, gateA.ys))
    b_log_normal_ys = map(lambda y: y / b_range, map(log, gateB.ys))
    a_curve = numpy.array([[x, y] for (x, y) in zip(gateA.xs, a_log_normal_ys)])
    b_curve = numpy.array([[x, y] for (x, y) in zip(gateB.xs, b_log_normal_ys)])
    print(a_curve)
    print(b_curve)
    return sm.frechet_dist(a_curve, b_curve)
def compute_distance_matrix(cls, trajectories, method="Frechet"):
    """
    :param method: "Frechet" or "Area"
    """
    n = len(trajectories)
    dist_m = np.zeros((n, n))
    for i in range(n - 1):
        p = trajectories[i]
        for j in range(i + 1, n):
            q = trajectories[j]
            if method == "Frechet":
                dist_m[i, j] = similaritymeasures.frechet_dist(p, q)
            else:
                dist_m[i, j] = similaritymeasures.area_between_two_curves(p, q)
            dist_m[j, i] = dist_m[i, j]  # the matrix is symmetric
    return dist_m
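# A minimal usage sketch for compute_distance_matrix, assuming it is exposed
# as a classmethod on some owning class (`TrajectoryCluster` here is a
# hypothetical name); the toy trajectories are (n, 2) arrays.
import numpy as np

t = np.linspace(0, 2 * np.pi, 30)
trajs = [np.column_stack((t, np.sin(t))),
         np.column_stack((t, np.cos(t))),
         np.column_stack((t, np.sin(t) + 1.0))]
D = TrajectoryCluster.compute_distance_matrix(trajs, method="Frechet")
print(D.shape)  # (3, 3), symmetric with a zero diagonal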
def distance(ovitrap_eggs_i, lwO):
    if sys.argv[2] == RMSE:
        return utils.rmse(ovitrap_eggs_i[ovitrap_eggs_i != [None]],
                          lwO[ovitrap_eggs_i != [None]])
    elif sys.argv[2] == D:
        return utils.D(ovitrap_eggs_i[ovitrap_eggs_i != [None]],
                       lwO[ovitrap_eggs_i != [None]])
    elif sys.argv[2] in [FRECHET, DTW]:
        # the float conversion turns None entries into np.nan
        # (np.float is removed in modern NumPy; use the builtin float)
        ovitrap_eggs_i = np.array(ovitrap_eggs_i, dtype=float)
        valid_ovi_idx = ~np.isnan(ovitrap_eggs_i)
        reversed_valid_ovi_idx = valid_ovi_idx[::-1]
        first = np.argmax(valid_ovi_idx)
        last = len(reversed_valid_ovi_idx) - np.argmax(reversed_valid_ovi_idx) - 1
        x = np.array([[time_range[idx], lwO[idx]] for idx in range(first, last)])
        y = np.array([[time_range[idx], ovitrap_eggs_i[idx]]
                      for idx, isValid in enumerate(valid_ovi_idx) if isValid])
        if sys.argv[2] == FRECHET:
            return sm.frechet_dist(x, y)
        if sys.argv[2] == DTW:
            return sm.dtw(x, y)[0]
    else:
        print('Metric %s not found' % sys.argv[2])
        quit()
def compute_dfd(self, x1, y1, x2, y2):
    """Computes the discrete Frechet distance between two curves.

    Parameters
    ----------
    x1 : np.ndarray
        An array containing x coordinates of this curve
    y1 : np.ndarray
        An array containing y coordinates of this curve
    x2 : np.ndarray
        An array containing x coordinates of the other curve
    y2 : np.ndarray
        An array containing y coordinates of the other curve

    Returns
    -------
    float
        The Frechet distance between the two curves
    """
    # frechet_dist expects (n, 2) arrays of points; passing the raw (x, y)
    # tuples would be interpreted as two n-dimensional points instead
    return similaritymeasures.frechet_dist(np.column_stack((x1, y1)),
                                           np.column_stack((x2, y2)))
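# A small check of the point-stacking convention used by compute_dfd,
# assuming numpy and similaritymeasures are installed; the curves are
# made up: two parallel lines a unit apart.
import numpy as np
import similaritymeasures

x = np.linspace(0, 1, 25)
print(similaritymeasures.frechet_dist(np.column_stack((x, x)),
                                      np.column_stack((x, x + 1.0))))  # 1.0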
def mouseReleaseEvent(self, event):
    self.flag = False
    # 06.16
    self.test_stroke = np.zeros((len(self.queryx), 3))
    self.test_stroke[:, 0] = self.queryx
    self.test_stroke[:, 1] = self.queryy
    # print(self.test_stroke)
    with open('csv/teststk_pick.pkl', 'wb') as pickfile:
        pickle.dump(self.test_stroke, pickfile)
    with open('csv/test_pick.pkl', 'rb') as file:
        pkl_stroke = pickle.load(file)
    for i in range(len(pkl_stroke)):
        df = similaritymeasures.frechet_dist(self.test_stroke, pkl_stroke[i])
        print(df)
    '''
    for row in self.reader:
        if self.reader.line_num == 1:
            continue
        for col in row:
            if col:
                self.csv_stroke.append(col)
        self.dictio[self.reader.line_num - 1] = self.csv_stroke
        self.csv_stroke = []
    '''
    # print(type(self.dictio[32]))
    # for row in self.df:
    #     for i in range(len(row)):
    #         self.csv_stroke.append(row[i])
    #     print(row)
    # for i in range(len(self.dictio)):
    #     self.dictio[i + 1].remove(str(i))
    #     similarity_res = self.frechet_distance(self.query_stroke, self.dictio[i + 1])
    #     print(similarity_res)
    res_ttl = []
    res_rank = []
    num_rank = []
def frechet(LOOKBACK=MS_IN_A_DAY, **kwargs):
    """ Calculate Frechet Distance between two trajectories """
    log.info('Loading GPS data for 1st trajectory...')
    gps1 = gps(**kwargs)['data']
    if not gps1:
        return None
    arr1 = pd.DataFrame(gps1)[['latitude', 'longitude']].to_numpy()

    log.info('Loading GPS data for 2nd trajectory...')
    start2 = kwargs['start'] - LOOKBACK
    end2 = kwargs['end'] - LOOKBACK
    gps2 = gps(id=kwargs['id'], start=start2, end=end2)

    log.info('Calculating Frechet Distance...')
    if gps2:
        arr2 = pd.DataFrame(gps2)[['latitude', 'longitude']].to_numpy()
        discrete_frechet = similaritymeasures.frechet_dist(arr1, arr2)
    else:
        discrete_frechet = None  # no second trajectory to compare against

    return {'timestamp': kwargs['start'], 'frechet_distance': discrete_frechet}
# Generate random numerical data
num_data = np.zeros((20, 2))
num_data[:, 0] = X_2
num_data[:, 1] = S

# NOTE: both arrays are immediately overwritten with random curves below
exp_data = np.array([range(3600), np.random.random(3600)]).T
num_data = np.array([range(300), np.random.random(300)]).T

# quantify the difference between the two curves using PCM
# pcm = similaritymeasures.pcm(exp_data, num_data)

# quantify the difference between the two curves using
# Discrete Frechet distance
df = similaritymeasures.frechet_dist(exp_data, num_data)

# quantify the difference between the two curves using
# area between two curves
# area = similaritymeasures.area_between_two_curves(exp_data, num_data)

# quantify the difference between the two curves using
# Curve Length based similarity measure
# cl = similaritymeasures.curve_length_measure(exp_data, num_data)

# quantify the difference between the two curves using
# Dynamic Time Warping distance
# dtw, d = similaritymeasures.dtw(exp_data, num_data)

# print the results
# print(pcm, df, area, cl, dtw)
def stats_between_series(
    xaxis_1: pandas.Series,
    values_1: pandas.Series,
    xaxis_2: pandas.Series,
    values_2: pandas.Series,
    print_: bool = False,
) -> dict:
    """Dynamic time warping and discrete Frechet distance for measuring
    similarity between two temporal sequences.

    Args:
        xaxis_1 (pandas.Series): index axis of the dataframe 1
        values_1 (pandas.Series): value axis of the dataframe 1
        xaxis_2 (pandas.Series): index axis of the dataframe 2
        values_2 (pandas.Series): value axis of the dataframe 2

    Returns:
        dict: `{"dtw": float, "frechet_dist": float, "pcm": float, "area": float, "std": float}`
    """
    dataframe_1 = pandas.merge(xaxis_1, values_1, right_index=True, left_index=True)
    dataframe_2 = pandas.merge(xaxis_2, values_2, right_index=True, left_index=True)
    dataframe_1.rename(columns={xaxis_1.name: "id", values_1.name: "values_1"},
                       inplace=True)
    dataframe_2.rename(columns={xaxis_2.name: "id", values_2.name: "values_2"},
                       inplace=True)
    dataframe_1.set_index("id", inplace=True)
    dataframe_2.set_index("id", inplace=True)
    unified = pandas.concat([dataframe_1, dataframe_2], axis=1)
    # coerce to numeric and fill gaps by interpolation plus back/forward fill
    unified["values_1"] = (pandas.to_numeric(
        unified["values_1"], errors="coerce",
        downcast="float").interpolate().fillna(method="bfill").fillna(method="ffill"))
    unified["values_2"] = (pandas.to_numeric(
        unified["values_2"], errors="coerce",
        downcast="float").interpolate().fillna(method="bfill").fillna(method="ffill"))
    xaxis_arranged = numpy.arange(len(unified))
    # similaritymeasures expects (n, 2) arrays of points, hence the transpose
    dataframe_values_2 = numpy.array([xaxis_arranged, unified["values_2"].values]).T
    dataframe_values_1 = numpy.array([xaxis_arranged, unified["values_1"].values]).T
    dtw, d = similaritymeasures.dtw(dataframe_values_1, dataframe_values_2)
    frechet_dist = similaritymeasures.frechet_dist(dataframe_values_1,
                                                   dataframe_values_2)
    pcm = similaritymeasures.pcm(dataframe_values_1, dataframe_values_2)
    area = similaritymeasures.area_between_two_curves(dataframe_values_1,
                                                      dataframe_values_2)
    std = numpy.abs(
        numpy.nanstd(dataframe_values_2[:, 1]) - numpy.nanstd(dataframe_values_1[:, 1]))
    if print_:
        print(
            {
                "dtw": dtw,
                "frechet_dist": frechet_dist,
                "pcm": pcm,
                "area": area,
                "std": std,
            },
            dataframe_values_2,
        )
    return {
        "dtw": dtw,
        "frechet_dist": frechet_dist,
        "pcm": pcm,
        "area": area,
        "std": std,
    }
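# A minimal usage sketch for stats_between_series; the two named series are
# hypothetical sensor readings sharing a positional index.
import pandas
import numpy

idx = pandas.Series(range(10), name="t")
s1 = pandas.Series(numpy.sin(numpy.arange(10.0)), name="sensor_a")
s2 = pandas.Series(numpy.sin(numpy.arange(10.0)) + 0.1, name="sensor_b")
print(stats_between_series(idx, s1, idx, s2)["frechet_dist"])  # ~0.1 for a constant vertical offset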
# (excerpt from a larger script: the lines below run after a data-generation
# loop that defines k, i, x1/y1/x2/y2, vectore1/vectore2, my_data,
# frechet_vector and a fitted regressor)
list_of_vectors[k, :, :] = vectore1
list_of_vectors[k + 1, :, :] = vectore2
my_data[i, :-1] = np.concatenate((x1, y1, x2, y2))

# dataset = pd.DataFrame(data=my_data)
dataset = my_data
X = dataset[:, :-1]

# -------------------------------------------------------------------------
# baseline: exact Frechet distances computed pairwise in a Python loop
tic1 = time.time()
for i in range(0, 2000000, 2):
    df = sm.frechet_dist(list_of_vectors[i], list_of_vectors[i + 1])
    frechet_vector.append(df)
toc1 = time.time()
print("time of frechet dist with loop : ", str((toc1 - tic1) * 1000), " ms")
# --------------------------------------------------------------------------
# learned approximation: a regressor predicts all distances in one batch
tic2 = time.time()
pred_test = regressor.predict(X)
toc2 = time.time()
print("time of regressor prediction : ", str((toc2 - tic2) * 1000), " ms")
# ----------------------------------------------------------------------------
frechet_vector = np.array(frechet_vector)
print("frechet_vector shape ", frechet_vector.shape)
print("regressor dataset ", pred_test.shape)
def gsm(x_data, y_data, name='', is_dmp_on=False, grid_size=5,
        grid_x_dist=-1.0, grid_y_dist=-1.0):
    plt_fpath = '../pictures/lte_writing/' + name + '/' + str(grid_size) + '_grid/'
    try:
        os.makedirs(plt_fpath)
    except OSError:
        print("Creation of the directory %s failed" % plt_fpath)
    else:
        print("Successfully created the directory %s" % plt_fpath)

    ## Optimize JA for trajectory ##
    print('Optimizing JA')
    lambda_x = optimize_ja.opt_lambda_traj_1d(x_data)
    lambda_y = optimize_ja.opt_lambda_traj_1d(y_data)

    ## Get deform grid ##
    print('Getting Deform Grid')
    # Constants--can be changed
    se_dist = get_start_end_dist(x_data, y_data)
    ttl_dist = get_total_dist(x_data, y_data)
    middle = (((se_dist + ttl_dist) / 2)**0.5)
    if grid_x_dist < 0.0:
        grid_x_dist = middle
    if grid_y_dist < 0.0:
        grid_y_dist = middle
    # Center should always be start of traj
    center = deform_hello_grid.point(x_data[0], y_data[0])
    grid = deform_hello_grid.create_grid(grid_size, grid_x_dist, grid_y_dist, center)

    ## deform for each point on grid ##
    print('Deforming Trajectory')
    grid_deforms_x = [[
        deform_hello_grid.deformation(x_data, grid[i][j].x, given_final=[],
                                      given_lambda=lambda_x, dmp_on=is_dmp_on)
        for i in range(grid_size)
    ] for j in range(grid_size)]
    grid_deforms_y = [[
        deform_hello_grid.deformation(y_data, grid[i][j].y, given_final=[],
                                      given_lambda=lambda_y, dmp_on=is_dmp_on)
        for i in range(grid_size)
    ] for j in range(grid_size)]

    ## get hd/fd for each deformation ##
    print('Getting hd/fd')
    # set up arrays
    starts_x = np.zeros((grid_size, grid_size))
    starts_y = np.zeros((grid_size, grid_size))
    fd_lte = np.zeros((grid_size, grid_size))
    hd_lte = np.zeros((grid_size, grid_size))
    fd_ja = np.zeros((grid_size, grid_size))
    hd_ja = np.zeros((grid_size, grid_size))
    if is_dmp_on:
        fd_dmp = np.zeros((grid_size, grid_size))
        hd_dmp = np.zeros((grid_size, grid_size))
    org_traj = np.zeros((len(x_data), 2))
    comp_traj = np.zeros(np.shape(org_traj))
    org_traj[:, 0] = np.transpose(x_data)
    org_traj[:, 1] = np.transpose(y_data)
    for i in range(grid_size):
        for j in range(grid_size):
            print('Starting')
            starts_x[i][j] = grid_deforms_x[i][j].lte[0]
            starts_y[i][j] = grid_deforms_y[i][j].lte[0]
            # lte hd/fd
            comp_traj[:, 0] = np.transpose(grid_deforms_x[i][j].lte)
            comp_traj[:, 1] = np.transpose(grid_deforms_y[i][j].lte)
            fd_lte[i][j] = similaritymeasures.frechet_dist(org_traj, comp_traj)
            hd_lte[i][j] = max(directed_hausdorff(org_traj, comp_traj)[0],
                               directed_hausdorff(comp_traj, org_traj)[0])
            # ja hd/fd
            comp_traj[:, 0] = np.transpose(grid_deforms_x[i][j].ja)
            comp_traj[:, 1] = np.transpose(grid_deforms_y[i][j].ja)
            fd_ja[i][j] = similaritymeasures.frechet_dist(org_traj, comp_traj)
            hd_ja[i][j] = max(directed_hausdorff(org_traj, comp_traj)[0],
                              directed_hausdorff(comp_traj, org_traj)[0])
            # dmp hd/fd
            if is_dmp_on:
                comp_traj[:, 0] = np.transpose(grid_deforms_x[i][j].dmp)
                comp_traj[:, 1] = np.transpose(grid_deforms_y[i][j].dmp)
                fd_dmp[i][j] = similaritymeasures.frechet_dist(org_traj, comp_traj)
                hd_dmp[i][j] = max(directed_hausdorff(org_traj, comp_traj)[0],
                                   directed_hausdorff(comp_traj, org_traj)[0])
    print(starts_x)
    print(starts_y)
    print(fd_lte)
    print(hd_lte)
    print(fd_ja)
    print(hd_ja)
    if is_dmp_on:
        print(fd_dmp)
        print(hd_dmp)

    ## normalize hd/fd ##
    print('Normalizing hd/fd')
    if is_dmp_on:
        # get maxes across all three reproduction methods
        max_fd = max(np.amax(fd_lte), np.amax(fd_ja), np.amax(fd_dmp))
        max_hd = max(np.amax(hd_lte), np.amax(hd_ja), np.amax(hd_dmp))
        fd_dmp = np.ones(np.shape(fd_dmp)) - (fd_dmp / max_fd)
        hd_dmp = np.ones(np.shape(hd_dmp)) - (hd_dmp / max_hd)
    else:
        max_fd = max(np.amax(fd_lte), np.amax(fd_ja))
        max_hd = max(np.amax(hd_lte), np.amax(hd_ja))
    fd_lte = np.ones(np.shape(fd_lte)) - (fd_lte / max_fd)
    hd_lte = np.ones(np.shape(hd_lte)) - (hd_lte / max_hd)
    fd_ja = np.ones(np.shape(fd_ja)) - (fd_ja / max_fd)
    hd_ja = np.ones(np.shape(hd_ja)) - (hd_ja / max_hd)

    ## plot results ##
    print('Plotting Results')
    # plot deformations & store in h5
    print(name + '_grid' + str(grid_size) + '.h5')
    fp = h5py.File(name + '_grid' + str(grid_size) + '.h5', 'w')
    dset_name = name
    for i in range(grid_size):
        for j in range(grid_size):
            ax = plt.subplot2grid((grid_size, grid_size), (i, j))
            ax.plot(grid_deforms_x[i][j].traj, grid_deforms_y[i][j].traj, 'b')
            ax.plot(grid_deforms_x[i][j].lte, grid_deforms_y[i][j].lte, 'g')
            ax.plot(grid_deforms_x[i][j].ja, grid_deforms_y[i][j].ja, 'r')
            if is_dmp_on:
                ax.plot(grid_deforms_x[i][j].dmp, grid_deforms_y[i][j].dmp, 'm')
            fp.create_dataset(dset_name + '/original/(' + str(i) + ', ' + str(j) + ')/x',
                              data=grid_deforms_x[i][j].traj)
            fp.create_dataset(dset_name + '/original/(' + str(i) + ', ' + str(j) + ')/y',
                              data=grid_deforms_y[i][j].traj)
            fp.create_dataset(dset_name + '/lte/(' + str(i) + ', ' + str(j) + ')/x',
                              data=grid_deforms_x[i][j].lte)
            fp.create_dataset(dset_name + '/lte/(' + str(i) + ', ' + str(j) + ')/y',
                              data=grid_deforms_y[i][j].lte)
            fp.create_dataset(dset_name + '/ja/(' + str(i) + ', ' + str(j) + ')/x',
                              data=grid_deforms_x[i][j].ja)
            fp.create_dataset(dset_name + '/ja/(' + str(i) + ', ' + str(j) + ')/y',
                              data=grid_deforms_y[i][j].ja)
            if is_dmp_on:
                fp.create_dataset(dset_name + '/dmp/(' + str(i) + ', ' + str(j) + ')/x',
                                  data=grid_deforms_x[i][j].dmp)
                fp.create_dataset(dset_name + '/dmp/(' + str(i) + ', ' + str(j) + ')/y',
                                  data=grid_deforms_y[i][j].dmp)
            plt.xticks([])
            plt.yticks([])
    plt.savefig(plt_fpath + 'deforms.png')
    # store hd/fd data in h5
    fp.create_dataset(dset_name + '/lte/fd', data=fd_lte)
    fp.create_dataset(dset_name + '/lte/hd', data=hd_lte)
    fp.create_dataset(dset_name + '/ja/fd', data=fd_ja)
    fp.create_dataset(dset_name + '/ja/hd', data=hd_ja)
    if is_dmp_on:
        fp.create_dataset(dset_name + '/dmp/fd', data=fd_dmp)
        fp.create_dataset(dset_name + '/dmp/hd', data=hd_dmp)
    # gradient maps
    gradient_plotting.gradient_map(fd_lte, name + ' LTE Frechet Distance Gradient',
                                   fpath=plt_fpath)
    gradient_plotting.gradient_map(hd_lte, name + ' LTE Haussdorf Distance Gradient',
                                   fpath=plt_fpath)
    gradient_plotting.gradient_map(fd_ja, name + ' JA Frechet Distance Gradient',
                                   fpath=plt_fpath)
    gradient_plotting.gradient_map(hd_ja, name + ' JA Haussdorf Distance Gradient',
                                   fpath=plt_fpath)
    if is_dmp_on:
        gradient_plotting.gradient_map(fd_dmp, name + ' DMP Frechet Distance Gradient',
                                       fpath=plt_fpath)
        gradient_plotting.gradient_map(hd_dmp, name + ' DMP Haussdorf Distance Gradient',
                                       fpath=plt_fpath)
        gradient_plotting.rgb_gradient(
            fd_ja, fd_lte, fd_dmp,
            name=(name + ' Frechet Distance Compared Reproductions'),
            fpath=plt_fpath)
        gradient_plotting.rgb_gradient(
            hd_ja, hd_lte, hd_dmp,
            name=(name + ' Haussdorf Distance Compared Reproductions'),
            fpath=plt_fpath)
        gradient_plotting.strongest_gradient(
            fd_ja, fd_lte, fd_dmp,
            name=(name + ' Frechet Distance Best Reproductions'),
            fpath=plt_fpath)
        gradient_plotting.strongest_gradient(
            hd_ja, hd_lte, hd_dmp,
            name=(name + ' Haussdorf Distance Best Reproductions'),
            fpath=plt_fpath)
    else:
        gradient_plotting.rgb_gradient(
            fd_ja, fd_lte, np.zeros(np.shape(fd_lte)),
            name=(name + ' Frechet Distance Compared Reproductions'),
            fpath=plt_fpath)
        gradient_plotting.rgb_gradient(
            hd_ja, hd_lte, np.zeros(np.shape(fd_lte)),
            name=(name + ' Haussdorf Distance Compared Reproductions'),
            fpath=plt_fpath)
        gradient_plotting.strongest_gradient(
            fd_ja, fd_lte, np.zeros(np.shape(fd_lte)),
            name=(name + ' Frechet Distance Best Reproductions'),
            fpath=plt_fpath)
        gradient_plotting.strongest_gradient(
            hd_ja, hd_lte, np.zeros(np.shape(fd_lte)),
            name=(name + ' Haussdorf Distance Best Reproductions'),
            fpath=plt_fpath)

    # set up grid for 3d surfaces
    x_vals = starts_x[0, :]
    y_vals = starts_y[:, 0]
    xnew = np.linspace(x_vals[0], x_vals[grid_size - 1], 1000)
    ynew = np.linspace(y_vals[0], y_vals[grid_size - 1], 1000)
    X, Y = np.meshgrid(xnew, ynew)
    # interpolate functions & plot interpolations
    fd_lte_func = interp2d(x_vals, y_vals, fd_lte)
    fd_lte_plot = fd_lte_func(xnew, ynew)
    hd_lte_func = interp2d(x_vals, y_vals, hd_lte)
    hd_lte_plot = hd_lte_func(xnew, ynew)
    fd_ja_func = interp2d(x_vals, y_vals, fd_ja)
    fd_ja_plot = fd_ja_func(xnew, ynew)
    hd_ja_func = interp2d(x_vals, y_vals, hd_ja)
    hd_ja_plot = hd_ja_func(xnew, ynew)
    if is_dmp_on:
        fd_dmp_func = interp2d(x_vals, y_vals, fd_dmp)
        fd_dmp_plot = fd_dmp_func(xnew, ynew)
        hd_dmp_func = interp2d(x_vals, y_vals, hd_dmp)
        hd_dmp_plot = hd_dmp_func(xnew, ynew)
    # plot all three on a single plot?
    # have all separate plots?
    # how to show which surface is better at a single point?
    # different color maps
    # how do I show hd vs. fd?
    f_size = 32
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.plot_surface(X, Y, fd_lte_plot, cmap='viridis', edgecolor='none')
    ax.set_title(name + ' LTE Frechet Distance', fontsize=f_size)
    # plt.show()
    plt.savefig(plt_fpath + name + ' LTE Frechet Distance Surface.png')
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.plot_surface(X, Y, hd_lte_plot, cmap='viridis', edgecolor='none')
    ax.set_title(name + ' LTE Haussdorf Distance', fontsize=f_size)
    # plt.show()
    plt.savefig(plt_fpath + name + ' LTE Haussdorf Distance Surface.png')
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.plot_surface(X, Y, fd_ja_plot, cmap='viridis', edgecolor='none')
    ax.set_title(name + ' JA Frechet Distance', fontsize=f_size)
    # plt.show()
    plt.savefig(plt_fpath + name + ' JA Frechet Distance Surface.png')
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.plot_surface(X, Y, hd_ja_plot, cmap='viridis', edgecolor='none')
    ax.set_title(name + ' JA Haussdorf Distance', fontsize=f_size)
    # plt.show()
    plt.savefig(plt_fpath + name + ' JA Haussdorf Distance Surface.png')
    if is_dmp_on:
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        ax.plot_surface(X, Y, fd_dmp_plot, cmap='viridis', edgecolor='none')
        ax.set_title(name + ' DMP Frechet Distance', fontsize=f_size)
        # plt.show()
        plt.savefig(plt_fpath + name + ' DMP Frechet Distance Surface.png')
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        ax.plot_surface(X, Y, hd_dmp_plot, cmap='viridis', edgecolor='none')
        ax.set_title(name + ' DMP Haussdorf Distance', fontsize=f_size)
        # plt.show()
        plt.savefig(plt_fpath + name + ' DMP Haussdorf Distance Surface.png')
    fp.close()
    plt.close('all')
def test_c5_c6_df(self):
    df = similaritymeasures.frechet_dist(curve5, curve6)
    self.assertTrue(np.isclose(df, 90.0))
def test_c3_c4_df(self):
    df = similaritymeasures.frechet_dist(curve3, curve4)
    # compare against the expected value; assertTrue(df, 1.0) only
    # checked that df was truthy
    self.assertTrue(np.isclose(df, 1.0))
def test_c1_c2_df(self):
    df = similaritymeasures.frechet_dist(curve1, curve2)
    # compare against the expected value; assertTrue(df, 1.0) only
    # checked that df was truthy
    self.assertTrue(np.isclose(df, 1.0))
def test_random_fr(self):
    _ = similaritymeasures.frechet_dist(curve_a_rand, curve_b_rand)
    self.assertTrue(True)  # smoke test: only checks that the call completes
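# A hedged sketch of two extra property checks one might add to this suite,
# assuming the same unittest-style class with numpy available: the discrete
# Frechet distance is zero on identical curves and symmetric in its arguments.
def test_frechet_properties(self):
    t = np.linspace(0, 1, 50)
    p = np.column_stack((t, t ** 2))
    q = np.column_stack((t, t ** 3))
    self.assertEqual(similaritymeasures.frechet_dist(p, p), 0.0)
    self.assertTrue(np.isclose(similaritymeasures.frechet_dist(p, q),
                               similaritymeasures.frechet_dist(q, p)))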