def evaluate_shape_topo_mapping(u1, u2, u3):
    """
    1) horizontally, u1 and u2 have some common (or very similar) components
       such that an injective mapping exists by the correspondence of similar pairs.
    2) vertically, u1 and u3 share the same topological structure by considering
       two relations, "inside" and "outside".
    :param u1: a binary image
    :param u2: a binary image
    :param u3: a binary image
    :return: (MAT score, stub dict carried into the prediction phase)
    """
    u1_coms, _, _ = utils.decompose(u1, 8, trim=False)
    u2_coms, _, _ = utils.decompose(u2, 8, trim=False)
    u3_coms, _, _ = utils.decompose(u3, 8, trim=False)
    # Shape correspondence between u1 and u2.  The old hard jaccard_map call
    # was dead code (its results were never used) and has been removed.
    jcm_u1_com_ids, jcm_u2_com_ids, jcm_score = map.soft_jaccard_map(
        u1_coms, u2_coms)
    # Topological (inside/outside) correspondence between u1 and u3.
    tpm_u1_com_ids, tpm_u3_com_ids, tpm_score = map.topological_map(
        u1_coms, u3_coms)
    mat_score = (jcm_score + tpm_score) / 2
    stub = utils.make_stub(u1_coms, u2_coms, u3_coms,
                           jcm_u1_com_ids, jcm_u2_com_ids,
                           tpm_u1_com_ids, tpm_u3_com_ids)
    return mat_score, stub
def evaluate_rearrange(u1, u2):
    """Greedily match components of u1 to components of u2 by Jaccard
    similarity and report the match quality together with the component
    centroid coordinates of both images.

    :return: (score, u1_xs, u1_ys, u2_xs, u2_ys); score is 0 (with empty
             lists) when the component counts differ or any matched pair
             scores below 0.6.  u2 coordinates are reordered to align with
             the u1 components they were matched to.
    """
    u1_coms, u1_coms_x, u1_coms_y = utils.decompose(u1, 8)
    u2_coms, u2_coms_x, u2_coms_y = utils.decompose(u2, 8)
    if len(u1_coms) != len(u2_coms):
        return 0, [], [], [], []

    pair_scores = []
    pair_ids = []
    taken = [False] * len(u2_coms)
    for com_a in u1_coms:
        best_score, best_jj = -1, -1
        for jj, com_b in enumerate(u2_coms):
            if taken[jj]:
                continue
            s, _, _ = jaccard.jaccard_coef(com_a, com_b)
            if s > best_score:
                best_score, best_jj = s, jj
        pair_scores.append(best_score)
        pair_ids.append(best_jj)
        taken[best_jj] = True

    worst = min(pair_scores)
    if worst < 0.6:
        return 0, [], [], [], []
    return (worst,
            u1_coms_x, u1_coms_y,
            [u2_coms_x[jj] for jj in pair_ids],
            [u2_coms_y[jj] for jj in pair_ids])
def evaluate_identity_shape_loc_isomorphism(u1, u2, u3):
    """Check that u1 -> u2 components map identically by shape (soft
    Jaccard) and by location, and that u1 and u3 have the same number of
    components.

    :return: (MAT score, stub); score is 0 and the stub carries empty id
             lists when the check fails.
    """
    u1_coms, _, _ = utils.decompose(u1, 8, trim=False)
    u2_coms, _, _ = utils.decompose(u2, 8, trim=False)
    u3_coms, _, _ = utils.decompose(u3, 8, trim=False)

    if len(u1_coms) == len(u3_coms):
        sjm_u1_ids, sjm_u2_ids, sjm_score = map.soft_jaccard_map(
            u1_coms, u2_coms)
        lcm_u1_ids, lcm_u2_ids, lcm_score = map.location_map(
            u1_coms, u2_coms)
        # Shape-based and location-based mappings must agree pair-for-pair.
        if map.same_mappings(list(range(len(u1_coms))),
                             list(range(len(u2_coms))),
                             sjm_u1_ids, sjm_u2_ids,
                             lcm_u1_ids, lcm_u2_ids):
            stub = utils.make_stub(u1_coms, u2_coms, u3_coms,
                                   sjm_u1_ids, sjm_u2_ids)
            return (sjm_score + lcm_score) / 2, stub

    stub = utils.make_stub(u1_coms, u2_coms, u3_coms, [], [])
    return 0, stub
def compute_ious(gt, predictions):
    """Pairwise IoU matrix between the connected components of *gt* and
    those of *predictions* (rows: gt components, cols: predictions)."""
    gt_flat = np.asarray([com.flatten() for com in decompose(gt)])
    pred_flat = np.asarray([com.flatten() for com in decompose(predictions)])
    return pairwise_distances(X=gt_flat, Y=pred_flat, metric=iou)
def compute_ious(gt, predictions):
    """Pairwise IoU matrix between the connected components of *gt* and
    those of *predictions*, via calculate_iou_matrix."""
    gt_flat = np.asarray([com.flatten() for com in decompose(gt)])
    pred_flat = np.asarray([com.flatten() for com in decompose(predictions)])
    return calculate_iou_matrix(gt_flat, pred_flat)
def predict_topo_delta_shape_isomorphism(prob, anlg, tran, d):
    """Score every answer option of *prob* under the topology +
    delta-shape isomorphism and return one prediction dict per option."""
    stub = d.get("stub")
    u1_coms = stub.get("u1_coms")
    u2_coms = stub.get("u2_coms")
    u3_coms = stub.get("u3_coms")
    tpm_u1_com_ids = stub.get("tpm_u1_com_ids")
    tpm_u2_com_ids = stub.get("tpm_u2_com_ids")

    pred_data = []
    for ii, opt in enumerate(prob.options):
        print(prob.name, anlg.get("name"), tran.get("name"), ii)
        opt_coms, _, _ = utils.decompose(opt, 8, trim=False)
        tpm_u3_com_ids, tpm_opt_com_ids, tpm_score = map.topological_map(
            u3_coms, opt_coms)
        AC_A_com_ids, _, _, _, djcm_score = map.delta_shape_map(
            u1_coms, u2_coms, u3_coms, opt_coms,
            tpm_u1_com_ids, tpm_u2_com_ids,
            tpm_u3_com_ids, tpm_opt_com_ids)
        # A trivial single-component topology or a failed delta mapping
        # disqualifies this option.
        if len(tpm_u3_com_ids) == 1 or AC_A_com_ids is None:
            score = 0
        else:
            score = (tpm_score + djcm_score) / 2
        pred_data.append({**d,
                          "optn": ii + 1,
                          "optn_score": score,
                          "mato_score": (d.get("mat_score") + score) / 2,
                          "pred": opt})
    return pred_data
def predict_shape_topo_mapping(prob, anlg, tran, d):
    """Score every answer option of *prob* under the shape + topology
    mapping and return one prediction dict per option."""
    stub = d.get("stub")
    u1_coms = stub.get("u1_coms")
    u2_coms = stub.get("u2_coms")
    u3_coms = stub.get("u3_coms")
    jcm_u1_com_ids = stub.get("jcm_u1_com_ids")
    jcm_u2_com_ids = stub.get("jcm_u2_com_ids")
    tpm_u1_com_ids = stub.get("tpm_u1_com_ids")
    tpm_u3_com_ids = stub.get("tpm_u3_com_ids")

    pred_data = []
    for ii, opt in enumerate(prob.options):
        print(prob.name, anlg.get("name"), tran.get("name"), ii)
        opt_coms, _, _ = utils.decompose(opt, 8, trim=False)
        jcm_u3_com_ids, jcm_opt_com_ids, jcm_score = map.soft_jaccard_map(
            u3_coms, opt_coms)
        tpm_u2_com_ids, tpm_opt_com_ids, tpm_score = map.topological_map(
            u2_coms, opt_coms)
        score = (jcm_score + tpm_score) / 2
        # Zero the score unless the shape and topology mappings of all
        # four images are mutually consistent.
        consistent = map.are_consistent(
            list(range(len(u1_coms))), list(range(len(u2_coms))),
            list(range(len(u3_coms))), list(range(len(opt_coms))),
            jcm_u1_com_ids, jcm_u2_com_ids, jcm_u3_com_ids, jcm_opt_com_ids,
            tpm_u1_com_ids, tpm_u3_com_ids, tpm_u2_com_ids, tpm_opt_com_ids)
        if not consistent:
            score = 0
        pred_data.append({**d,
                          "optn": ii + 1,
                          "optn_score": score,
                          "mato_score": (d.get("mat_score") + score) / 2,
                          "pred": opt})
    return pred_data
def rearrange(img, old_xs, old_ys, new_xs, new_ys):
    """Move each component of *img* from its nearest old anchor to the
    corresponding new anchor on a 300x300 canvas.

    :param img: binary image to decompose into components
    :param old_xs: x coordinates the components currently sit near
    :param old_ys: y coordinates the components currently sit near
    :param new_xs: target x coordinates, aligned with old_xs
    :param new_ys: target y coordinates, aligned with old_ys
    :return: trimmed binary image with components repositioned, or an
             all-False image when the component count does not match.
    """
    coms, coms_x, coms_y = utils.decompose(img, 8)
    if len(coms) != len(old_xs):
        return np.full_like(img, fill_value=False)

    new_coms = []
    for com, com_x, com_y in zip(coms, coms_x, coms_y):
        # Match this component to the nearest old anchor (L1 distance).
        closest_dist = np.inf
        closest_ii = -1
        for ii, (old_x, old_y) in enumerate(zip(old_xs, old_ys)):
            dist = abs(com_x - old_x) + abs(com_y - old_y)
            if dist < closest_dist:
                closest_dist = dist
                closest_ii = ii
        x = new_xs[closest_ii]
        y = new_ys[closest_ii]
        bg = np.full((300, 300), fill_value=False)
        # Fix for the previously acknowledged bug: clip the component so a
        # placement near the canvas edge cannot raise a broadcast error.
        h = min(com.shape[0], 300 - y)
        w = min(com.shape[1], 300 - x)
        if h > 0 and w > 0:
            bg[y:y + h, x:x + w] = com[:h, :w]
        new_coms.append(bg)

    current = np.full((300, 300), fill_value=False)
    for new_com in new_coms:
        current = np.logical_or(current, new_com)
    return utils.trim_binary_image(current)
def predict_shape_delta_loc_isomorphism(prob, anlg, tran, d):
    """Score every answer option of *prob* under the shape +
    delta-location isomorphism and return one prediction dict per option."""
    stub = d.get("stub")
    u1_coms = stub.get("u1_coms")
    u2_coms = stub.get("u2_coms")
    u3_coms = stub.get("u3_coms")
    jcm_u1_com_ids = stub.get("jcm_u1_com_ids")
    jcm_u2_com_ids = stub.get("jcm_u2_com_ids")

    pred_data = []
    for ii, opt in enumerate(prob.options):
        print(prob.name, anlg.get("name"), tran.get("name"), ii)
        opt_coms, _, _ = utils.decompose(opt, 8, trim=False)
        jcm_u3_com_ids, jcm_opt_com_ids, jcm_score = map.soft_jaccard_map(
            u3_coms, opt_coms)
        AC_A_com_ids, _, _, _, lcdm_score = map.delta_location_map(
            u1_coms, u2_coms, u3_coms, opt_coms,
            jcm_u1_com_ids, jcm_u2_com_ids,
            jcm_u3_com_ids, jcm_opt_com_ids)
        # Trivial single-component mappings and failed delta maps score 0.
        if len(jcm_u3_com_ids) == 1 or AC_A_com_ids is None:
            score = 0
        else:
            score = (jcm_score + lcdm_score) / 2
        pred_data.append({**d,
                          "optn": ii + 1,
                          "optn_score": score,
                          "mato_score": (d.get("mat_score") + score) / 2,
                          "pred": opt})
    return pred_data
def evaluate_topo_delta_shape_isomorphism(u1, u2, u3):
    """Score u1 -> u2 by topological correspondence; a trivial
    single-pair mapping scores 0.

    :return: (MAT score, stub for the prediction phase)
    """
    u1_coms, _, _ = utils.decompose(u1, 8, trim=False)
    u2_coms, _, _ = utils.decompose(u2, 8, trim=False)
    u3_coms, _, _ = utils.decompose(u3, 8, trim=False)
    tpm_u1_com_ids, tpm_u2_com_ids, tpm_score = map.topological_map(
        u1_coms, u2_coms)
    mat_score = 0 if len(tpm_u1_com_ids) == 1 else tpm_score
    stub = utils.make_stub(u1_coms, u2_coms, u3_coms,
                           tpm_u1_com_ids, tpm_u2_com_ids)
    return mat_score, stub
def evaluate_shape_delta_loc_isomorphism(u1, u2, u3):
    """Score u1 -> u2 by soft-Jaccard shape correspondence; a trivial
    single-pair mapping scores 0.

    :return: (MAT score, stub for the prediction phase)
    """
    u1_coms, _, _ = utils.decompose(u1, 8, trim=False)
    u2_coms, _, _ = utils.decompose(u2, 8, trim=False)
    u3_coms, _, _ = utils.decompose(u3, 8, trim=False)
    jcm_u1_com_ids, jcm_u2_com_ids, jcm_score = map.soft_jaccard_map(
        u1_coms, u2_coms)
    mat_score = 0 if len(jcm_u1_com_ids) == 1 else jcm_score
    stub = utils.make_stub(u1_coms, u2_coms, u3_coms,
                           jcm_u1_com_ids, jcm_u2_com_ids)
    return mat_score, stub
def evaluate_shape_loc_isomorphism(u1, u2, u3):
    """
    1) horizontally, u1 and u2 have some components that are located at the same
       (or close) positions such that an injective mapping can be formed by
       location correspondence.
    2) horizontally, u1 and u2 have some components that have the same or similar
       shape such that an injective mapping can be found by shape correspondence.
    3) vertically, u1 and u3 form a placeholder mapping that requires only that
       u1 and u3 have the same number of components.  The placeholder mapping
       will be instantiated by one or more mappings that express the isomorphism
       between (u1, u2) and (u3, opt), where (u1, u2) represents the mappings btw
       components in u1 and u2 and (u3, opt) represents the mappings btw
       components in u3 and opt.
    :param u1: a binary image
    :param u2: a binary image
    :param u3: a binary image
    :return: (MAT score, stub for the prediction phase)
    """
    u1_coms, _, _ = utils.decompose(u1, 8, trim=False)
    u2_coms, _, _ = utils.decompose(u2, 8, trim=False)
    u3_coms, _, _ = utils.decompose(u3, 8, trim=False)
    lcm_u1_com_ids, lcm_u2_com_ids, lcm_score = map.location_map(
        u1_coms, u2_coms)
    jcm_u1_com_ids, jcm_u2_com_ids, jcm_score = map.soft_jaccard_map(
        u1_coms, u2_coms)
    # The two horizontal mappings must be non-trivial and cover the same
    # component sets.  np.array_equal replaces elementwise `==` so that
    # unique sets of different sizes compare as unequal instead of raising
    # a broadcast error.
    if 1 != len(lcm_u1_com_ids) and \
            len(lcm_u1_com_ids) == len(jcm_u1_com_ids) and \
            len(lcm_u2_com_ids) == len(jcm_u2_com_ids) and \
            np.array_equal(np.unique(lcm_u1_com_ids), np.unique(jcm_u1_com_ids)) and \
            np.array_equal(np.unique(lcm_u2_com_ids), np.unique(jcm_u2_com_ids)):
        phm_u1_com_ids, phm_u3_com_ids, phm_score = map.placeholder_map(
            u1_coms, u3_coms)
    else:
        phm_u1_com_ids, phm_u3_com_ids, phm_score = (None, None, 0)
    mat_score = min((lcm_score + jcm_score) / 2, phm_score)
    stub = utils.make_stub(u1_coms, u2_coms, u3_coms,
                           lcm_u1_com_ids, lcm_u2_com_ids,
                           jcm_u1_com_ids, jcm_u2_com_ids,
                           phm_u1_com_ids, phm_u3_com_ids)
    return mat_score, stub
def predict_identity_shape_loc_isomorphism(prob, anlg, tran, d):
    """Score each answer option for the identity shape-location isomorphism.

    An option scores only when (a) u2 and opt have the same component count,
    (b) u3 -> opt maps identically by shape (soft Jaccard) and by location
    with as many pairs as the u1 -> u2 mapping, and (c) the four mappings
    u1-u2, u3-opt, u1-u3 and u2-opt are mutually consistent.

    :param prob: problem object providing .options and .name
    :param anlg: analogy descriptor (dict-like)
    :param tran: transformation descriptor (dict-like)
    :param d: evaluation-phase data dict carrying the "stub"
    :return: list of prediction dicts, one per option
    """
    u1_coms = d.get("stub").get("u1_coms")
    u2_coms = d.get("stub").get("u2_coms")
    u3_coms = d.get("stub").get("u3_coms")
    # u1 -> u2 component mapping computed during the evaluate phase.
    u12_u1_com_ids = d.get("stub").get("u1_com_ids")
    u12_u2_com_ids = d.get("stub").get("u2_com_ids")
    pred_data = []
    for ii, opt in enumerate(prob.options):
        print(prob.name, anlg.get("name"), tran.get("name"), ii)
        opt_coms, _, _ = utils.decompose(opt, 8, trim=False)
        if len(u2_coms) == len(opt_coms):
            # Shape- and location-based u3 -> opt mappings.
            sjm_u3_com_ids, sjm_opt_com_ids, u3opt_sjm_score = map.soft_jaccard_map(
                u3_coms, opt_coms)
            lcm_u3_com_ids, lcm_opt_com_ids, lcm_score = map.location_map(
                u3_coms, opt_coms)
            # Both mappings must agree pair-for-pair and have as many pairs
            # as the u1 -> u2 mapping.
            if map.same_mappings(list(range(len(u3_coms))),
                                 list(range(len(opt_coms))),
                                 sjm_u3_com_ids, sjm_opt_com_ids,
                                 lcm_u3_com_ids, lcm_opt_com_ids) \
                    and len(sjm_u3_com_ids) == len(u12_u1_com_ids):
                u3opt_u3_com_ids = sjm_u3_com_ids
                u3opt_opt_com_ids = sjm_opt_com_ids
                # Vertical mappings u1 -> u3 and u2 -> opt by shape.
                u13_1_com_ids, u13_3_com_ids, u13_sjm_score = map.soft_jaccard_map(
                    u1_coms, u3_coms)
                u2opt_1_com_ids, u2opt_opt_com_ids, u2opt_sjm_score = map.soft_jaccard_map(
                    u2_coms, opt_coms)
                # All four mappings must close into a consistent square.
                if map.are_consistent(list(range(len(u1_coms))),
                                      list(range(len(u2_coms))),
                                      list(range(len(u3_coms))),
                                      list(range(len(opt_coms))),
                                      u12_u1_com_ids, u12_u2_com_ids,
                                      u3opt_u3_com_ids, u3opt_opt_com_ids,
                                      u13_1_com_ids, u13_3_com_ids,
                                      u2opt_1_com_ids, u2opt_opt_com_ids):
                    score = (u3opt_sjm_score + lcm_score +
                             u13_sjm_score + u2opt_sjm_score) / 4
                else:
                    score = 0
            else:
                score = 0
        else:
            score = 0
        pred_data.append({**d,
                          "optn": ii + 1,
                          "optn_score": score,
                          "mato_score": (d.get("mat_score") + score) / 2,
                          "pred": opt})
    return pred_data
def predict_shape_loc_isomorphism(prob, anlg, tran, d):
    """Score each answer option for the shape-location isomorphism.

    :param prob: problem object providing .options and .name
    :param anlg: analogy descriptor (dict-like)
    :param tran: transformation descriptor (dict-like)
    :param d: evaluation-phase data dict carrying the "stub"
    :return: list of prediction dicts, one per option
    """
    u1_coms = d.get("stub").get("u1_coms")
    u2_coms = d.get("stub").get("u2_coms")
    u3_coms = d.get("stub").get("u3_coms")
    lcm_u1_com_ids = d.get("stub").get("lcm_u1_com_ids")
    lcm_u2_com_ids = d.get("stub").get("lcm_u2_com_ids")
    jcm_u1_com_ids = d.get("stub").get("jcm_u1_com_ids")
    jcm_u2_com_ids = d.get("stub").get("jcm_u2_com_ids")
    pred_data = []
    for ii, opt in enumerate(prob.options):
        print(prob.name, anlg.get("name"), tran.get("name"), ii)
        opt_coms, _, _ = utils.decompose(opt, 8, trim=False)
        lcm_u3_com_ids, lcm_opt_com_ids, lcm_score = map.location_map(
            u3_coms, opt_coms)
        jcm_u3_com_ids, jcm_opt_com_ids, jcm_score = map.soft_jaccard_map(
            u3_coms, opt_coms)
        # Location and shape mappings must be non-trivial, equally sized,
        # and cover the same component sets on both rows.  np.array_equal
        # replaces elementwise `==` so that unique sets of different sizes
        # compare as unequal instead of raising a broadcast error.
        if 1 != len(lcm_u1_com_ids) and \
                len(lcm_u1_com_ids) == len(jcm_u1_com_ids) and \
                len(lcm_u2_com_ids) == len(jcm_u2_com_ids) and \
                np.array_equal(np.unique(lcm_u1_com_ids), np.unique(jcm_u1_com_ids)) and \
                np.array_equal(np.unique(lcm_u2_com_ids), np.unique(jcm_u2_com_ids)) and \
                len(lcm_u1_com_ids) == len(lcm_u3_com_ids) and \
                len(lcm_u3_com_ids) == len(jcm_u3_com_ids) and \
                len(lcm_opt_com_ids) == len(jcm_opt_com_ids) and \
                np.array_equal(np.unique(lcm_u3_com_ids), np.unique(jcm_u3_com_ids)) and \
                np.array_equal(np.unique(lcm_opt_com_ids), np.unique(jcm_opt_com_ids)):
            isomorphic_mappings = map.derive_isomorphic_mappings(
                list(range(len(u1_coms))), list(range(len(u2_coms))),
                list(range(len(u3_coms))), list(range(len(opt_coms))),
                lcm_u1_com_ids, lcm_u2_com_ids, jcm_u1_com_ids, jcm_u2_com_ids,
                lcm_u3_com_ids, lcm_opt_com_ids, jcm_u3_com_ids, jcm_opt_com_ids)
            if isomorphic_mappings is not None and 0 != len(isomorphic_mappings):
                phm_score = 1
            else:
                phm_score = 0
        else:
            phm_score = 0
        score = min((lcm_score + jcm_score) / 2, phm_score)
        pred_data.append({**d,
                          "optn": ii + 1,
                          "optn_score": score,
                          "mato_score": (d.get("mat_score") + score) / 2,
                          "pred": opt})
    return pred_data
def tpm(A_coms, B_coms, cur_A, cur_B, cur_A_com_ids, cur_B_com_ids):
    """Recursively derive a topological (inside/outside) mapping between the
    components of cur_A and cur_B.

    :param A_coms: all components of image A, indexed by component id
    :param B_coms: all components of image B, indexed by component id
    :param cur_A: current binary sub-image of A under consideration
    :param cur_B: current binary sub-image of B under consideration
    :param cur_A_com_ids: ids of A components contained in cur_A
    :param cur_B_com_ids: ids of B components contained in cur_B
    :return: (A com ids, B com ids) in corresponding order, or ([], [])
             when no topological mapping exists.
    """
    # A mapping requires equally many components on both sides.
    if len(cur_A_com_ids) != len(cur_B_com_ids):
        return [], []
    # Base case: a single component maps to the single other component.
    if 1 == len(cur_A_com_ids) and 1 == len(cur_B_com_ids):
        return cur_A_com_ids, cur_B_com_ids
    # Fill holes so nested components merge into their enclosing shells;
    # the filled components define the "outside" groups.
    cur_A_filled = utils.fill_holes(cur_A)
    cur_B_filled = utils.fill_holes(cur_B)
    cur_A_filled_coms, _, _ = utils.decompose(cur_A_filled, 8, trim=False)
    cur_B_filled_coms, _, _ = utils.decompose(cur_B_filled, 8, trim=False)
    if len(cur_A_filled_coms) != len(cur_B_filled_coms):
        return [], []
    # Filling changed nothing, so no inside/outside structure to exploit.
    if len(cur_A_filled_coms) == len(cur_A_com_ids):
        # TODO inside-outside topo mapping failed here.
        # TODO fallback mapping method can be applied here, but for the current problems, it is unnecessary.
        return [], []
    # Group the original components by the filled shell that contains them:
    # a component is inside a shell iff intersecting with the shell leaves
    # it unchanged.
    A_com_groups = [[
        com_id for com_id in cur_A_com_ids
        if (np.logical_and(A_coms[com_id], A_filled_com) == A_coms[com_id]).all()
    ] for A_filled_com in cur_A_filled_coms]
    B_com_groups = [[
        com_id for com_id in cur_B_com_ids
        if (np.logical_and(B_coms[com_id], B_filled_com) == B_coms[com_id]).all()
    ] for B_filled_com in cur_B_filled_coms]
    if 1 == len(A_com_groups) and 1 == len(B_com_groups):
        # Everything nests inside one outer shell: recurse one level deeper.
        return tpm_go_deeper(A_coms, B_coms, cur_A_com_ids, cur_A_filled,
                             cur_B_com_ids, cur_B_filled)
    else:
        # Multiple sibling groups: match the groups against each other.
        return tpm_go_wider(A_coms, B_coms, A_com_groups, B_com_groups)
def tokenize(expr, placeHolder=0):
    """Tokenize *expr* by repeatedly extracting sub-expressions in
    operator-precedence order (parentheses first, then keywords,
    factorial, power/root, mult/div, add/sub).

    :param expr: expression string to reduce
    :param placeHolder: starting index for generated token keys
    :return: (reduced expression, dict of extracted tokens)
    """
    finders = (
        ssf.InnerParenthesisFinder().find,
        ssf.KeyWordsFinder().find,
        ssf.KeyWordsArgsFinder().find,
        ssf.FactorialFinder().find,
        ssf.PowRootFinder().find,
        ssf.MultDivFinder().find,
        ssf.AddSubFinder().find,
    )
    tokens = {}
    for finder in finders:
        # Token keys continue numbering after those already collected.
        expr, found = decompose(expr, finder, placeHolder + len(tokens.keys()))
        tokens = mergeDict(tokens, found)
    return expr, tokens
def _get_desired_pose(self, idx):
    """Get the desired pose at the given path index.

    The desired heading is calculated from the velocity.

    Args:
        idx (int): Index of point in path

    Returns:
        ndarray: The desired pose at the given index
    """
    x, y = self._pos[idx]
    _, direction = decompose(self._vel[idx])
    dy, dx = direction[1], direction[0]
    return x, y, wrap_to_pi(np.arctan2(dy, dx))
def process(self, log=True):
    """Accumulate per-frame translation and rotation over the whole video.

    Args:
        log (bool): when True, print the frames-per-5-seconds rate.

    Returns:
        tuple: the accumulated Tx, Ty and Rot lists.
    """
    t0 = time.time()
    frames_done = 0
    self._preprocess()
    while True:
        frame = self.VL.read_gray()
        if frame is None:
            break
        P_1 = ncc(self.last_frame, frame, self.P, self.n, self.M)
        dx, dy, dr = decompose(self.P, P_1)
        # Integrate the per-frame deltas into running totals.
        self.Tx.append(dx + self.Tx[-1])
        self.Ty.append(dy + self.Ty[-1])
        self.Rot.append(dr + self.Rot[-1])
        self.last_frame = frame
        frames_done += 1
        if log and time.time() - t0 > 5:
            print(frames_done / 5)
            frames_done = 0
            t0 = time.time()
    return self.Tx, self.Ty, self.Rot
def get_control(self, pose):
    """Calculate the values for the feedback control.

    Args:
        pose (ndarray): The current pose of the mobile base

    Returns:
        tuple: Linear and angular velocity
    """
    # Nearest path point and the desired pose there.
    nearest = self._get_nearest(pose)
    desired = self._get_desired_pose(nearest)
    # Pose error expressed in the path frame; wrap the heading error.
    err = self._get_transform(nearest).dot(pose - desired)
    err[2] = wrap_to_pi(err[2])
    # Feed-forward linear velocity from the path.
    linear, _ = decompose(self._vel[nearest])
    # Pole placement with poles a and b.
    a, b = -2, -3
    k2 = a * b
    k3 = -(a + b)
    omega1 = -k2 * err[1] - k3 * err[2]
    curv = self._curvature[nearest]
    s_dot = (linear * np.cos(err[2])) / (err[1] * curv - 1)
    angular = omega1 + curv * s_dot
    return linear, angular
def __call__(self, x):
    """Decompose |x| into base-gamma digits, map them through
    self.representation, and re-sum the series with x's sign reapplied.
    """
    with workdps(self.dps):
        # Digits of |x| from the gamma-power decomposition, capped at
        # self.max_precision digits.
        bits = decompose(math.fabs(x), self.gamma_pow, self.max_precision)
        # Per-digit coefficients produced by the configured representation.
        ps = self.representation(bits)
        gammas = [mpf(self.gamma)]*bits.size
        # Sum c * gamma^(-power) over the retained digits; self.betas
        # supplies the exponent schedule.
        # NOTE(review): assumes self.betas has at least bits.size entries —
        # confirm upstream.
        return math.copysign(np.sum([c * gamma **(-power) for (c, gamma, power) in zip(ps, gammas, self.betas[:bits.size])]), x)
# Keep only epochs where every electrical channel is non-negative.
# Fix: the original tested elec.sm2 twice and never elec.sm3, even though
# sm3 is stacked into the data matrix below.
val = np.where((elec.apwr >= 0) * (elec.rpwr >= 0) * (elec.volt >= 0) *
               (elec.itns >= 0) * (elec.sm1 >= 0) * (elec.sm2 >= 0) *
               (elec.sm3 >= 0))[0]
epoch = elec.epo[val]
data = np.vstack((elec.apwr[val], elec.rpwr[val], elec.volt[val],
                  elec.itns[val], elec.sm1[val], elec.sm2[val],
                  elec.sm3[val]))
# --- standardize data to mean 0 and std 1
data = utils.standardize(data=data)
# --- PCA decomposition
pc, var = utils.decompose(data=data, co=7)
# --- eigenvalues
# NOTE(review): `co` must match the co=7 passed above — confirm it is
# defined in the enclosing scope.
coef = np.array(list(map(pc_proj, np.arange(co))))
# --- reconstructed data from PCs
rlc = np.dot(coef.T, pc)
# --- residuals
res = data - rlc
sres = utils.smooth(curv=res, fs=100)
# --- fetch date and time info
date = elec.date[val]
time = elec.time[val]
# Load each resource's attributes from the attribute table.
for resource in resources:
    resource.attributes = []
    for row in session.execute(query, (CONTEXT, resource.key)):
        resource.attributes.append((row.attribute, row.value))
    print("%s: %s" % (resource.key, resource.attributes))

# Convert the times to timestamps expressed in seconds
start_ts = time.mktime(start.timetuple())
end_ts = time.mktime(end.timetuple())

# Compute the partition keys
first_partition = int(round_down(start_ts, SHARD))
last_partition = int(round_down(end_ts, SHARD)) + SHARD
partitions = []
for partition in range(first_partition, last_partition, SHARD):
    print("Partition: %d includes data from: %s, to %s" %
          (partition, df(partition), df(partition + SHARD)))
    partitions.append(partition)

# Gather the samples for every resource in every partition
query = SimpleStatement(
    'SELECT * from samples WHERE context = %s AND partition = %s and resource = %s'
)
for resource in resources:
    # Execute the query for each partition
    for partition in partitions:
        for row in session.execute(query, (CONTEXT, partition, resource.key)):
            # decompose() unpacks the stored sample value for display.
            print(row.context, row.partition, row.resource, row.collected_at,
                  row.metric_name, row.attributes, decompose(row.value))
# Seed the tracker from the first frame.
last_frame = VL.read_gray()
P = gen_track_P(last_frame, M)
# Accumulated translation and rotation, starting from the origin.
Tx = [0]
Ty = [0]
Rot = [0]
start_t = time.time()
cnt = 0
while True:
    curr_frame = VL.read_gray()
    if curr_frame is None:
        break
    # Match tracking points between consecutive frames and split the
    # result into per-frame translation and rotation deltas.
    P_1 = ncc(last_frame, curr_frame, P, n, M)
    tx, ty, rot = decompose(P, P_1)
    # tx, ty, rot = P_1[0, 1] - P[0, 1], P_1[1, 1] - P[1, 1], 0
    Tx.append(tx + Tx[-1])
    Ty.append(ty + Ty[-1])
    Rot.append(rot + Rot[-1])
    last_frame = curr_frame
    cnt += 1
    # Report the processing rate roughly every 5 seconds.
    if time.time() - start_t > 5:
        print(cnt / 5)
        cnt = 0
        start_t = time.time()
plt.figure(figsize=(15, 15))
frame_index = np.linspace(1, len(Tx), len(Tx))
plt.axis('on')
def lemma_candidate(l, r):
    """Generate candidate (stem, ending) lemma pairs for the surface split
    (l, r) by applying Korean irregular-conjugation recovery rules.

    :param l: left (stem-side) surface string, non-empty
    :param r: right (ending-side) surface string, possibly empty
    :return: set of (stem, ending) candidates, always including (l, r)
    """
    def add_lemma(stem, ending):
        candidates.add((stem, ending))

    candidates = {(l, r)}
    # Jamo triple (onset, vowel, coda) of the last character of l.
    l_last = decompose(l[-1])
    l_last_ = compose(l_last[0], l_last[1], ' ')
    l_front = l[:-1]
    r_first = decompose(r[0]) if r else ('', '', '')
    r_first_ = compose(r_first[0], r_first[1], ' ') if r else ' '
    r_end = r[1:]

    # ㄷ 불규칙 활용: 깨달 + 아 -> 깨닫 + 아
    if l_last[2] == 'ㄹ' and r_first[0] == 'ㅇ':
        l_stem = l_front + compose(l_last[0], l_last[1], 'ㄷ')
        add_lemma(l_stem, r)

    # 르 불규칙 활용: 굴 + 러 -> 구르 + 어
    if (l_last[2] == 'ㄹ') and (r_first_ == '러' or r_first_ == '라'):
        l_stem = l_front + compose(l_last[0], l_last[1], ' ') + '르'
        r_canon = compose('ㅇ', r_first[1], r_first[2]) + r_end
        add_lemma(l_stem, r_canon)

    # ㅂ 불규칙 활용: 더러 + 워서 -> 더럽 + 어서
    if (l_last[2] == ' ') and (r_first_ == '워' or r_first_ == '와'):
        l_stem = l_front + compose(l_last[0], l_last[1], 'ㅂ')
        r_canon = compose('ㅇ', 'ㅏ' if r_first_ == '와' else 'ㅓ',
                          r_first[2]) + r_end
        add_lemma(l_stem, r_canon)

    # 어미의 첫글자가 종성일 경우 (-ㄴ, -ㄹ, -ㅂ, -ㅅ)
    # 입 + 니다 -> 이 + ㅂ니다
    if l_last[2] == 'ㄴ' or l_last[2] == 'ㄹ' or l_last[2] == 'ㅂ' or l_last[
            2] == 'ㅆ':
        l_stem = l_front + compose(l_last[0], l_last[1], ' ')
        r_canon = l_last[2] + r
        add_lemma(l_stem, r_canon)

    # ㅅ 불규칙 활용: 부 + 어 -> 붓 + 어
    # exception : 벗 + 어 -> 벗어
    if (l_last[2] == ' ' and l[-1] != '벗') and (r_first[0] == 'ㅇ'):
        l_stem = l_front + compose(l_last[0], l_last[1], 'ㅅ')
        add_lemma(l_stem, r)

    # 우 불규칙 활용: 똥퍼 + '' -> 똥푸 + 어
    if l_last_ == '퍼':
        l_stem = l_front + '푸'
        r_canon = compose('ㅇ', l_last[1], l_last[2]) + r
        add_lemma(l_stem, r_canon)

    # 우 불규칙 활용: 줬 + 어 -> 주 + 었어
    if l_last[1] == 'ㅝ':
        l_stem = l_front + compose(l_last[0], 'ㅜ', ' ')
        r_canon = compose('ㅇ', 'ㅓ', l_last[2]) + r
        add_lemma(l_stem, r_canon)

    # 오 불규칙 활용: 왔 + 어 -> 오 + 았어
    if l_last[1] == 'ㅘ':
        l_stem = l_front + compose(l_last[0], 'ㅗ', ' ')
        r_canon = compose('ㅇ', 'ㅏ', l_last[2]) + r
        add_lemma(l_stem, r_canon)

    # ㅡ 탈락 불규칙 활용: 꺼 + '' -> 끄 + 어 / 텄 + 어 -> 트 + 었어
    if (l_last[1] == 'ㅓ' or l_last[1] == 'ㅏ'):
        l_stem = l_front + compose(l_last[0], 'ㅡ', ' ')
        r_canon = compose('ㅇ', l_last[1], l_last[2]) + r
        add_lemma(l_stem, r_canon)

    # 거라, 너라 불규칙 활용
    # '-거라/-너라'를 어미로 취급하면 규칙 활용
    # if (l[-1] == '가') and (r and (r[0] == '라' or r[:2] == '거라')):
    #     # TODO

    # 러 불규칙 활용: 이르 + 러 -> 이르다
    # if (r_first[0] == 'ㄹ' and r_first[1] == 'ㅓ'):
    #     if self.is_stem(l):
    #         # TODO

    # 여 불규칙 활용
    # 하 + 였다 -> 하 + 았다 -> 하다: '였다'를 어미로 취급하면 규칙 활용

    # 여 불규칙 활용 (2)
    # 했 + 다 -> 하 + 았다 / 해 + 라니깐 -> 하 + 아라니깐 / 했 + 었다 -> 하 + 았었다
    if l_last[0] == 'ㅎ' and l_last[1] == 'ㅐ':
        l_stem = l_front + '하'
        r_canon = compose('ㅇ', 'ㅏ', l_last[2]) + r
        add_lemma(l_stem, r_canon)

    # ㅎ (탈락) 불규칙 활용
    if (l_last[2] == ' ' or l_last[2] == 'ㄴ' or l_last[2] == 'ㄹ'
            or l_last[2] == 'ㅂ' or l_last[2] == 'ㅆ'):
        # 파라 + 면 -> 파랗 + 면
        if (l_last[1] == 'ㅏ' or l_last[1] == 'ㅓ'):
            l_stem = l_front + compose(l_last[0], l_last[1], 'ㅎ')
            r_canon = r if l_last[2] == ' ' else l_last[2] + r
            add_lemma(l_stem, r_canon)
        # ㅎ (축약) 불규칙 할용
        # 시퍼렜 + 다 -> 시퍼렇 + 었다, 파랬 + 다 -> 파랗 + 았다
        if (l_last[1] == 'ㅐ') or (l_last[1] == 'ㅔ'):
            # exception : 그렇 + 아 -> 그래
            if len(l) >= 2 and l[-2] == '그' and l_last[0] == 'ㄹ':
                l_stem = l_front + '렇'
            else:
                l_stem = l_front + compose(
                    l_last[0], 'ㅓ' if l_last[1] == 'ㅔ' else 'ㅏ', 'ㅎ')
            r_canon = compose('ㅇ', 'ㅓ' if l_last[1] == 'ㅔ' else 'ㅏ',
                              l_last[2]) + r
            add_lemma(l_stem, r_canon)

    # 이었 -> 였 규칙활용
    # 좋아졌 + 어 -> 좋아지 + 었어, 좋아졋 + 던 -> 좋아지 + 었던, 좋아져 + 서 -> 좋아지 + 어서
    # 였 + 어 -> 이 + 었어
    # 종성 ㅆ 을 ㅅ 으로 쓰는 경우도 고려
    # Fix: the coda test and the vowel test must BOTH hold.  The original
    # `(coda-test and vowel==ㅕ) or vowel==ㅓ` fired for every syllable whose
    # vowel is ㅓ regardless of coda, producing spurious candidates.
    if (l_last[2] == 'ㅆ' or l_last[2] == 'ㅅ' or l_last[2] == ' ') and \
            (l_last[1] == 'ㅕ' or l_last[1] == 'ㅓ'):
        # except: -었 -> 이 + 었 (x) // -였-> 이 + 었 (o) // -졌 -> 지 + 었 (o) // -젔 -> 지 + 었
        if ((l_last[0] == 'ㅇ') and (l_last[1] == 'ㅕ')) or not (l_last[0] == 'ㅇ'):
            l_stem = l_front + compose(l_last[0], 'ㅣ', ' ')
            r_canon = compose('ㅇ', 'ㅓ', l_last[2]) + r
            add_lemma(l_stem, r_canon)

    return candidates
# Demo: estimate the 2-D transform between two still images via NCC matching.
import cv2
from utils import gen_track_P, decompose, get_2d_sample, BGR_2_gray
from NCC import ncc, ncc_neighbor
import matplotlib.pyplot as plt
import time

# Matching configuration: sample location, template and search-window sizes.
coord_X, coord_Y = 1000, 1200
window_sz = 32
neighbor_sz = 64

input_img1 = cv2.imread('../images/4.png')
input_img2 = cv2.imread('../images/3.png')

# Generate tracking points on the first image, match them on the second,
# then split the resulting transform A into translation and rotation.
P = gen_track_P(input_img1, neighbor_sz)
P_1, A = ncc(input_img1, input_img2, P, window_sz, neighbor_sz)
tx, ty, rot = decompose(A)
print(P)
print(P_1)
print(A)
print('finish')