def orb_distance(self, descriptor):
    """Return the minimum Hamming distance between *descriptor* and any
    of the ORB descriptors stored for this point.

    :descriptor: an ORB descriptor to compare against each entry of
        ``self.orb()``
    :returns: the smallest ``helper.hamming_distance`` value over all
        stored descriptors
    :raises ValueError: if ``self.orb()`` yields no descriptors (``min``
        of an empty sequence) — same as the original behavior
    """
    # min() consumes a generator directly; no need to materialize a list.
    return min(helper.hamming_distance(o, descriptor) for o in self.orb())
def compute_single_likelihood(self, datum):
    """Log-likelihood of one datum under a noisy-channel mismatch model.

    The hypothesis (``self`` is callable) is run on ``datum.input`` and its
    output is compared to the expected sequence. The distance is the Hamming
    distance over the overlapping prefix plus one mismatch for every
    position the generated sequence falls short. Each mismatch contributes
    ``log(alpha)`` and each match ``log(1 - alpha)``.

    :param datum: provides ``.alpha`` (noise rate, assumed in (0, 1) —
        TODO confirm), ``.input`` (argument tuple for the hypothesis) and
        ``.output`` (a mapping whose first key is the expected sequence).
    :returns: the log-likelihood (float).
    """
    alpha = datum.alpha
    # next(iter(...)) yields the first key on both Python 2 and 3;
    # the original ``.keys()[0]`` breaks on Python 3, where dict views
    # are not indexable.
    true_val = next(iter(datum.output))
    generated = self(*datum.input)
    generated = generated[:len(true_val)]
    # Mismatches in the shared prefix, plus the length shortfall counted
    # as additional mismatches.
    dist = (hamming_distance(generated, true_val[:len(generated)])
            + len(true_val) - len(generated))
    return dist * log(alpha) + (len(true_val) - dist) * log(1. - alpha)
def orb_distance(self, descriptors):
    """Return the minimum Hamming distance between *descriptors* and any
    ORB descriptor stored for this point (as yielded by ``self.orb()``).
    """
    # Pass a generator to min() instead of building an intermediate list.
    return min(hamming_distance(o, descriptors) for o in self.orb())
def process_frame(img):
    """Ingest one video frame into the SLAM pipeline.

    Resizes the image, wraps it in a Frame, matches it against the previous
    frame, estimates/optimizes the new pose, re-associates existing map
    points by projection, triangulates new points from the pairwise
    matches, and updates the 2-D/3-D displays.

    Relies on module-level state: ``mapp`` (the map), ``K`` (camera
    intrinsics), ``W``/``H`` (target frame size), ``disp2d``/``disp3d``
    (optional displays).

    :param img: the raw input image (a cv2/numpy image; resized in place
        to (W, H) before use).
    :returns: None (all results are accumulated in ``mapp``).
    """
    start_time = time.time()
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    # The very first frame has nothing to match against.
    if frame.id == 0:
        return
    f1 = mapp.frames[-1]  # current frame
    f2 = mapp.frames[-2]  # previous frame

    idx1, idx2, Rt = match_frames(f1, f2)

    if frame.id < 5:
        # get initial positions from fundamental matrix
        f1.pose = np.dot(Rt, f2.pose)
    else:
        # kinematic model: assume the inter-frame motion repeats.
        velocity = np.dot(f2.pose, np.linalg.inv(mapp.frames[-3].pose))
        f1.pose = np.dot(velocity, f2.pose)

    # add new observations if the point is already observed in the previous frame
    # TODO: consider tradeoff doing this before/after search by projection
    for i, idx in enumerate(idx2):
        if f2.pts[idx] is not None and f1.pts[idx1[i]] is None:
            f2.pts[idx].add_observation(f1, idx1[i])

    # pose optimization
    #print(f1.pose)
    pose_opt = mapp.optimize(local_window=1, fix_points=True)
    print("Pose: %f" % pose_opt)
    #print(f1.pose)

    # search by projection: project existing map points into the new frame
    # and try to associate them with unmatched keypoints.
    sbp_pts_count = 0
    if len(mapp.points) > 0:
        map_points = np.array([p.homogeneous() for p in mapp.points])
        projs = np.dot(np.dot(K, f1.pose[:3]), map_points.T).T
        projs = projs[:, 0:2] / projs[:, 2:]
        # Keep only projections that land inside the image bounds.
        good_pts = (projs[:, 0] > 0) & (projs[:, 0] < W) & \
                   (projs[:, 1] > 0) & (projs[:, 1] < H)
        for i, p in enumerate(mapp.points):
            if not good_pts[i]:
                continue
            # Keypoints within 5 px of the projected map point.
            q = f1.kd.query_ball_point(projs[i], 5)
            for m_idx in q:
                if f1.pts[m_idx] is None:
                    # if any descriptors within 32
                    for o in p.orb():
                        o_dist = hamming_distance(o, f1.des[m_idx])
                        if o_dist < 32.0:
                            p.add_observation(f1, m_idx)
                            sbp_pts_count += 1
                            break

    # triangulate the points we don't have matches for
    good_pts4d = np.array([f1.pts[i] is None for i in idx1])

    # reject pts without enough "parallax" (this right?)
    pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])
    good_pts4d &= np.abs(pts4d[:, 3]) > 0.005

    # homogeneous 3-D coords
    pts4d /= pts4d[:, 3:]

    # locally in front of camera
    # NOTE: This check is broken and maybe unneeded
    #pts_tri_local = np.dot(f1.pose, pts4d.T).T
    #good_pts4d &= pts_tri_local[:, 2] > 0

    print("Adding: %d new points, %d search by projection" % (np.sum(good_pts4d), sbp_pts_count))

    # adding new points to the map from pairwise matches
    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        # Sample the point's color from the image at the keypoint location.
        u, v = int(round(f1.kpus[idx1[i], 0])), int(round(f1.kpus[idx1[i], 1]))
        pt = Point(mapp, p[0:3], img[v, u])
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    # 2-D display
    if disp2d is not None:
        # paint annotations on the image: green = well-tracked point,
        # dark green = young point, black = unmatched, blue line = flow.
        for i1, i2 in zip(idx1, idx2):
            u1, v1 = int(round(f1.kpus[i1][0])), int(round(f1.kpus[i1][1]))
            u2, v2 = int(round(f2.kpus[i2][0])), int(round(f2.kpus[i2][1]))
            if f1.pts[i1] is not None:
                if len(f1.pts[i1].frames) >= 5:
                    cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
                else:
                    cv2.circle(img, (u1, v1), color=(0, 128, 0), radius=3)
            else:
                cv2.circle(img, (u1, v1), color=(0, 0, 0), radius=3)
            cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))
        disp2d.paint(img)

    # optimize the map: full bundle adjustment every 5th frame.
    if frame.id >= 4 and frame.id % 5 == 0:
        err = mapp.optimize()
        print("Optimize: %f units of error" % err)

    # 3-D display
    if disp3d is not None:
        disp3d.paint(mapp)

    print("Map: %d points, %d frames" % (len(mapp.points), len(mapp.frames)))
    print("Time: %.2f ms" % ((time.time() - start_time) * 1000.0))
] h0 = MyHypothesis() best_post = None best_h = None best_ed = None s = 0 while (best_post == None or (best_ed != 0)): for h in MHSampler(h0, data, steps=steps): likelihood = h.likelihood if best_post == None or h.posterior_score >= best_post: best_post = h.posterior_score best_h = copy.deepcopy(h) true_val = to_seq generated = best_h(from_seq)[:len(true_val)] best_ed = (hamming_distance( generated, true_val[:len(generated)]) + len(true_val) - len(generated)) # best_ed = editdistance.eval(best_h(from_seq), to_seq) if s % 1000 == 0: #print s, to_seq, from_seq #print ("Rand: ", h, h(from_seq), # editdistance.eval(h(from_seq), to_seq), # exp(h.posterior_score)) print_star(s, "Best: ", best_h, best_h(from_seq), best_ed, to_seq, exp(best_h.posterior_score)) print s += 1
def sample(self, ebn0):
    """Generate one random frame, transmit it at *ebn0*, and return the
    Hamming distance between the sent data and the decoded data.
    """
    sent = list(helpers.generate_random(self.frame_length))
    received = self.transmit(sent, ebn0)
    return helpers.hamming_distance(sent, received)