def frame(self, imarray):
    # No calibration params yet.
    if not self.vo:
        return

    if self.seq > 10000:
        sys.exit()

    if DEBUG:
        print ""
        print ""
        print "Frame ", self.seq
        print ""
        print ""

    im = imarray.images[1]
    im_r = imarray.images[0]

    if im.colorspace == "mono8":
        im_py = Image.fromstring("L", (im.width, im.height), im.data)
        im_r_py = Image.fromstring("L", (im_r.width, im_r.height), im_r.data)
    elif im.colorspace == "rgb24":
        use_color = True
        im_col_py = Image.fromstring("RGB", (im.width, im.height), im.data)
        im_py = im_col_py.convert("L")
        im_r_py = Image.fromstring("RGB", (im_r.width, im_r.height), im_r.data)
        im_r_py = im_r_py.convert("L")
    else:
        print "Unknown colorspace"
        return

    # Detect faces on the first frame
    if not self.current_keyframes:
        self.faces = self.p.detectAllFaces(im_py.tostring(), im.width, im.height,
                                           self.cascade_file, 1.0, None, None, True)
        if DEBUG:
            print "Faces ", self.faces

    sparse_pred_list = []
    sparse_pred_list_2d = []
    old_rect = [0, 0, 0, 0]

    ia = SparseStereoFrame(im_py, im_r_py)
    ia.matches = []
    ia.desc_diffs = []
    ia.good_matches = []

    # Track each face
    iface = -1
    for face in self.faces:
        iface += 1
        (x, y, w, h) = copy.copy(self.faces[iface])
        if DEBUG:
            print "A face ", (x, y, w, h)
        (old_center, old_diff) = self.rect_to_center_diff((x, y, w, h))

        if self.face_centers_3d and iface < len(self.face_centers_3d):
            censize3d = list(copy.copy(self.face_centers_3d[iface]))
            censize3d.append(1.0 * self.real_face_sizes_3d[iface])  ###ZMULT
            self.get_features(ia, self.num_feats, (x, y, w, h), censize3d)
        else:
            self.get_features(ia, self.num_feats, (x, y, w, h), (0.0, 0.0, 0.0, 1000000.0))

        if not ia.kp2d:
            continue

        # First frame:
        if len(self.current_keyframes) < iface + 1:
            (cen, diff) = self.rect_to_center_diff((x, y, w, h))
            cen3d = self.cam.pix2cam(cen[0], cen[1], ia.avgd)
            ltf = self.cam.pix2cam(x, y, ia.avgd)
            rbf = self.cam.pix2cam(x + w, y + h, ia.avgd)
            fs3d = ((rbf[0] - ltf[0]) + (rbf[1] - ltf[1])) / 4.0

            # Check that the face is a reasonable size. If not, skip this face.
            if 2 * fs3d < self.min_real_face_size or 2 * fs3d > self.max_real_face_size or iface > 1:  #HACK: ONLY ALLOW ONE FACE
                self.faces.pop(iface)
                iface -= 1
                continue

            if DESCRIPTOR == 'CALONDER':
                self.vo.collect_descriptors(ia)
            elif DESCRIPTOR == 'SAD':
                self.vo.collect_descriptors_sad(ia)
            else:
                pass

            self.current_keyframes.append(0)
            self.keyframes.append(copy.copy(ia))
            self.feats_to_centers.append(self.make_face_model(cen, diff, ia.kp2d))
            self.real_face_sizes_3d.append(copy.deepcopy(fs3d))
            self.feats_to_centers_3d.append(self.make_face_model(cen3d, (fs3d, fs3d, fs3d), ia.kp3d))
            self.face_centers_3d.append(copy.deepcopy(cen3d))
            self.recent_good_frames.append(copy.copy(ia))
            self.recent_good_rects.append(copy.deepcopy([x, y, w, h]))
            self.recent_good_motion.append([0.0] * 3)  #dx,dy,dimfacesize
            self.recent_good_motion_3d.append([0.0] * 3)
            self.same_key_rgfs.append(True)
        # End first frame

        # Later frames
        else:
            if DESCRIPTOR == 'CALONDER':
                self.vo.collect_descriptors(ia)
            elif DESCRIPTOR == 'SAD':
                self.vo.collect_descriptors_sad(ia)
            else:
                pass

            done_matching = False
            bad_frame = False
            while not done_matching:

                # Try matching to the keyframe
                keyframe = self.keyframes[self.current_keyframes[iface]]
                temp_match = self.vo.temporal_match(ia, keyframe, want_distances=True)
                ia.matches = [(m2, m1) for (m1, m2, m3) in temp_match]
                ia.desc_diffs = [m3 for (m1, m2, m3) in temp_match]
                print "Scores", ia.desc_diffs
                #ia.matches = self.vo.temporal_match(keyframe,ia,want_distances=True)
                #ia.desc_diffs = [(VO.sad(keyframe.descriptors[a], ia.descriptors[b])) for (a,b) in ia.matches]
                ia.good_matches = [s < self.desc_diff_thresh for s in ia.desc_diffs]
                n_good_matches = len([m for m in ia.desc_diffs if m < self.desc_diff_thresh])

                # Not enough matches, get a new keyframe
                if len(keyframe.kp) < 2 or n_good_matches < len(keyframe.kp) / 2.0:
                    if DEBUG:
                        print "New keyframe"

                    # Make a new face model, either from a recent good frame, or from the current image
                    if not self.same_key_rgfs[iface]:
                        matched_z_list = [tz for ((tx, ty, tz), is_good)
                                          in zip(self.recent_good_frames[iface].kp,
                                                 self.recent_good_frames[iface].good_matches)
                                          if is_good]
                        if len(matched_z_list) == 0:
                            matched_z_list = [tz for (tx, ty, tz) in self.recent_good_frames[iface].kp]
                        avgd_goodmatches = sum(matched_z_list) / len(matched_z_list)
                        avg3d_goodmatches = self.cam.pix2cam(0.0, 0.0, avgd_goodmatches)
                        kp3d = [self.cam.pix2cam(kp[0], kp[1], kp[2]) for kp in self.recent_good_frames[iface].kp]
                        print "kp ", self.recent_good_frames[iface].kp
                        print "kp3d ", kp3d
                        print avg3d_goodmatches
                        kp3d_for_model = [this_kp3d for this_kp3d in kp3d
                                          if math.fabs(this_kp3d[2] - avg3d_goodmatches[2]) < 2.0 * self.real_face_sizes_3d[iface]]
                        kp_for_model = [this_kp for (this_kp, this_kp3d) in zip(self.recent_good_frames[iface].kp, kp3d)
                                        if math.fabs(this_kp3d[2] - avg3d_goodmatches[2]) < 2.0 * self.real_face_sizes_3d[iface]]
                        # If you're not left with enough points, just take all of them and don't worry about the depth constraints.
                        if len(kp3d_for_model) < 2:
                            kp3d_for_model = kp3d
                            kp_for_model = copy.deepcopy(self.recent_good_frames[iface].kp)

                        (cen, diff) = self.rect_to_center_diff(self.recent_good_rects[iface])
                        self.feats_to_centers[iface] = self.make_face_model(cen, diff, [(kp0, kp1) for (kp0, kp1, kp2) in kp_for_model])
                        avgd = sum([kp2 for (kp0, kp1, kp2) in kp_for_model]) / len(kp_for_model)
                        cen3d = self.cam.pix2cam(cen[0], cen[1], avgd)
                        self.feats_to_centers_3d[iface] = self.make_face_model(cen3d, [self.real_face_sizes_3d[iface]] * 3, kp3d_for_model)
                        self.keyframes[self.current_keyframes[iface]] = copy.copy(self.recent_good_frames[iface])
                        self.keyframes[self.current_keyframes[iface]].kp = kp_for_model
                        self.keyframes[self.current_keyframes[iface]].kp2d = [(k0, k1) for (k0, k1, k2) in kp_for_model]
                        self.keyframes[self.current_keyframes[iface]].kp3d = kp3d_for_model
                        self.keyframes[self.current_keyframes[iface]].matches = [(i, i) for i in range(len(kp_for_model))]
                        self.keyframes[self.current_keyframes[iface]].good_matches = [True] * len(kp_for_model)
                        self.keyframes[self.current_keyframes[iface]].desc_diffs = [0] * len(kp_for_model)
                        self.face_centers_3d[iface] = copy.deepcopy(cen3d)
                        # Not changing the face size
                        self.current_keyframes[iface] = 0  #### HACK: ONLY ONE KEYFRAME!!!
                        self.same_key_rgfs[iface] = True
                        # Don't need to change the recent good frame yet.

                    else:
                        # Making a new model off of the current frame but with the predicted new position.
                        # HACK: The displacement computation assumes that the robot/head is still, fix this.
                        bad_frame = True
                        #done_matching = True
                        if DEBUG:
                            print "Bad frame ", self.seq, " for face ", iface
                        (cen, diff) = self.rect_to_center_diff(self.faces[iface])
                        if DEBUG:
                            print "Motion for bad frame ", self.recent_good_motion[iface], self.recent_good_motion_3d[iface]
                        new_cen = [cen[0] + self.recent_good_motion[iface][0],
                                   cen[1] + self.recent_good_motion[iface][1]]
                        diff = [diff[0] + self.recent_good_motion[iface][2],
                                diff[1] + self.recent_good_motion[iface][2]]
                        self.faces[iface] = (new_cen[0] - diff[0], new_cen[1] - diff[1], 2.0 * diff[0], 2.0 * diff[1])
                        (x, y, w, h) = copy.deepcopy(self.faces[iface])
                        pred_cen_3d = [o + n for (o, n) in zip(self.face_centers_3d[iface], self.recent_good_motion_3d[iface])]
                        pred_cen_3d.append(1.0 * self.real_face_sizes_3d[iface])  #### ZMULT
                        self.get_features(ia, self.num_feats, (x, y, w, h), pred_cen_3d)
                        if not ia.kp2d:
                            break
                        if DESCRIPTOR == 'CALONDER':
                            self.vo.collect_descriptors(ia)
                        elif DESCRIPTOR == 'SAD':
                            self.vo.collect_descriptors_sad(ia)
                        else:
                            pass
                        self.keyframes[self.current_keyframes[iface]] = copy.copy(ia)
                        self.current_keyframes[iface] = 0
                        (cen, diff) = self.rect_to_center_diff(self.faces[iface])
                        self.feats_to_centers[iface] = self.make_face_model(cen, diff, ia.kp2d)
                        cen3d = self.cam.pix2cam(cen[0], cen[1], ia.avgd)
                        self.feats_to_centers_3d[iface] = self.make_face_model(cen3d, [self.real_face_sizes_3d[iface]] * 3, ia.kp3d)
                        self.face_centers_3d[iface] = copy.deepcopy(cen3d)
                        self.same_key_rgfs[iface] = True

                # Good matches, mark this frame as good
                else:
                    done_matching = True

            # END MATCHING

            # If we got enough matches for this frame, track.
            if ia.kp and ia.kp2d:

                # Track
                sparse_pred_list = []
                sparse_pred_list_2d = []
                probs = []
                bandwidths = []
                size_mult = 1.0
                for ((match1, match2), score) in zip(ia.matches, ia.desc_diffs):
                    if score < self.desc_diff_thresh:
                        kp3d = self.cam.pix2cam(ia.kp[match2][0], ia.kp[match2][1], ia.kp[match2][2])
                        sparse_pred_list.append((kp3d[0] + self.feats_to_centers_3d[iface][match1][0],
                                                 kp3d[1] + self.feats_to_centers_3d[iface][match1][1],
                                                 kp3d[2] + self.feats_to_centers_3d[iface][match1][2]))
                        sparse_pred_list_2d.append((ia.kp2d[match2][0] + self.feats_to_centers[iface][match1][0],
                                                    ia.kp2d[match2][1] + self.feats_to_centers[iface][match1][1]))
                probs = [1.0] * len(sparse_pred_list_2d)
                bandwidths = [size_mult * self.real_face_sizes_3d[iface]] * len(sparse_pred_list_2d)

                if DEBUG:
                    print "Old center 3d ", self.face_centers_3d[iface]
                    print "Old center 2d ", (x + (w - 1) / 2.0, y + (h - 1) / 2.0)

                old_rect = self.faces[iface]
                (old_center, old_diff) = self.rect_to_center_diff(old_rect)
                new_center = self.mean_shift_sparse(self.face_centers_3d[iface], sparse_pred_list, probs, bandwidths, 10, 5.0)
                new_center_2d = self.cam.cam2pix(new_center[0], new_center[1], new_center[2])
                ltf = self.cam.cam2pix(new_center[0] - self.real_face_sizes_3d[iface],
                                       new_center[1] - self.real_face_sizes_3d[iface],
                                       new_center[2])
                rbf = self.cam.cam2pix(new_center[0] + self.real_face_sizes_3d[iface],
                                       new_center[1] + self.real_face_sizes_3d[iface],
                                       new_center[2])
                w = rbf[0] - ltf[0]
                h = rbf[1] - ltf[1]
                if DEBUG:
                    print "new center 3d ", new_center
                    print "new_center 2d ", new_center_2d
                (nx, ny, nw, nh) = (new_center_2d[0] - (w - 1) / 2.0, new_center_2d[1] - (h - 1) / 2.0, w, h)

                # Force the window back into the image.
                dx = max(0, 0 - nx) + min(0, im.width - (nx + nw))   # was "im.width - nx + nw", which never clamps the right edge
                dy = max(0, 0 - ny) + min(0, im.height - (ny + nh))  # was "im.height - ny + nh"
                nx += dx
                ny += dy
                self.faces[iface] = [nx, ny, nw, nh]
                self.recent_good_rects[iface] = [nx, ny, nw, nh]

                if bad_frame:
                    self.recent_good_motion[iface] = self.recent_good_motion[iface]
                    self.recent_good_motion_3d[iface] = self.recent_good_motion_3d[iface]
                else:
                    self.recent_good_motion[iface] = [new_center_2d[0] - old_center[0],
                                                      new_center_2d[1] - old_center[1],
                                                      ((nw - 1.0) / 2.0) - old_diff[0]]
                    self.recent_good_motion_3d[iface] = [new_center[i] - self.face_centers_3d[iface][i] for i in range(len(new_center))]
                self.face_centers_3d[iface] = copy.deepcopy(new_center)
                self.recent_good_frames[iface] = copy.copy(ia)
                self.same_key_rgfs[iface] = False

                if DEBUG:
                    print "motion ", self.recent_good_motion[iface]
                    print "face 2d ", self.faces[iface]
                    print "face center 3d ", self.face_centers_3d[iface]

                # Output the location of this face center in the 3D camera frame (of the left camera), and rotate
                # the coordinates to match the robot's idea of the 3D camera frame.
                center_uvd = (nx + (nw - 1) / 2.0, ny + (nh - 1) / 2.0, (numpy.average(ia.kp, 0))[2])
                center_camXYZ = self.cam.pix2cam(center_uvd[0], center_uvd[1], center_uvd[2])
                center_robXYZ = (center_camXYZ[2], -center_camXYZ[0], -center_camXYZ[1])

                ########### PUBLISH the face center for the head controller to track. ########
                if not self.usebag:
                    stamped_point = PointStamped()
                    (stamped_point.point.x, stamped_point.point.y, stamped_point.point.z) = center_robXYZ
                    stamped_point.header.frame_id = "stereo"
                    stamped_point.header.stamp = imarray.header.stamp
                    self.pub.publish(stamped_point)
        # End later frames

        ############ DRAWING ################
        if SAVE_PICS:
            if not self.keyframes or len(self.keyframes) <= iface:
                bigim_py = im_py
                draw = ImageDraw.Draw(bigim_py)
            else:
                key_im = self.keyframes[self.current_keyframes[iface]]
                keyim_py = Image.fromstring("L", key_im.size, key_im.rawdata)
                bigim_py = Image.new("RGB", (im_py.size[0] + key_im.size[0], im_py.size[1]))
                bigim_py.paste(keyim_py.convert("RGB"), (0, 0))
                bigim_py.paste(im_py, (key_im.size[0] + 1, 0))
                draw = ImageDraw.Draw(bigim_py)

            # NOTE: the drawing below uses key_im and so assumes the else-branch above ran,
            # which is the case once a keyframe has been stored for this face.
            (x, y, w, h) = self.faces[iface]
            draw.rectangle((x, y, x + w, y + h), outline=(0, 255, 0))
            draw.rectangle((x + key_im.size[0], y, x + w + key_im.size[0], y + h), outline=(0, 255, 0))
            (x, y, w, h) = old_rect
            draw.rectangle((x, y, x + w, y + h), outline=(255, 255, 255))
            draw.rectangle((x + key_im.size[0], y, x + w + key_im.size[0], y + h), outline=(255, 255, 255))
            mstart = old_center
            mend = (old_center[0] + self.recent_good_motion[iface][0], old_center[1] + self.recent_good_motion[iface][1])
            draw.rectangle((mstart[0] - 1, mstart[1] - 1, mstart[0] + 1, mstart[1] + 1), outline=(255, 255, 255))
            draw.rectangle((mend[0] - 1, mend[1] - 1, mend[0] + 1, mend[1] + 1), outline=(0, 255, 0))
            draw.line(mstart + mend, fill=(255, 255, 255))

            for (x, y) in key_im.kp2d:
                draw_x(draw, (x, y), (1, 1), (255, 0, 0))
            for (x, y) in ia.kp2d:
                draw_x(draw, (x + key_im.size[0], y), (1, 1), (255, 0, 0))

            if self.seq > 0:
                for (x, y) in sparse_pred_list_2d:
                    draw_x(draw, (x, y), (1, 1), (0, 0, 255))
                    draw_x(draw, (x + key_im.size[0], y), (1, 1), (0, 0, 255))
                if ia.matches:
                    for ((m1, m2), score) in zip(ia.matches, ia.desc_diffs):
                        if score > self.desc_diff_thresh:
                            color = (255, 0, 0)
                        else:
                            color = (0, 255, 0)
                        draw.line((key_im.kp2d[m1][0], key_im.kp2d[m1][1],
                                   ia.kp2d[m2][0] + key_im.size[0], ia.kp2d[m2][1]), fill=color)

            bigim_py.save("/tmp/tiff/feats%06d_%03d.tiff" % (self.seq, iface))
        #END DRAWING

    # END FACE LOOP

    self.seq += 1
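

# The helpers rect_to_center_diff() and make_face_model() are called throughout frame() but are
# not defined in this file. The sketches below are assumptions reconstructed from the call sites
# only, not the tracker's actual implementations: rect_to_center_diff() is assumed to split an
# (x, y, w, h) rectangle into a center and half-extents, and make_face_model() is assumed to store,
# per keypoint, the offset from that keypoint to the face center (its size argument may be used for
# normalisation in the real code; it is accepted but unused here).

def rect_to_center_diff_sketch(rect):
    (x, y, w, h) = rect
    cen = (x + (w - 1) / 2.0, y + (h - 1) / 2.0)   # rectangle center
    diff = ((w - 1) / 2.0, (h - 1) / 2.0)          # half width / half height
    return (cen, diff)

def make_face_model_sketch(center, size, keypoints):
    # One offset vector per keypoint; adding the offset to the matching keypoint in a later
    # frame gives that keypoint's prediction of where the face center now is.
    return [tuple(c - k for (c, k) in zip(center, kp)) for kp in keypoints]
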
def frame(self, imarray):
    # No calibration params yet.
    if not self.vo:
        return

    if self.seq > 10000:
        sys.exit()

    if DEBUG:
        print ""
        print ""
        print "Frame ", self.seq
        print ""
        print ""

    im = imarray.images[1]
    im_r = imarray.images[0]

    if im.colorspace == "mono8":
        im_py = Image.fromstring("L", (im.width, im.height), im.data)
        im_r_py = Image.fromstring("L", (im_r.width, im_r.height), im_r.data)
    elif im.colorspace == "rgb24":
        use_color = True
        im_col_py = Image.fromstring("RGB", (im.width, im.height), im.data)
        im_py = im_col_py.convert("L")
        im_r_py = Image.fromstring("RGB", (im_r.width, im_r.height), im_r.data)
        im_r_py = im_r_py.convert("L")
    else:
        print "Unknown colorspace"
        return

    # Detect faces on the first frame
    if not self.current_keyframes:
        self.faces = self.p.detectAllFaces(im_py.tostring(), im.width, im.height,
                                           self.cascade_file, 1.0, None, None, True)
        if DEBUG:
            print "Faces ", self.faces

    sparse_pred_list = []
    sparse_pred_list_2d = []
    old_rect = [0, 0, 0, 0]

    ia = SparseStereoFrame(im_py, im_r_py)
    ia.matches = []
    ia.desc_diffs = []
    ia.good_matches = []

    # Track each face
    iface = -1
    for face in self.faces:
        iface += 1
        (x, y, w, h) = copy.copy(self.faces[iface])
        if DEBUG:
            print "A face ", (x, y, w, h)
        (old_center, old_diff) = self.rect_to_center_diff((x, y, w, h))

        if self.face_centers_3d and iface < len(self.face_centers_3d):
            censize3d = list(copy.copy(self.face_centers_3d[iface]))
            censize3d.append(2.0 * self.real_face_sizes_3d[iface])  ###ZMULT
            self.get_features(ia, self.num_feats, (x, y, w, h), censize3d)
        else:
            self.get_features(ia, self.num_feats, (x, y, w, h), (0.0, 0.0, 0.0, 1000000.0))

        if not ia.kp2d:
            continue

        # First frame:
        if len(self.current_keyframes) < iface + 1:
            (cen, diff) = self.rect_to_center_diff((x, y, w, h))
            cen3d = self.cam.pix2cam(cen[0], cen[1], ia.avgd)
            cen3d = list(cen3d)
            ltf = self.cam.pix2cam(x, y, ia.avgd)
            rbf = self.cam.pix2cam(x + w, y + h, ia.avgd)
            fs3d = ((rbf[0] - ltf[0]) + (rbf[1] - ltf[1])) / 4.0
            # This assumes that we're tracking the face plane center, not the center of the head sphere.
            # If you want to track the center of the sphere instead, do: cen3d[2] += fs3d

            # Check that the face is a reasonable size. If not, skip this face.
            if 2 * fs3d < self.min_real_face_size or 2 * fs3d > self.max_real_face_size or iface > 1:  #HACK: ONLY ALLOW ONE FACE
                self.faces.pop(iface)
                iface -= 1
                continue

            if DESCRIPTOR == 'CALONDER':
                self.vo.collect_descriptors(ia)
            elif DESCRIPTOR == 'SAD':
                self.vo.collect_descriptors_sad(ia)
            else:
                pass

            self.current_keyframes.append(0)
            self.keyframes.append(copy.copy(ia))
            self.feats_to_centers.append(self.make_face_model(cen, diff, ia.kp2d))
            self.real_face_sizes_3d.append(copy.deepcopy(fs3d))
            self.feats_to_centers_3d.append(self.make_face_model(cen3d, (fs3d, fs3d, fs3d), ia.kp3d))
            self.face_centers_3d.append(copy.deepcopy(cen3d))
            self.recent_good_frames.append(copy.copy(ia))
            self.recent_good_rects.append(copy.deepcopy([x, y, w, h]))
            self.recent_good_centers_3d.append(copy.deepcopy(cen3d))
            self.recent_good_motion.append([0.0] * 3)  #dx,dy,dimfacesize
            self.recent_good_motion_3d.append([0.0] * 3)
            self.same_key_rgfs.append(True)

            if DEBUG:
                print "cen2d", cen
                print "cen3d", self.face_centers_3d[iface]
        # End first frame

        # Later frames
        else:
            if DESCRIPTOR == 'CALONDER':
                self.vo.collect_descriptors(ia)
            elif DESCRIPTOR == 'SAD':
                self.vo.collect_descriptors_sad(ia)
            else:
                pass

            done_matching = False
            bad_frame = False
            while not done_matching:

                # Try matching to the keyframe
                keyframe = self.keyframes[self.current_keyframes[iface]]
                temp_match = self.vo.temporal_match(ia, keyframe, want_distances=True)
                ia.matches = [(m2, m1) for (m1, m2, m3) in temp_match]
                ia.desc_diffs = [m3 for (m1, m2, m3) in temp_match]
                print "temp matches", temp_match
                ia.good_matches = [s < self.desc_diff_thresh for s in ia.desc_diffs]
                n_good_matches = len([m for m in ia.desc_diffs if m < self.desc_diff_thresh])

                if DEBUG:
                    if len(keyframe.kp) < 2:
                        print "Keyframe has less than 2 kps"
                    if n_good_matches < len(keyframe.kp) / 2.0:
                        print "ngoodmatches, len key.kp, len key.kp/2", n_good_matches, len(keyframe.kp), len(keyframe.kp) / 2.0

                # Not enough matches, get a new keyframe
                if len(keyframe.kp) < 2 or n_good_matches < len(keyframe.kp) / 2.0:
                    if DEBUG:
                        print "New keyframe"

                    # Make a new face model, either from a recent good frame, or from the current image
                    if not self.same_key_rgfs[iface]:
                        if DEBUG:
                            print "centers at beginning of new keyframe"
                            print "cen2d", [self.faces[iface][0] + self.faces[iface][2] / 2.0,
                                            self.faces[iface][1] + self.faces[iface][3] / 2.0]
                            print "cen3d", self.face_centers_3d[iface]

                        matched_z_list = [kp3d[2] for (kp3d, is_good)
                                          in zip(self.recent_good_frames[iface].kp3d,
                                                 self.recent_good_frames[iface].good_matches)
                                          if is_good]
                        if len(matched_z_list) == 0:
                            matched_z_list = [kp3d[2] for kp3d in self.recent_good_frames[iface].kp3d]
                        avgz_goodmatches = sum(matched_z_list) / len(matched_z_list)
                        tokeep = [math.fabs(self.recent_good_frames[iface].kp3d[i][2] - avgz_goodmatches) < 2.0 * self.real_face_sizes_3d[iface]
                                  for i in range(len(self.recent_good_frames[iface].kp3d))]
                        kp3d_for_model = [kp3d for (kp3d, tk) in zip(self.recent_good_frames[iface].kp3d, tokeep) if tk]
                        kp_for_model = [kp for (kp, tk) in zip(self.recent_good_frames[iface].kp, tokeep) if tk]
                        # If you're not left with enough points, just take all of them and don't worry about the depth constraints.
                        if len(kp3d_for_model) < 2:
                            kp3d_for_model = copy.deepcopy(self.recent_good_frames[iface].kp3d)
                            kp_for_model = copy.deepcopy(self.recent_good_frames[iface].kp)

                        (cen, diff) = self.rect_to_center_diff(self.recent_good_rects[iface])
                        self.feats_to_centers[iface] = self.make_face_model(cen, diff, [(kp0, kp1) for (kp0, kp1, kp2) in kp_for_model])
                        cen3d = self.recent_good_centers_3d[iface]
                        self.feats_to_centers_3d[iface] = self.make_face_model(cen3d, [self.real_face_sizes_3d[iface]] * 3, kp3d_for_model)
                        self.keyframes[self.current_keyframes[iface]] = copy.copy(self.recent_good_frames[iface])
                        self.keyframes[self.current_keyframes[iface]].kp = kp_for_model
                        self.keyframes[self.current_keyframes[iface]].kp2d = [(k0, k1) for (k0, k1, k2) in kp_for_model]
                        self.keyframes[self.current_keyframes[iface]].kp3d = kp3d_for_model
                        self.keyframes[self.current_keyframes[iface]].matches = [(i, i) for i in range(len(kp_for_model))]
                        self.keyframes[self.current_keyframes[iface]].good_matches = [True] * len(kp_for_model)
                        self.keyframes[self.current_keyframes[iface]].desc_diffs = [0] * len(kp_for_model)
                        if DESCRIPTOR == 'CALONDER':
                            self.vo.collect_descriptors(self.keyframes[self.current_keyframes[iface]])
                        elif DESCRIPTOR == 'SAD':
                            self.vo.collect_descriptors_sad(self.keyframes[self.current_keyframes[iface]])
                        else:
                            pass
                        self.face_centers_3d[iface] = copy.deepcopy(cen3d)
                        # Not changing the face size
                        self.current_keyframes[iface] = 0  #### HACK: ONLY ONE KEYFRAME!!!
                        self.same_key_rgfs[iface] = True
                        # Don't need to change the recent good frame yet.

                        if DEBUG:
                            print "centers at end of new keyframe"
                            print "cen2d", [self.faces[iface][0] + self.faces[iface][2] / 2.0,
                                            self.faces[iface][1] + self.faces[iface][3] / 2.0]
                            print "cen3d", self.face_centers_3d[iface]

                    else:
                        # Making a new model off of the current frame but with the predicted new position.
                        # HACK: The displacement computation assumes that the robot/head is still, fix this.
                        bad_frame = True
                        #done_matching = True
                        if DEBUG:
                            print "Bad frame ", self.seq, " for face ", iface
                        (cen, diff) = self.rect_to_center_diff(self.faces[iface])
                        if DEBUG:
                            print "Motion for bad frame ", self.recent_good_motion[iface], self.recent_good_motion_3d[iface]
                        new_cen = [cen[0] + self.recent_good_motion[iface][0],
                                   cen[1] + self.recent_good_motion[iface][1]]
                        diff = [diff[0] + self.recent_good_motion[iface][2],
                                diff[1] + self.recent_good_motion[iface][2]]
                        self.faces[iface] = (new_cen[0] - diff[0], new_cen[1] - diff[1], 2.0 * diff[0], 2.0 * diff[1])
                        (x, y, w, h) = copy.deepcopy(self.faces[iface])
                        pred_cen_3d = [o + n for (o, n) in zip(self.face_centers_3d[iface], self.recent_good_motion_3d[iface])]
                        pred_cen_3d.append(2.0 * self.real_face_sizes_3d[iface])  #### ZMULT
                        self.get_features(ia, self.num_feats, (x, y, w, h), pred_cen_3d)
                        if not ia.kp2d:
                            break
                        if DESCRIPTOR == 'CALONDER':
                            self.vo.collect_descriptors(ia)
                        elif DESCRIPTOR == 'SAD':
                            self.vo.collect_descriptors_sad(ia)
                        else:
                            pass
                        self.keyframes[self.current_keyframes[iface]] = copy.copy(ia)
                        self.current_keyframes[iface] = 0
                        (cen, diff) = self.rect_to_center_diff(self.faces[iface])
                        self.feats_to_centers[iface] = self.make_face_model(cen, diff, ia.kp2d)
                        self.feats_to_centers_3d[iface] = self.make_face_model([pred_cen_3d[0], pred_cen_3d[1], pred_cen_3d[2]],
                                                                               [self.real_face_sizes_3d[iface]] * 3, ia.kp3d)
                        self.face_centers_3d[iface] = copy.deepcopy(pred_cen_3d)
                        self.same_key_rgfs[iface] = True

                # Good matches, mark this frame as good
                else:
                    done_matching = True

            # END MATCHING

            # If we got enough matches for this frame, track.
            if ia.kp and ia.kp2d:

                # Track
                sparse_pred_list = []
                sparse_pred_list_2d = []
                probs = []
                bandwidths = []
                size_mult = 0.05  #1.0
                for ((match1, match2), score) in zip(ia.matches, ia.desc_diffs):
                    if score < self.desc_diff_thresh:
                        sparse_pred_list.append([ia.kp3d[match2][i] + self.feats_to_centers_3d[iface][match1][i] for i in range(3)])
                        sparse_pred_list_2d.append([ia.kp2d[match2][i] + self.feats_to_centers[iface][match1][i] for i in range(2)])
                        #probs.append(score)
                probs = [1.0] * len(sparse_pred_list_2d)  # Ignore actual match scores. Uncomment line above to use the match scores.
                bandwidths = [size_mult * self.real_face_sizes_3d[iface]] * len(sparse_pred_list_2d)

                (old_center, old_diff) = self.rect_to_center_diff(self.faces[iface])
                if DEBUG:
                    print "Old center 3d ", self.face_centers_3d[iface]
                    print "Old center 2d ", old_center

                old_rect = self.faces[iface]  # For display only
                new_center = self.mean_shift_sparse(self.face_centers_3d[iface][0:3], sparse_pred_list, probs, bandwidths, 10, 1.0)
                new_center_2d = self.cam.cam2pix(new_center[0], new_center[1], new_center[2])
                # The above line assumes that we're tracking the face plane center, not the center of the head sphere.
                # If you want to track the center of the sphere instead, subtract self.real_face_sizes[iface] from the z-coord.
                ltf = self.cam.cam2pix(new_center[0] - self.real_face_sizes_3d[iface],
                                       new_center[1] - self.real_face_sizes_3d[iface],
                                       new_center[2])
                rbf = self.cam.cam2pix(new_center[0] + self.real_face_sizes_3d[iface],
                                       new_center[1] + self.real_face_sizes_3d[iface],
                                       new_center[2])
                w = rbf[0] - ltf[0]
                h = rbf[1] - ltf[1]
                if DEBUG:
                    print "new center 3d ", new_center
                    print "new_center 2d ", new_center_2d
                (nx, ny, nw, nh) = (new_center_2d[0] - (w - 1) / 2.0, new_center_2d[1] - (h - 1) / 2.0, w, h)

                # Force the window back into the image.
                nx += max(0, 0 - nx) + min(0, im.width - (nx + nw))   # was "im.width - nx + nw", which never clamps the right edge
                ny += max(0, 0 - ny) + min(0, im.height - (ny + nh))  # was "im.height - ny + nh"
                self.faces[iface] = [nx, ny, nw, nh]
                self.recent_good_rects[iface] = [nx, ny, nw, nh]
                self.recent_good_centers_3d[iface] = copy.deepcopy(new_center)

                if bad_frame:
                    self.recent_good_motion[iface] = self.recent_good_motion[iface]
                    self.recent_good_motion_3d[iface] = self.recent_good_motion_3d[iface]
                else:
                    self.recent_good_motion[iface] = [new_center_2d[0] - old_center[0],
                                                      new_center_2d[1] - old_center[1],
                                                      ((nw - 1.0) / 2.0) - old_diff[0]]
                    self.recent_good_motion_3d[iface] = [new_center[i] - self.face_centers_3d[iface][i] for i in range(len(new_center))]
                self.face_centers_3d[iface] = copy.deepcopy(new_center)
                self.recent_good_frames[iface] = copy.copy(ia)
                self.same_key_rgfs[iface] = False

                if DEBUG:
                    print "motion ", self.recent_good_motion[iface], self.recent_good_motion_3d[iface]
                    print "face 2d ", self.faces[iface]
                    print "face center 3d ", self.face_centers_3d[iface]

                # Output the location of this face center in the 3D camera frame (of the left camera), and rotate
                # the coordinates to match the robot's idea of the 3D camera frame.
                center_uvd = (nx + (nw - 1) / 2.0, ny + (nh - 1) / 2.0, (numpy.average(ia.kp, 0))[2])
                center_camXYZ = self.cam.pix2cam(center_uvd[0], center_uvd[1], center_uvd[2])
                center_robXYZ = (center_camXYZ[2], -center_camXYZ[0], -center_camXYZ[1])

                ########### PUBLISH the face center for the head controller to track. ########
                if not self.usebag:
                    #stamped_point = PointStamped()
                    #(stamped_point.point.x, stamped_point.point.y, stamped_point.point.z) = center_robXYZ
                    #stamped_point.header.frame_id = "stereo"
                    #stamped_point.header.stamp = imarray.header.stamp
                    #self.pub.publish(stamped_point)
                    pm = PositionMeasurement()
                    pm.header.stamp = imarray.header.stamp
                    pm.name = "stereo_face_feature_tracker"
                    pm.object_id = -1
                    (pm.pos.x, pm.pos.y, pm.pos.z) = center_robXYZ
                    pm.header.frame_id = "stereo_link"
                    pm.reliability = 0.5
                    pm.initialization = 0
                    #pm.covariance
                    self.pub.publish(pm)
        # End later frames

        ############ DRAWING ################
        if SAVE_PICS:
            if not self.keyframes or len(self.keyframes) <= iface:
                bigim_py = im_py
                draw = ImageDraw.Draw(bigim_py)
            else:
                key_im = self.keyframes[self.current_keyframes[iface]]
                keyim_py = Image.fromstring("L", key_im.size, key_im.rawdata)
                bigim_py = Image.new("RGB", (im_py.size[0] + key_im.size[0], im_py.size[1]))
                bigim_py.paste(keyim_py.convert("RGB"), (0, 0))
                bigim_py.paste(im_py, (key_im.size[0] + 1, 0))
                draw = ImageDraw.Draw(bigim_py)

            # NOTE: the drawing below uses key_im and so assumes the else-branch above ran,
            # which is the case once a keyframe has been stored for this face.
            (x, y, w, h) = self.faces[iface]
            draw.rectangle((x, y, x + w, y + h), outline=(0, 255, 0))
            draw.rectangle((x + key_im.size[0], y, x + w + key_im.size[0], y + h), outline=(0, 255, 0))
            (x, y, w, h) = old_rect
            draw.rectangle((x, y, x + w, y + h), outline=(255, 255, 255))
            draw.rectangle((x + key_im.size[0], y, x + w + key_im.size[0], y + h), outline=(255, 255, 255))
            mstart = old_center
            mend = (old_center[0] + self.recent_good_motion[iface][0], old_center[1] + self.recent_good_motion[iface][1])
            draw.rectangle((mstart[0] - 1, mstart[1] - 1, mstart[0] + 1, mstart[1] + 1), outline=(255, 255, 255))
            draw.rectangle((mend[0] - 1, mend[1] - 1, mend[0] + 1, mend[1] + 1), outline=(0, 255, 0))
            draw.line(mstart + mend, fill=(255, 255, 255))

            for (x, y) in key_im.kp2d:
                draw_x(draw, (x, y), (1, 1), (255, 0, 0))
            for (x, y) in ia.kp2d:
                draw_x(draw, (x + key_im.size[0], y), (1, 1), (255, 0, 0))

            if self.seq > 0:
                if ia.matches:
                    for ((m1, m2), score) in zip(ia.matches, ia.desc_diffs):
                        if score > self.desc_diff_thresh:
                            color = (255, 0, 0)
                        else:
                            color = (0, 255, 0)
                        draw.line((key_im.kp2d[m1][0], key_im.kp2d[m1][1],
                                   ia.kp2d[m2][0] + key_im.size[0], ia.kp2d[m2][1]), fill=color)
                for (i, (u, v)) in enumerate(sparse_pred_list_2d):
                    bscale = min(1, probs[i] / 0.01)
                    draw_x(draw, (u, v), (1, 1),
                           (128.0 + 128.0 * bscale, 128.0 + 128.0 * bscale, (1.0 - bscale) * 255.0))
                    draw_x(draw, (u + key_im.size[0], v), (1, 1),
                           (128.0 + 128.0 * bscale, 128.0 + 128.0 * bscale, (1.0 - bscale) * 255.0))

            ####### PUBLISH 3d visualization point cloud ###################
            if self.usebag and self.visualize:
                cloud = PointCloud()
                cloud.header.frame_id = "stereo"
                cloud.header.stamp = imarray.header.stamp
                cloud.pts = []
                cloud.pts.append(Point())
                (cloud.pts[0].x, cloud.pts[0].y, cloud.pts[0].z) = self.face_centers_3d[iface][:3]
                for (i, kp3d) in enumerate(ia.kp3d):
                    cloud.pts.append(Point())
                    (cloud.pts[i + 1].x, cloud.pts[i + 1].y, cloud.pts[i + 1].z) = kp3d  # was pts[i], which overwrote the face center
                lp = len(cloud.pts)
                if self.seq > 0:
                    for (i, pred) in enumerate(sparse_pred_list):
                        cloud.pts.append(Point())  # was cloud.pts[lp+i].append(Point()), which cannot work on a Point
                        (cloud.pts[lp + i].x, cloud.pts[lp + i].y, cloud.pts[lp + i].z) = pred[:3]
                self.pub.publish(cloud)

            bigim_py.save("/tmp/tiff/feats%06d_%03d.tiff" % (self.seq, iface))
        #END DRAWING

    # END FACE LOOP

    self.seq += 1
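

# mean_shift_sparse() is called above to fuse the per-feature predictions of the face center but
# is not defined in this file. The function below is a minimal sketch of a weighted mean-shift
# step over a sparse set of votes, assuming the two trailing arguments at the call sites (10 and
# 1.0 / 5.0) are an iteration cap and a convergence tolerance; the real implementation may differ.
import math

def mean_shift_sparse_sketch(start, points, weights, bandwidths, max_iters, tol):
    center = list(start)
    for _ in range(max_iters):
        num = [0.0] * len(center)
        den = 0.0
        for (p, wgt, bw) in zip(points, weights, bandwidths):
            d2 = sum((pi - ci) ** 2 for (pi, ci) in zip(p, center))
            k = wgt * math.exp(-d2 / (2.0 * bw * bw))   # Gaussian kernel weight for this vote
            for i in range(len(center)):
                num[i] += k * p[i]
            den += k
        if den == 0.0:
            break                                        # no support near the current estimate
        new_center = [n / den for n in num]
        shift = math.sqrt(sum((a - b) ** 2 for (a, b) in zip(new_center, center)))
        center = new_center
        if shift < tol:
            break
    return center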
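

# self.cam above converts between pixel/disparity coordinates (u, v, d) and the left-camera 3D
# frame (X, Y, Z) via pix2cam()/cam2pix(). The class below is a sketch of the usual rectified
# stereo pinhole model only; the focal lengths, principal point, and baseline are hypothetical
# parameters and are not values taken from this tracker.
class StereoCamSketch:
    def __init__(self, fx, fy, cx, cy, baseline):
        (self.fx, self.fy, self.cx, self.cy, self.b) = (fx, fy, cx, cy, baseline)

    def pix2cam(self, u, v, d):
        Z = self.fx * self.b / d               # depth from disparity
        X = (u - self.cx) * Z / self.fx
        Y = (v - self.cy) * Z / self.fy
        return (X, Y, Z)

    def cam2pix(self, X, Y, Z):
        u = self.fx * X / Z + self.cx
        v = self.fy * Y / Z + self.cy
        d = self.fx * self.b / Z               # disparity from depth
        return (u, v, d)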