def handleTags(msg): global pub global lastDir global lastSeen width = msg.image_width height = msg.image_height biggest = Tag() for tag in msg.tags: if (tag.diameter > biggest.diameter): biggest = tag #To only rotate when tag not detected if biggest.diameter == 0: twist = Twist() twist.linear.x = 0 if (time() - lastSeen > .5): twist.angular.z = .75*lastDir pub.publish(twist) print twist # reset velocity and distance distance.get_velocity(0) yRot.get_velocity(0) return lastSeen = time() #Determines the co-ordinates of corners in the ar_image square cx = 0; cy = 0 for i in [0,2,4,6]: cx = cx + biggest.cwCorners[i] cy = cy + biggest.cwCorners[i+1] cx = cx / 4. / width cy = cy / 4. / height twist = Twist() # move foward and backward, trying to stop at stopping_dist stopping_dist = 2000. dist = (biggest.distance - stopping_dist) / stopping_dist print biggest.distance print dist dist_vel = distance.get_velocity(dist) if abs(dist) < 0.25: # if we are close enough to the stopping distance, just try to stop twist.linear.x = dist_vel * 0.5 twist.angular.z = 0 else: # otherwise try to move within stopping_dist twist.linear.x = dist * 0.5 twist.angular.z = 0 print twist.linear.x pub.publish(twist) twist.angular.z = (-(cx - .5)/.25)
def handleTags(msg): global pub global lastDir global lastSeen width = msg.image_width height = msg.image_height biggest = Tag() print Msg
def handleTags(msg):
    """Scan the incoming tag message and remember the largest (closest) tag."""
    global pub
    global lastDir
    global lastSeen

    width = msg.image_width
    height = msg.image_height

    # Start from an empty Tag (diameter 0) so any real detection wins.
    biggest = Tag()
    for candidate in msg.tags:
        if candidate.diameter > biggest.diameter:
            biggest = candidate
def handleTag(msg): global pub global lastDir global lastSeen width = msg.image_width height = msg.image_height toggle = toggle_cam() toggle.t_cam() print "this is camera", toggle.cam_channels #---------------------------------------------------------------------------------------------------------------------------- if toggle.cam_channels == 1: print "following the tag" biggest = Tag() for tag in msg.tags: if (tag.diameter > biggest.diameter): biggest = tag if biggest.diameter == 0: twist = Twist() twist.linear.x = 0 if (time() - lastSeen > .5): twist.angular.z = .5*lastDir pub.publish(twist) return lastSeen = time() cx = 0; cy = 0 for i in [0,2,4,6]: cx = cx + biggest.cwCorners[i] cy = cy + biggest.cwCorners[i+1] cx = cx / 4. / width cy = cy / 4. / height twist = Twist() if (biggest.distance - 500 > 8): twist.linear.x = ((biggest.distance - 500.) / 500.) * .25 if (twist.linear.x < 0): twist.linear.x = 0 twist.angular.z = (-(cx - .5)/.5) if (twist.angular.z < 0): lastDir = -1 else: lastDir = 1 pub.publish(twist) print twist
def handleTags(msg):
    """Steer toward the largest visible tag; spin to search when none is seen."""
    global pub
    global lastDir
    global lastSeen

    width = msg.image_width
    height = msg.image_height

    # Largest tag wins; the fresh Tag() baseline has diameter 0.
    biggest = Tag()
    for candidate in msg.tags:
        if candidate.diameter > biggest.diameter:
            biggest = candidate

    if biggest.diameter == 0:
        # Tag lost: halt forward motion and, after half a second, rotate
        # toward where it was last seen.
        twist = Twist()
        twist.linear.x = 0
        if time() - lastSeen > .5:
            twist.angular.z = .5 * lastDir
        pub.publish(twist)
        return

    lastSeen = time()

    # Tag centre = mean of the four clockwise corners, normalised to [0, 1].
    cx = 0
    cy = 0
    for idx in [0, 2, 4, 6]:
        cx += biggest.cwCorners[idx]
        cy += biggest.cwCorners[idx + 1]
    cx = cx / 4. / width
    cy = cy / 4. / height

    twist = Twist()
    # Drive forward proportionally once the tag is farther than ~508 units.
    if biggest.distance - 500 > 8:
        twist.linear.x = ((biggest.distance - 500.) / 500.) * .25
    if twist.linear.x < 0:
        twist.linear.x = 0
    # Yaw so the tag centre moves toward the image centre; remember the side.
    twist.angular.z = -(cx - .5) / .5
    lastDir = -1 if twist.angular.z < 0 else 1
    pub.publish(twist)
def ImageProcessor(self):
    """
    Process each frame
        > detect left and right hand-gestures
        > perform mapping to {left token, right token}s
        > use Finite state machine to generate full instruction
        see more details in the paper: ieeexplore.ieee.org/document/8543168
    """
    # get the tokens (and the bounding boxes for vizualization)
    left_token, left_box, right_token, right_box, success_ = self.gest_rec.Get_gest(self.original, self.use_single_hand)
    print ("Hand gestures detection success: {2}. token: ({0}, {1})".format(right_token, left_token, success_))
    if success_:
        # ROBO_GEST mode
        if self.robo_gest_mode:
            # reverse left and right since camera(left, right) == person(right, left)
            # then pass it to generate instruction
            get_token, done_ = self.ins.decode(right_token, left_token)
            print (get_token, done_)
            if done_:
                print
                print ("*** Decoded Instruction: {0}".format(get_token))
                print
        # For Menue Selection only
        if self.menue_mode:
            #Notifies aquamenu that the gesture is waiting.
            msg = String()
            msg.data = 'WAITING'
            self.stat_pub.publish(msg)
            men_ins_, men_done_ = self.men_sel.decode(right_token, left_token)
            #print(men_ins_, men_done_)
            if men_done_:
                #Notifies aquamenu that the gesture is done.
                msg.data = 'DONE'
                self.stat_pub.publish(msg)
                print
                print ("Decoded Instruction: {0}".format(men_ins_))
                print
                men_tok = men_ins_.split(' ')
                # BUGFIX: the guard must check len > 1 because men_tok[1] is
                # read below; str.split always returns >= 1 element, so the
                # old `len(men_tok) > 0` never prevented an IndexError on
                # single-word instructions.
                if (len(men_tok) > 1 and men_tok[1] in self.menue_map.keys()):
                    menue_selected = self.menue_map[men_tok[1]]
                    msg = Tags()
                    tag = Tag()
                    tag.id = menue_selected
                    msg.tags = [tag]
                    self.tags_pub.publish(msg)
                    print ('***** Menue selected :: {0}'.format(menue_selected))
                    print
        # NOTE(review): drawing only runs on a successful detection, since the
        # boxes may be None otherwise.
        if (self.bench_test or self.publish_image):
            localised_objs = [(left_token, left_box), (right_token, right_box)]
            self.draw_boxes_and_labels(localised_objs)
    # Display and/or publish the (possibly annotated) frame every cycle.
    if self.bench_test:
        self.showFrame(self.original, 'test_viz')
    if self.publish_image:
        msg_frame = CvBridge().cv2_to_imgmsg(self.original, encoding="bgr8")
        self.ProcessedRaw.publish(msg_frame)
def ImageProcessor(self):
    """
    Process each frame
        > detect left and right hand-gestures
        > perform mapping to {left token, right token}s
        > use Finite state machine to generate full instruction
        see more details in the paper: ieeexplore.ieee.org/document/8543168
    """
    # get the tokens (and the bounding boxes for vizualization)
    left_token, left_box, right_token, right_box, success_ = self.gest_rec.Get_gest(
        self.original, self.use_single_hand)
    print("Hand gestures detection success: {2}. token: ({0}, {1})".format(
        right_token, left_token, success_))
    if success_:
        # ROBO_GEST mode
        if self.robo_gest_mode:
            # reverse left and right since camera(left, right) == person(right, left)
            # then pass it to generate instruction
            get_token, done_ = self.ins.decode(right_token, left_token)
            print(get_token, done_)
            if done_:
                print
                print("*** Decoded Instruction: {0}".format(get_token))
                print
        # For Menue Selection only
        if self.menue_mode:
            men_ins_, men_done_ = self.men_sel.decode(
                right_token, left_token)
            #print(men_ins_, men_done_)
            if men_done_:
                print
                print("Decoded Instruction: {0}".format(men_ins_))
                print
                men_tok = men_ins_.split(' ')
                # BUGFIX: the guard must check len > 1 because men_tok[1] is
                # read below; str.split always returns >= 1 element, so the
                # old `len(men_tok) > 0` never prevented an IndexError on
                # single-word instructions.
                if (len(men_tok) > 1
                        and men_tok[1] in self.menue_map.keys()):
                    menue_selected = self.menue_map[men_tok[1]]
                    msg = Tags()
                    tag = Tag()
                    tag.id = menue_selected
                    msg.tags = [tag]
                    self.tags_pub.publish(msg)
                    print('***** Menue selected :: {0}'.format(
                        menue_selected))
                    print
    # Display and/or publish the frame every cycle (left_box may be None when
    # detection failed, hence the explicit check below).
    if self.bench_test:
        self.showFrame(self.original, 'test_viz')
    if self.publish_image:
        if left_box is not None:  # idiom: identity test against None
            output_img = cv2.rectangle(self.original,
                                       (left_box[0], left_box[2]),
                                       (left_box[1], left_box[3]),
                                       (255, 0, 0), 2)
        else:
            output_img = self.original
        msg_frame = CvBridge().cv2_to_imgmsg(output_img, encoding="bgr8")
        self.ProcessedRaw.publish(msg_frame)
def handleTags(msg):
    """Tag follower: hold a standoff distance, work toward facing the tag
    perpendicularly, and keep it centred in the image."""
    global pub
    global lastDir
    global lastSeen

    width = msg.image_width
    height = msg.image_height

    # Largest-diameter tag wins; the fresh Tag() baseline has diameter 0.
    biggest = Tag()
    for candidate in msg.tags:
        if candidate.diameter > biggest.diameter:
            biggest = candidate

    if biggest.diameter == 0:
        # Nothing detected: stop, spin toward the last known side after 0.5 s,
        # and reset both velocity controllers.
        twist = Twist()
        twist.linear.x = 0
        if time() - lastSeen > .5:
            twist.angular.z = .5 * lastDir
        pub.publish(twist)
        # reset velocity and distance
        distance.get_velocity(0)
        yRot.get_velocity(0)
        return

    lastSeen = time()

    # Normalised tag centre from the mean of the four clockwise corners.
    cx = 0
    cy = 0
    for idx in [0, 2, 4, 6]:
        cx += biggest.cwCorners[idx]
        cy += biggest.cwCorners[idx + 1]
    cx = cx / 4. / width
    cy = cy / 4. / height

    twist = Twist()

    # Forward/backward: aim to hover at stopping_dist from the tag.
    stopping_dist = 2000.
    dist = (biggest.distance - stopping_dist) / stopping_dist
    dist_vel = distance.get_velocity(dist)
    if abs(dist) < 0.25:
        # Close enough: damp using the controller output.
        twist.linear.x = dist_vel * 0.25
    else:
        # Far away: move proportionally toward stopping_dist.
        twist.linear.x = dist * 0.25
    twist.linear.x = max(-0.05, min(0.05, twist.linear.x))

    # Lateral: work toward facing the tag perpendicularly.
    yRot_velocity = yRot.get_velocity(biggest.yRot)
    if abs(biggest.yRot) < 0.5:
        # Mostly perpendicular already: damp with the controller output.
        twist.linear.y = yRot_velocity * 0.25
    else:
        # Slide toward being directly in front of the tag.
        twist.linear.y = biggest.yRot * 0.25
    twist.linear.y = max(-0.05, min(0.05, twist.linear.y))

    # Yaw to centre the tag horizontally; remember which side it is on.
    twist.angular.z = -(cx - .5) / .5
    lastDir = -1 if twist.angular.z < 0 else 1
    pub.publish(twist)
def up_ugv(self, Tag):
    """Follow the tag with the UGV: rotate to reacquire when lost, otherwise
    drive and steer to hold a standoff distance from the tag.

    NOTE(review): the parameter name `Tag` shadows the Tag message class, so
    `Tag()` below instantiates whatever object the caller passed in — confirm
    callers pass the class itself.
    """
    global pub
    global lastDir
    global lastSeen
    print "started following"
    biggest = Tag()
    # NOTE(review): `biggest` is a freshly constructed object (diameter 0), and
    # msg.tags is never scanned here, so this branch appears to always run and
    # the code after the `return` looks unreachable — verify.
    if biggest.diameter == 0:
        twist = Twist()
        twist.linear.x = 0
        # Spin toward the side the tag was last seen once gone for > 0.5 s.
        if (time() - lastSeen > .5):
            twist.angular.z = .5*lastDir
        pub.publish(twist)
        # reset velocity and distance
        distance.get_velocity(0)
        yRot.get_velocity(0)
        return
    lastSeen = time()
    #Determines the co-ordinates of corners in the ar_image square
    cx = 0; cy = 0
    for i in [0,2,4,6]:
        cx = cx + biggest.cwCorners[i]
        cy = cy + biggest.cwCorners[i+1]
    # NOTE(review): `width` and `height` are not defined in this function and
    # are not declared global above — this would raise NameError if reached.
    cx = cx / 4. / width
    cy = cy / 4. / height
    twist = Twist()
    # move foward and backward, trying to stop at stopping_dist
    stopping_dist = 2000.
    dist = (biggest.distance - stopping_dist) / stopping_dist
    print "distance is %f" %biggest.distance
    print "dist is %f " %dist
    dist_vel = distance.get_velocity(dist)
    if abs(dist) < 0.25:
        # if we are close enough to the stopping distance, just try to stop
        twist.linear.x = dist_vel * 0.5
        twist.angular.z = 0
    else:
        # otherwise try to move within stopping_dist
        twist.linear.x = dist * 0.5
        twist.angular.z = 0
    print twist.linear.x
    pub.publish(twist)
    # NOTE(review): everything below runs after the first publish; a second
    # Twist is published at the end with the recomputed fields.
    twist.angular.z = (-(cx - .5)/.25)
    #Keep itself from the tag at certain distance
    if (twist.angular.z < 0):
        lastDir = -1
        # Speed inversely proportional to apparent size; back off when large.
        twist.linear.x = (1/(biggest.diameter))*40
        if (biggest.diameter>120):
            twist.linear.x= -(biggest.diameter)*0.0025
        if (biggest.diameter<120 and biggest.diameter>100):
            twist.linear.x = 0
    else:
        lastDir = 1
        twist.linear.x = (1/(biggest.diameter))*40
        if (biggest.diameter>120):
            twist.linear.x = -(biggest.diameter)*0.0025
        if (biggest.diameter<120 and biggest.diameter>100):
            twist.linear.x = 0
    pub.publish(twist)
    print twist
    print "Diameter is %f " %biggest.diameter
def handleTag(msg):
    """Two-mode tag handler selected by the active camera channel.

    Channel 0: follow the tag on the ground plane (forward + yaw).
    Otherwise: station-keep, commanding vertical (linear.z) and lateral
    (linear.y) velocities relative to the tag.
    """
    global pub
    global lastDir
    global lastSeen
    width = msg.image_width
    height = msg.image_height
    toggle = Toggle_cam()
    toggle.t_cam()
    print "this is camera", toggle.cam_channels
    if toggle.cam_channels == 0:
        print "following the tag"
        # Largest-diameter tag wins; a fresh Tag() has diameter 0.
        biggest = Tag()
        for tag in msg.tags:
            if tag.diameter > biggest.diameter:
                biggest = tag
        if biggest.diameter == 0:
            # Tag lost: stop, then spin toward the last seen side after 0.5 s.
            twist = Twist()
            twist.linear.x = 0
            if (time() - lastSeen > .5):
                twist.angular.z = .5 * lastDir
            pub.publish(twist)
            return
        print "Tag detected"
        lastSeen = time()
        # Tag centre = mean of the four clockwise corners, normalised by the
        # image dimensions.
        cx = 0
        cy = 0
        for i in [0, 2, 4, 6]:
            cx = cx + biggest.cwCorners[i]
            cy = cy + biggest.cwCorners[i + 1]
        cx = cx / 4. / width
        cy = cy / 4. / height
        twist = Twist()
        # Drive forward proportionally once farther than ~508 units.
        if biggest.distance - 500 > 8:
            twist.linear.x = ((biggest.distance - 500.) / 500.) * .25
        if twist.linear.x < 0:
            twist.linear.x = 0
        # Yaw so the tag centre moves toward the image centre.
        twist.angular.z = (-(cx - .5) / .5)
        if twist.angular.z < 0:
            lastDir = -1
        else:
            lastDir = 1
        pub.publish(twist)
        print twist
    # elif toggle.cam_channels == 1:
    else:
        print "keeping position"
        biggest = Tag()
        twist = Twist()
        for tag in msg.tags:
            if tag.diameter > biggest.diameter:
                biggest = tag
        if biggest.diameter == 0:
            twist.linear.x = 0
            if (time() - lastSeen > .5):
                # NOTE(review): this branch sets linear.z (vertical), unlike
                # the follow branch which spins with angular.z — confirm.
                twist.linear.z = .5 * lastDir
            pub.publish(twist)
            return
        lastSeen = time()
        # Normalised tag centre from the corner average, as above.
        cx = 0
        cy = 0
        for i in [0, 2, 4, 6]:
            cx = cx + biggest.cwCorners[i]
            cy = cy + biggest.cwCorners[i + 1]
        cx = cx / 4. / width
        cy = cy / 4. / height
        twist = Twist()
        # move foward and backward, trying to stop at stopping_dist
        stopping_dist = 2000.
        dist = (biggest.distance - stopping_dist) / stopping_dist
        print biggest.distance
        print dist
        dist_vel = distance.get_velocity(dist)
        if abs(dist) < 0.25:
            # if we are close enough to the stopping distance, just try to stop
            twist.linear.z = dist_vel * 0.25
        else:
            # otherwise try to move within stopping_dist
            twist.linear.z = dist * 0.25
        print twist.linear.z
        # NOTE(review): twist.linear.x is still 0 at this point, so this clamp
        # always yields 0.03 — a constant forward creep. Confirm intended.
        twist.linear.x = max(
            0.03, min(0.05, twist.linear.x)
        )  #change done here for stopping at a optimal distance after seeing the tag by krishna
        # try to face perpendicular to the tag
        yRot_velocity = yRot.get_velocity(biggest.yRot)
        if abs(biggest.yRot) < 0.5:
            # if we are mostly facing perpendicular, just try to stay still
            twist.linear.y = yRot_velocity * 0.25
        else:
            # otherwise, rotate towards being in front of the tag
            #t wist.linear.y = biggest.yRot * 0.25
            # NOTE(review): the assignment above is commented out, so linear.y
            # is still 0 here and this clamp is a no-op in this branch.
            twist.linear.y = max(-0.05, min(0.05, twist.linear.y))
        # rotate to face the tag
        twist.angular.z = (-(cx - .5) / .5)
        if twist.angular.z < 0:
            lastDir = -1
        else:
            lastDir = 1
        pub.publish(twist)
        print "printing twist"
        print twist
def down_drone(): biggest = Tag() print "positioning" if biggest.diameter == 0: twist = Twist() twist.linear.x = 0 if (time() - lastSeen > .5): twist.angular.z = .5 * lastDir pub.publish(twist) # reset velocity and distance distance.get_velocity(0) yRot.get_velocity(0) return lastSeen = time() cx = 0 cy = 0 for i in [0, 2, 4, 6]: cx = cx + biggest.cwCorners[i] cy = cy + biggest.cwCorners[i + 1] cx = cx / 4. / width cy = cy / 4. / height twist = Twist() # move foward and backward, trying to stop at stopping_dist stopping_dist = 2000. dist = (biggest.distance - stopping_dist) / stopping_dist print biggest.distance print dist dist_vel = distance.get_velocity(dist) if abs(dist) < 0.25: # if we are close enough to the stopping distance, just try to stop twist.linear.x = dist_vel * 0.25 else: # otherwise try to move within stopping_dist twist.linear.x = dist * 0.25 print twist.linear.x twist.linear.x = max( 0.03, min(0.05, twist.linear.x) ) #change done here for stopping at a optimal distance after seeing the tag by krishna # try to face perpendicular to the tag yRot_velocity = yRot.get_velocity(biggest.yRot) if abs(biggest.yRot) < 0.5: # if we are mostly facing perpendicular, just try to stay still twist.linear.y = yRot_velocity * 0.25 else: # otherwise, rotate towards being in front of the tag #twist.linear.y = biggest.yRot * 0.25 twist.linear.y = max(-0.05, min(0.05, twist.linear.y)) # rotate to face the tag twist.angular.z = (-(cx - .5) / .5) if (twist.angular.z < 0): lastDir = -1 else: lastDir = 1 pub.publish(twist) print twist
twist = Twist() if (biggest.distance - 500 > 8): twist.linear.x = ((biggest.distance - 500.) / 500.) * .25 if (twist.linear.x < 0): twist.linear.x = 0 twist.angular.z = (-(cx - .5)/.5) if (twist.angular.z < 0): lastDir = -1 else: lastDir = 1 pub.publish(twist) print twist elif toggle.cam_channels == 1: #else: print "keeping position" biggest = Tag() twist = Twist() for tag in msg.tags: if (tag.diameter > biggest.diameter): biggest = tag if biggest.diameter == 0: twist.linear.x = 0 if (time() - lastSeen > .5): twist.angular.z = .5*lastDir pub.publish(twist) # reset velocity and distance distance.get_velocity(0) yRot.get_velocity(0) return
def handleTags(msg):
    """Drone tag handler: approach the nearest marker to a standoff distance,
    slide laterally to get in front of it, and yaw to face it head-on.
    Spins in place to search when no marker is visible.
    """
    global pub
    global lastSeen
    twist = Twist()
    width = msg.image_width
    height = msg.image_height
    biggest = Tag()
    # Find the nearest marker (here diameter stands in for proximity).
    for tag in msg.tags:
        if (tag.diameter > biggest.diameter):
            biggest = tag
    # No marker found (a fresh Tag has diameter 0).
    if biggest.diameter == 0:
        twist.linear.x = 0
        # Make the drone turn in circles to search, after 0.5 s without a tag.
        if (time() - lastSeen > .5):
            twist.angular.z = .5
        pub.publish(twist)
        # Reset the controllers to 0 in case a marker shows up in the next frame.
        distance.get_velocity(0)
        yRot.get_velocity(0)
        return
    lastSeen = time()
    # Forward axis: try to stop at stopping_dist from the marker.
    stopping_dist = 5000.
    dist = (biggest.distance - stopping_dist) / stopping_dist
    dist_vel = distance.get_velocity(dist)
    if abs(dist) < 0.25:
        # Close to stopping_dist: brake sharply using the controller output.
        twist.linear.x = dist_vel * 0.2
    else:
        # Otherwise move forward/backward toward stopping_dist.
        twist.linear.x = dist * 0.25
    print twist.linear.x  # Print the commanded forward speed to the terminal.
    # Limit the drone's forward speed.
    # NOTE(review): max(0.03, ...) also imposes a MINIMUM forward speed of
    # 0.03, so the drone can never stop or reverse here — confirm intended.
    twist.linear.x = max(0.03, min(0.05, twist.linear.x))
    # Lateral axis: get in front of the marker (left/right).
    yRot_velocity = yRot.get_velocity(biggest.yRot)
    if abs(biggest.yRot) < 0.5:
        # Almost in front: brake sharply using the controller output.
        twist.linear.y = yRot_velocity * 0.2
    else:
        # Otherwise slide left or right toward the marker's front.
        twist.linear.y = biggest.yRot * 0.25
    # Limit the drone's lateral speed.
    twist.linear.y = max(-0.05, min(0.05, twist.linear.y))
    # Horizontal position of the marker centre in the image (from the
    # x-coordinates of its four clockwise corners), normalised to [0, 1].
    cx = 0
    for i in [0, 2, 4, 6]:
        cx = cx + biggest.cwCorners[i]
    cx = cx / 4. / width
    # Yaw the drone to face the marker head-on.
    twist.angular.z = (-(cx - .5) / .5)
    pub.publish(twist)
    print twist