class Agent:
    def __init__(self):
        self._vision = Vision()
        self._controller = Controller()
        self._behaviours = []

    # ------------------------------------------------------------------------------------------------------------------
    # Public methods
    def AddBehaviour(self, behaviourName, minutes):
        behaviour = self._GetBehaviourByName(behaviourName)(vision=self._vision,
                                                            controller=self._controller,
                                                            minutes=minutes)
        self._behaviours.append(behaviour)

    def Activate(self):
        self._vision.Start()
        while len(self._behaviours) > 0:
            behaviour = self._behaviours.pop(0)
            behaviour.Activate()
        self._vision.Stop()

    def Deactivate(self):
        self._vision.Stop()

    # ------------------------------------------------------------------------------------------------------------------
    # Private methods
    def _GetBehaviourByName(self, name):
        behaviour = None
        if name == 'fisherman':
            behaviour = Fisherman
        return behaviour
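# Usage sketch (hypothetical schedule): queue a behaviour by name, then run
# the agent until the queue is drained. Only 'fisherman' is mapped above.
agent = Agent()
agent.AddBehaviour('fisherman', minutes=30)
agent.Activate()  # starts vision, runs queued behaviours, stops vision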
def check_dir_and_get_vision(self, direction: Vector) -> Vision:
    vision = Vision()
    distance = 0
    head_buff = Vector(self.head.x, self.head.y)
    # Move once
    head_buff.x += direction.x
    head_buff.y += direction.y
    distance += 1
    # Walk along one of the 8 directions and record where the food, wall
    # and body segments are
    while 0 <= head_buff.x <= PLAYABLE_AREA_WIDTH and 0 <= head_buff.y <= PLAYABLE_AREA_HEIGHT:
        if head_buff.x == self.apple.pos.x and head_buff.y == self.apple.pos.y:
            vision.is_apple = 1
        if self.is_on_tail(head_buff.x, head_buff.y):
            vision.tail_dist = 1 / distance
        head_buff.x += direction.x
        head_buff.y += direction.y
        distance += 1
    vision.wall_dist = 1 / distance
    return vision
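# Sketch (assumptions: a Snake instance `snake` and a 10-pixel grid step, as
# suggested by the tail offsets elsewhere in this class): cast all 8 rays.
steps = (-10, 0, 10)
directions = [Vector(dx, dy) for dx in steps for dy in steps if (dx, dy) != (0, 0)]
snake.visions = [snake.check_dir_and_get_vision(d) for d in directions]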
class Subscriber:
    def __init__(self):
        self.bridge = CvBridge()
        # self.subscriber = rospy.Subscriber(topicName, Image, self.callback)
        self.image_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
        # self.depth_sub = message_filters.Subscriber("/camera/depth/color/points", PointCloud2, self.callaback)
        self.depth_sub = message_filters.Subscriber(
            "/camera/aligned_depth_to_color/image_raw", Image)
        self.ts = message_filters.TimeSynchronizer(
            [self.image_sub, self.depth_sub], 10)
        self.ts.registerCallback(self.callback)
        self.vision = Vision()

    def callback(self, data, data2):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as error:
            rospy.logerr(error)
        try:
            cv_depth_image = self.bridge.imgmsg_to_cv2(
                data2, desired_encoding="16UC1")
        except CvBridgeError as error:
            rospy.logerr(error)
        # cv2.imshow("Image Window", cv_image)
        # print("Subscribed")
        self.vision.mainLoop(cv_image, cv_depth_image)
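# Usage sketch (standard rospy boilerplate; the node name is an assumption):
if __name__ == '__main__':
    rospy.init_node('vision_subscriber', anonymous=True)
    Subscriber()
    rospy.spin()  # hand control to ROS until the node is shut down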
def process_video(video_path, outdir):
    vision_object = Vision(mode="only_detect")
    video_name = FileUtils.get_file_name(video_path)
    logging.info("Starting process video: {}".format(video_name))
    st_time = time.time()
    saved_faces = 0
    processed_frames = 0
    fvs = FileVideoStream(video_path, preload_size=256, start=True)
    check_frame = 0
    delay_frame = 7
    while fvs.has_next():
        frame = fvs.get_next()
        processed_frames += 1
        if processed_frames - check_frame < delay_frame:
            continue
        check_frame = processed_frames
        faces = vision_object.face_detector(frame)
        for face in faces:
            x, y, w, h = face
            img_face = frame[y:y+h, x:x+w]
            out_path = os.path.join(outdir, "{}.jpg".format(time.time()))
            cv2.imwrite(out_path, img_face)
            saved_faces += 1
    elapsed_time = time.time() - st_time
    std_et = std_delta_time(int(elapsed_time))
    logging.info("Process {} in {} - processed_frames/total_frames {}/{} - Saved Faces {} - Avg FPS: {}".format(
        video_name, std_et, processed_frames, fvs.total_frames, saved_faces,
        int(processed_frames / elapsed_time)))
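# Usage sketch (hypothetical paths): extract faces from a single clip.
# Assumes the output directory already exists.
process_video("videos/entrance_cam.mp4", "output/faces")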
def __init__(self, number_of_actions, input_dimension, batch_size, alpha, load, file_name):
    self.number_of_actions = number_of_actions
    self.input_dimension = input_dimension
    self.instant_reward = 0.0
    self.cummulative_reward = 0.0
    self.memory = deque(maxlen=30000)
    self.batch_size = int(batch_size)
    self.classes = self.number_of_actions
    self.controller = Controller("UR3", 6)
    self.controller.connect(19997)
    self.vision = Vision("Vision_frontal", "Vision_lateral", "Vision_top",
                         self.controller.id_number)
    self.model = self.create_model(input_dimension, number_of_actions,
                                   'mean_squared_error', Adam(lr=alpha),
                                   ['accuracy', 'mean_squared_error'])
    if load == 1:  # load previous weights if set to 1
        self.model.load_weights(file_name)
        now = datetime.now()
        print(str(now) + " model weights load done!")
    self.handlers = self.manage_handlers()
    self.counter = 0
    self.step_degrees = 45.0
    self.done_counter = 0
    self.step_lost_counter = 0
def __init__(self):
    self.bridge = CvBridge()
    # self.subscriber = rospy.Subscriber(topicName, Image, self.callback)
    self.image_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
    # self.depth_sub = message_filters.Subscriber("/camera/depth/color/points", PointCloud2, self.callaback)
    self.depth_sub = message_filters.Subscriber(
        "/camera/aligned_depth_to_color/image_raw", Image)
    self.ts = message_filters.TimeSynchronizer(
        [self.image_sub, self.depth_sub], 10)
    self.ts.registerCallback(self.callback)
    self.vision = Vision()
def __init__(self):
    self.RUNNING = True
    self.vision_object = Vision(mode='only_detect')
    self.multi_tracker = MultiTracker()
    self.sid = vision_config.SERVICE_TOKEN
    self.reg = vision_config.SERVICE_REGISTER_WORKER
    self.idworker = str(uuid.uuid4())
    logging.info('Worker:: {}'.format(self.idworker))
    self.rd = redis.StrictRedis(host='localhost', port=6379)
    self.ps = self.rd.pubsub()
def __init__(self):
    self.key = vision_config.IDENTIFY_QUEUE
    self.rd = redis.StrictRedis(host='localhost', port=6379)
    self.rd.delete(self.key)
    try:
        database = Database(host=vision_config.DB_HOST,
                            user=vision_config.DB_USER,
                            passwd=vision_config.DB_PASSWD,
                            database_name=vision_config.DB_NAME)
    except:
        raise ConnectionError('Cannot connect to database')
    self.vision_object = Vision(mode='only_identify', database=database)
    threading.Thread(target=self.refetch_db).start()
def __init__(self):
    # Snake attributes
    self.x_start = PLAYABLE_AREA_WIDTH / 2
    self.y_start = PLAYABLE_AREA_HEIGHT / 2
    self.len = 1
    self.is_alive = True
    self.time_left = 200
    self.life_time = 0
    self.score = 0
    # Create vectors for position and velocity
    self.head = Vector(self.x_start, self.y_start)  # actual position
    self.vel = Vector(RIGHT_VECTOR.x, RIGHT_VECTOR.y)
    # Init snake with 4 body segments
    self.tail = []
    self.tail.append(Vector(self.x_start - 30, self.y_start))  # last segment of body
    self.tail.append(Vector(self.x_start - 20, self.y_start))  # second segment of body
    self.tail.append(Vector(self.x_start - 10, self.y_start))  # first segment of body
    self.len += 3  # increase body length accordingly
    # Vision for input
    self.visions = [Vision() for _ in range(8)]
    self.visions_array = []
    # Get DNA as snake's brain
    self.DNA = NeuralNetwork(INPUT_NODES, HIDDEN_NODES, OUTPUT_NODES)
    self.apple = Apple()
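# Sketch (assumed layout): flatten the 8 Vision objects into one input vector
# for the neural network. The 3-values-per-ray ordering mirrors the fields set
# by check_dir_and_get_vision, but is itself an assumption.
def build_visions_array(snake):
    snake.visions_array = []
    for v in snake.visions:
        snake.visions_array += [v.wall_dist, v.is_apple, v.tail_dist]
    return snake.visions_array  # 8 rays * 3 values = 24, presumably INPUT_NODES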
def main(config):
    """Starting node for Thor."""
    if config.getKittStart():
        # initialize Kitt object
        kitt = Kitt(config)
        # start Kitt
        kitt.start()
    if config.getVisionStart():
        # initialize Vision object
        vision = Vision(config)
        # start Vision
        vision.start()
def addVision(self, imageUrl, text, isUploaded, isPublic):
    '''Creates a new vision.

    TODO: What does isUploaded mean?

    Returns (Vision/None, None or error_msg if add vision failed)
    '''
    # TODO: Save page title and page URL?
    if imageUrl == "":
        return [None, "No image"]
    if len(text.strip()) <= 0:
        return [None, "No text"]
    pictureId, errorMsg = self._processAndUploadImageUrl(imageUrl, isUploaded)
    if pictureId is None:
        return [None, "Error saving picture"]
    privacy = VisionPrivacy.PUBLIC
    if not isPublic:
        privacy = VisionPrivacy.PRIVATE
    visionId = DataApi.addVision(self.model(), text, pictureId, 0, 0, privacy)
    if visionId == DataApi.NO_OBJECT_EXISTS_ID:
        return [None, "Error creating vision"]
    vision = Vision.getById(visionId, self)
    if vision:
        return [vision, "Saved Vision!"]
    else:
        return [None, "Error retrieving vision"]
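# Usage sketch (hypothetical user object and image URL); the return value is
# the [Vision-or-None, message] pair documented above.
vision, msg = user.addVision("http://example.com/pic.jpg", "Learn to sail",
                             isUploaded=False, isPublic=True)
if vision is None:
    print(msg)  # e.g. "No image" or "Error saving picture"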
def repostVisionList(self, visionIds):
    '''Convenience function that loops over self.repostVision().'''
    for visionId in reversed(visionIds):
        # TODO: Speed up later by grabbing list of visions
        vision = Vision.getById(visionId, self)
        if vision:
            self.repostVision(vision)
def commentOnVision(self, visionId, text, pictureId=0):
    '''Returns comment if successful, else returns None.'''
    from ..WorkerJobs import Queue_commentEmail, \
                             Queue_commentNotificationEmail
    vision = Vision.getById(visionId, self)
    if vision:
        comment = vision.addComment(self, text, pictureId)
        if comment:
            # If comment is on someone else's vision, email them
            if vision.userId() != self.id():
                Queue_commentEmail(self.toDictionary(),
                                   vision.toDictionary(),
                                   comment.toDictionary())
            # Send comment notifications to others that have commented and
            # are not the vision owner or the comment author.
            # TODO: figure out better heuristic for how far to go back
            commentList = vision.comments(50)
            userIdSet = set()
            # Use a separate loop variable so the new `comment` isn't shadowed
            for c in commentList.comments():
                if c.authorId() != self.id() and c.authorId() != vision.userId():
                    userIdSet.add(c.authorId())
            otherCommenters = User.getByUserIds(userIdSet)
            for otherCommenter in otherCommenters:
                Queue_commentNotificationEmail(
                    otherCommenter.toDictionaryFull(),
                    self.toDictionary(),
                    vision.toDictionary(),
                    comment.toDictionary())
            return comment
    return None
def seed(n):
    ####### training part ###############
    samples = np.loadtxt('generalsamples_seed.data', np.float32)
    responses = np.loadtxt('generalresponses_seed.data', np.float32)
    responses = responses.reshape((responses.size, 1))
    model = cv2.ml.KNearest_create()
    model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    ### recognizing ###
    v = Vision()
    i = 0
    save_coord = (1180, 1020)
    refine_coord = (1320, 1020)
    c = Controller()
    while i < n:
        min_magic_atk = recognize(v, (670, 1180, 20, 20), model)
        max_magic_atk = recognize(v, (695, 1180, 20, 20), model)
        accuracy = recognize(v, (745, 1180, 20, 20), model)
        print(accuracy)
        if '+' in min_magic_atk and '+' in max_magic_atk and '+' in accuracy:
            print('saving')
            c.move_mouse(save_coord)
            c.left_mouse_click()
            time.sleep(1 / 2)
        else:
            print('refining')
            c.move_mouse(refine_coord)
            c.left_mouse_click()
        i += 1
        time.sleep(1 / 2)
def __init__(self):
    self._state = {}
    GPIO.setmode(GPIO.BCM)
    self._gpio = GPIO
    self._driver = Driver.Driver(self._gpio)
    self._sensors = Sensors.Sensors(self._gpio)
    self._vision = Vision.Vision((640, 480))
    self._laser = Laser.Laser(self._gpio, 25)
class FaceDetectionWorker:
    def __init__(self):
        self.RUNNING = True
        self.vision_object = Vision(mode='only_detect')
        self.multi_tracker = MultiTracker()
        self.sid = vision_config.SERVICE_TOKEN
        self.reg = vision_config.SERVICE_REGISTER_WORKER
        self.idworker = str(uuid.uuid4())
        logging.info('Worker:: {}'.format(self.idworker))
        self.rd = redis.StrictRedis(host='localhost', port=6379)
        self.ps = self.rd.pubsub()

    def register_work_service(self):
        self.rd.lpush(self.reg, self.idworker)
        st = time.time()
        while not self.rd.exists(self.idworker):
            if time.time() - st > 10.:
                logging.info('Wait too long!!!')
                return
            time.sleep(0.1)
        msg = self.rd.get(self.idworker)
        if msg is None or msg == b'NONE':
            logging.info('Server is busy!')
            self.rd.delete(self.idworker)
            self.RUNNING = False
            return
        info = msg.split()
        self.IN = info[0]
        self.OUT = info[1]
        self.CHANNEL = info[2]
        self.ps.subscribe(self.CHANNEL)
        logging.info('Connected to server :: {} :: {} :: {}'.format(
            self.IN, self.OUT, self.CHANNEL))

    def run_service(self):
        logging.info('Detection service is running ...')
        while self.RUNNING:
            msg = self.rd.rpop(self.IN)
            if msg is not None:
                # shape_buffer = msg[:6]
                frame_buffer = msg
                # shape = np.frombuffer(shape_buffer, dtype=np.uint16)
                frame = np.frombuffer(frame_buffer, dtype=np.uint8)
                frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                # frame = np.reshape(frame, (shape[0], shape[1], shape[2]))
                face = self.vision_object.face_detector(frame)
                face = np.array(face, dtype=np.uint16)
                self.rd.set(self.OUT, face.tobytes())
            time.sleep(0.001)

# if __name__ == '__main__':
#     worker = FaceDetectionWorker()
#     worker.register_work_service()
#     worker.run_service()
def __init__(self):
    # TODO: fix logging to file readable from web
    self._vision = Vision.Vision()
    GPIO.setmode(GPIO.BCM)
    self._accel = gy521.GY521()
    self._comp = gy273.GY273()
    self._calButton = IoInputs.PushButton(GPIO, deviceconfig["IO"]["GreenButton"])
    self._resetButton = IoInputs.PushButton(GPIO, deviceconfig["IO"]["RedButton"])
    self._streamSwitch = IoInputs.OnOnSwitch(GPIO, deviceconfig["IO"]["Switch1.1"],
                                             deviceconfig["IO"]["Switch1.2"])
    self._onLed = LedIndicator.LedIndicator(GPIO, deviceconfig["IO"]["GreenLed"])
    self._streamLed = LedIndicator.LedIndicator(GPIO, deviceconfig["IO"]["YellowLed"])
    self._resetLed = LedIndicator.LedIndicator(GPIO, deviceconfig["IO"]["RedLed"])
def run(url=None):
    global vision_object, multi_tracker, database
    try:
        database = Database(host=vision_config.DB_HOST,
                            user=vision_config.DB_USER,
                            passwd=vision_config.DB_PASSWD,
                            database_name=vision_config.DB_NAME)
    except:
        raise ConnectionError('Cannot connect to database')
    vision_object = Vision(database)
    multi_tracker = MultiTracker()
    if url is not None:
        threading.Thread(target=read_camrea, args=(url,)).start()
    else:
        threading.Thread(target=read_camrea, args=()).start()
    threading.Thread(target=detect_face, args=()).start()
    threading.Thread(target=identify, args=()).start()
    threading.Thread(target=show, args=()).start()
def repostVision(self, vision):
    '''Repost a vision and return the new vision if successful, else None.'''
    assert vision, "Invalid vision"
    isPublic = self.visionDefaultIsPublic()
    newVisionId = DataApi.repostVision(self.model(), vision.model(), isPublic)
    if DataApi.NO_OBJECT_EXISTS_ID != newVisionId:
        newVision = Vision.getById(newVisionId, self)
        if newVision:
            # Only let the original user know if this vision is posted publicly
            if isPublic:
                from ..WorkerJobs import Queue_repostEmail
                Queue_repostEmail(self.toDictionary(),
                                  vision.toDictionary(),
                                  newVision.toDictionary())
            return newVision
    return None
def divinity(n):
    ####### training part ###############
    samples = np.loadtxt('generalsamples_divinity.data', np.float32)
    responses = np.loadtxt('generalresponses_divinity.data', np.float32)
    responses = responses.reshape((responses.size, 1))
    model = cv2.ml.KNearest_create()
    model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    ### recognizing ###
    v = Vision()
    i = 0
    save_coord = (1575, 1000)
    refine_coord = (1780, 1000)
    c = Controller()
    while i < n:
        result = ''
        atk = recognize(v, (600, 1670, 20, 20), model)
        p_def = recognize(v, (625, 1670, 20, 20), model)
        m_def = recognize(v, (655, 1670, 20, 20), model)
        max_hp = recognize(v, (685, 1670, 20, 20), model)
        accuracy = recognize(v, (715, 1670, 20, 20), model)
        evade = recognize(v, (745, 1670, 20, 20), model)
        result += atk + p_def + m_def + max_hp + accuracy + evade
        if '-' not in result and len(result) > 0:
            print('saving')
            c.move_mouse(save_coord)
            c.left_mouse_click()
            time.sleep(1 / 2)
        else:
            print('refining')
            c.move_mouse(refine_coord)
            c.left_mouse_click()
        i += 1
        time.sleep(1 / 2)
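# Usage sketch: run a bounded number of save-or-refine rounds; the round
# counts are illustrative, not taken from the original scripts.
seed(50)      # work the seed stat panel 50 times
divinity(50)  # work the divinity stat panel 50 times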
from Vision import Vision
from CamDeviceConfig import deviceconfig
from Sensors import gy521
import time

vision = Vision.Vision()
accel = gy521.GY521()
print "Vision init"
frame = vision.initialize()
print "First vision update"
lastframetime = time.time()
try:
    while 1:
        framerate = 1 / (time.time() - lastframetime)
        lastframetime = time.time()
        frame = vision.update()
        acceldata = accel.getAccelerometerdata()  # don't rebind the sensor object
        text = str(acceldata)
        if acceldata[0] > 0.8 and acceldata[0] < 1.2:
            vision._cam.hflip = True
        else:
            vision._cam.hflip = False
        if acceldata[1] > 0.8 and acceldata[1] < 1.2:
            vision._cam.vflip = True
        else:
            vision._cam.vflip = False
        print text
        time.sleep(0.2)
except:
    pass
'''
pictureTest.py
'''
# Add the api.py (parent) folder
import sys
sys.path.append("../objects")

from Picture import Picture
from Vision import Vision

v = Vision()
v.setInfo(1, 1, "hi!", 1, 0, '12312312323')

p = Picture()
# filename is the name of the file on the web server
p.setInfo(1, 'www.facebook.com/hi.jpg', '1')

v.setPicture(p)
print(v.toDictionary())
from Controller import Controller
from Vision import Vision

v = Vision()
(img, sct_img) = v.take_screenshot()
v.save(sct_img, 'assets/screenshot.png')

c = Controller()
# c.move_mouse(1370, 1070)
# c.left_mouse_click()
        if event.type == pygame.MOUSEBUTTONDOWN:
            startButton.click(event.pos)
        if event.type == pygame.MOUSEBUTTONUP:
            if startButton.release(event.pos):
                run = True
    bgColor = r, g, b
    screen.fill(bgColor)
    screen.blit(bgImage, bgRect)
    screen.blit(startButton.image, startButton.rect)
    pygame.display.flip()
    clock.tick(60)

players = [Pax([width / 2, height / 2])]
visions = []
for player in players:
    visions += [Vision("small", player)]
ghosts = []

while run and len(players) > 0:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_w or event.key == pygame.K_UP:
                players[0].go("up")
            if event.key == pygame.K_d or event.key == pygame.K_RIGHT:
                players[0].go("right")
            if event.key == pygame.K_s or event.key == pygame.K_DOWN:
                players[0].go("down")
            if event.key == pygame.K_a or event.key == pygame.K_LEFT:
                players[0].go("left")
            # if event.key == pygame.K_SPACE:
import cv2
import numpy as np

from Vision import Vision
from Controller import Controller
from Game import Game

vision = Vision()
controller = Controller()
game = Game(vision, controller)
s = game.can_start_round()
game.click_object("arrow")
controller.move_mouse()
controller.left_mouse_click()
qIn = LifoQueue(maxsize=1)
do = True
Thread(target=grub, args=(window, qIn, 0, do,)).start()
lastImg = img = get_img()
gyroscope = Gyroscope(img, settings)

# approximate sensor zones must be given as percentages
cornerPos = settings['sensors']['cornerPos']
clear = '\n' * 6
vision = Vision(img, cornerPos, settings, gyroscope)
# after initialization the real positions can be read back
cornerPos, sensorsPos = vision.roiSensors[:4], vision.roiSensors[4:]
if settings['sensors']['showGrid']:
    while True:
        img = get_img()
        vision.cut_img = vision.cut_img_deb(img)
        monitor.show_result(img)
xbot()
do = False
def main():
    sg.theme('LightGreen')
    low_H = 0
    low_S = 0
    low_V = 0
    high_H = max_value_H
    high_S = max_value
    high_V = max_value
    v = Vision(1, 1, 1, 1)

    # define the window layout
    layout = [
        [sg.Image(filename='', key='-IMAGE-')],
        [sg.Radio('Mask', 'video', True, size=(20, 1), key='-Mask-'),
         sg.Radio('Img', 'video', True, size=(20, 1), key='-Img-')],
        [sg.Text('high_H', size=(10, 1)),
         sg.Slider((low_H, max_value_H), 359, 1, orientation='h', size=(40, 9), key='-high_H SLIDER-')],
        [sg.Text('low_H', size=(10, 1)),
         sg.Slider((0, high_H), 300, 1, orientation='h', size=(40, 9), key='-low_H SLIDER-')],
        [sg.Text('high_V', size=(10, 1)),
         sg.Slider((low_V, max_value), 255, 1, orientation='h', size=(40, 9), key='-high_V SLIDER-')],
        [sg.Text('low_V', size=(10, 1)),
         sg.Slider((0, high_V), 120, 1, orientation='h', size=(40, 9), key='-low_V SLIDER-')],
        [sg.Text('high_S', size=(10, 1)),
         sg.Slider((low_H, max_value), 255, 1, orientation='h', size=(40, 9), key='-high_S SLIDER-')],
        [sg.Text('low_S', size=(10, 1)),
         sg.Slider((0, high_S), 70, 1, orientation='h', size=(40, 9), key='-low_S SLIDER-')],
        [sg.Text('Known Width (inch/cm/feet/etc.)'),
         sg.InputText("", size=(5, 5), key='-KW-')],
        [sg.Text('Known Height (inch/cm/feet/etc.)'),
         sg.InputText("", size=(5, 5), key='-KH-')],
        [sg.Text('Known distance from object (inch/cm/feet/etc.)'),
         sg.InputText("", size=(5, 5), key='-KD-')],
        [sg.Text('Pixel height at the above distance from camera'),
         sg.InputText("", size=(5, 5), key='-PH-')],
        [sg.Text('focal length: ERROR ERROR ERROR', key='-focal length-')],
        [sg.Text('Height : ERROR ERROR ERROR', key='-height-')],
        [sg.Text('Width : ERROR ERROR ERROR', key='-width-')],
        [sg.Text('Angle : ERROR ERROR ERROR', font='Ariel 24', key='-angle-')],
        [sg.Text('Distance : ERROR ERROR ERROR', font='Ariel 24', key='-distance-')],
    ]

    # Create the window
    window = sg.Window('Distance and Angle Demo', layout, location=(800, 400))

    # Start video stream
    cap = cv2.VideoCapture(0)
    while True:
        event, values = window.read(timeout=20)
        if event == 'Exit' or event == sg.WIN_CLOSED:
            break
        knownDistance = values['-KD-']
        knownWidth = values['-KW-']
        knownHeight = values['-KH-']
        pixelHeight = values['-PH-']
        # Use data from the textboxes only if it is numerical
        if (isNumeric(pixelHeight) and isNumeric(knownHeight)
                and isNumeric(knownWidth) and isNumeric(knownDistance)):
            v = Vision(float(pixelHeight), float(knownDistance),
                       float(knownWidth), float(knownHeight))
        elif isNumeric(knownHeight) and isNumeric(knownWidth):
            v = Vision(float(1), float(1), float(knownWidth), float(knownHeight))
        ret, frame = cap.read()
        mask, img = v.updateFrame(frame, low_H, low_S, low_V, high_H, high_S, high_V)
        low_H = values['-low_H SLIDER-']
        low_S = values['-low_S SLIDER-']
        low_V = values['-low_V SLIDER-']
        high_H = values['-high_H SLIDER-']
        high_S = values['-high_S SLIDER-']
        high_V = values['-high_V SLIDER-']
        window['-focal length-'].update('Focal Length: ' + str(v.getFocalLength()))
        h, w = v.getFittedBox()
        window['-height-'].update('Height: ' + str(h))
        window['-width-'].update('Width: ' + str(w))
        distance = v.getDistance()
        angle = v.getAngle()
        window['-angle-'].update('Angle: ' + str(round(angle, 3)))
        window['-distance-'].update('Distance: ' + str(round(distance, 3)))
        if values['-Mask-']:
            frame = cv2.resize(mask, (int(frame.shape[1] * .35), int(frame.shape[0] * .35)))
        elif values['-Img-']:
            frame = cv2.resize(img, (int(frame.shape[1] * .35), int(frame.shape[0] * .35)))
        imgbytes = cv2.imencode('.png', frame)[1].tobytes()
        window['-IMAGE-'].update(data=imgbytes)

    window.close()
class Agent(object):
    """Docstring for Agent."""

    def __init__(self, number_of_actions, input_dimension, batch_size, alpha, load, file_name):
        self.number_of_actions = number_of_actions
        self.input_dimension = input_dimension
        self.instant_reward = 0.0
        self.cummulative_reward = 0.0
        self.memory = deque(maxlen=30000)
        self.batch_size = int(batch_size)
        self.classes = self.number_of_actions
        self.controller = Controller("UR3", 6)
        self.controller.connect(19997)
        self.vision = Vision("Vision_frontal", "Vision_lateral", "Vision_top",
                             self.controller.id_number)
        self.model = self.create_model(input_dimension, number_of_actions,
                                       'mean_squared_error', Adam(lr=alpha),
                                       ['accuracy', 'mean_squared_error'])
        if load == 1:  # load previous weights if set to 1
            self.model.load_weights(file_name)
            now = datetime.now()
            print(str(now) + " model weights load done!")
        self.handlers = self.manage_handlers()
        self.counter = 0
        self.step_degrees = 45.0
        self.done_counter = 0
        self.step_lost_counter = 0

    def manage_handlers(self):
        """Fetch the controller handlers used for action manipulation."""
        handler_strings = []
        for i in range(0, 6):
            handler_strings.append("_joint" + str(i + 1))
        handlers = self.controller.get_handlers(handler_strings)
        return handlers

    def create_model(self, input_dimension, number_of_actions, loss_type, optimizer, metrics_list):
        """Create the DQN model with the specified input dimension and a
        number of actions for the output layer."""
        model1 = Sequential()
        model2 = Sequential()
        model3 = Sequential()
        model1.add(Conv2D(8, kernel_size=(5, 5), activation='relu',
                          input_shape=(input_dimension, input_dimension, 1)))
        model1.add(MaxPooling2D(pool_size=(2, 2)))
        model1.add(Conv2D(8, (5, 5), activation='relu'))
        model1.add(MaxPooling2D(pool_size=(2, 2)))
        model1.add(Conv2D(8, (5, 5), activation='relu'))
        model1.add(MaxPooling2D(pool_size=(2, 2)))
        model1.add(Dropout(0.5))
        model1.add(Flatten())
        ###################################################################
        model2.add(Conv2D(8, kernel_size=(5, 5), activation='relu',
                          input_shape=(input_dimension, input_dimension, 1)))
        model2.add(MaxPooling2D(pool_size=(2, 2)))
        model2.add(Conv2D(8, (5, 5), activation='relu'))
        model2.add(MaxPooling2D(pool_size=(2, 2)))
        model2.add(Conv2D(8, (5, 5), activation='relu'))
        model2.add(MaxPooling2D(pool_size=(2, 2)))
        model2.add(Dropout(0.5))
        model2.add(Flatten())
        ###################################################################
        model3.add(Conv2D(8, kernel_size=(5, 5), activation='relu',
                          input_shape=(input_dimension, input_dimension, 1)))
        model3.add(MaxPooling2D(pool_size=(2, 2)))
        model3.add(Conv2D(8, (5, 5), activation='relu'))
        model3.add(MaxPooling2D(pool_size=(2, 2)))
        model3.add(Conv2D(8, (5, 5), activation='relu'))
        model3.add(MaxPooling2D(pool_size=(2, 2)))
        model3.add(Dropout(0.5))
        model3.add(Flatten())

        # Merge the three camera branches into one head
        x = concatenate([model1.output, model2.output, model3.output])
        x = Dense(4096)(x)
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(number_of_actions)(x)
        model = Model(inputs=[model1.input, model2.input, model3.input], outputs=x)
        model.compile(loss=loss_type, optimizer=optimizer, metrics=metrics_list)

        # Earlier single-branch variant, kept for reference:
        # model = Sequential()
        # model.add(Conv2D(32, kernel_size=(5, 5), activation='relu',
        #                  input_shape=(input_dimension, input_dimension, 1)))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, (5, 5), activation='relu'))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # # model.add(Conv2D(64, (5, 5), activation='relu'))
        # # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Dropout(0.25))
        # model.add(Flatten())
        # model.add(Dense(4096))
        # model.add(Dense(256, activation='relu'))
        # model.add(Dropout(0.2))
        # model.add(Dense(number_of_actions))
        # model.compile(loss=loss_type, optimizer=optimizer, metrics=metrics_list)
        # model.summary()
        return model

    def act(self, state1, state2, state3, epsilon):
        """Decide whether to act randomly or greedily, according to epsilon."""
        if self.step_lost_counter < 30:
            if np.random.randint(0, 10) <= epsilon:
                return np.random.randint(0, self.number_of_actions)
            else:
                state1 = np.array(state1)
                state2 = np.array(state2)
                state3 = np.array(state3)
                action_values = self.model.predict([
                    state1[1].reshape(1, self.input_dimension, self.input_dimension, 1),
                    state2[1].reshape(1, self.input_dimension, self.input_dimension, 1),
                    state3[1].reshape(1, self.input_dimension, self.input_dimension, 1)
                ])
                return np.argmax(action_values[0])  # check if correct
        else:
            self.step_lost_counter = 0
            return np.random.randint(0, self.number_of_actions)

    def do_step(self, action):
        """Execute an action: rotate one joint by step_degrees, clockwise or
        counter-clockwise. Actions 2k and 2k+1 both address joint k; even
        actions increase the joint angle, odd actions decrease it."""
        joint = self.handlers[action // 2]
        sign = 1 if action % 2 == 0 else -1
        self.controller.set_joint_position(
            joint,
            self.controller.get_joint_position(joint) + sign * self.step_degrees)
        # Gripper actions, kept for reference:
        # elif action == 12:
        #     self.controller.gripper_open()
        # else:  # action == 13:
        #     self.controller.gripper_close()
        sleep(0.2)
        new_state1, new_state2, new_state3, reward, done = self.get_reward()
        return new_state1, new_state2, new_state3, reward, done

    def get_reward(self):
        """Estimate a reward value using computer vision: the 3D distance
        between two colored objects."""
        new_state1 = self.vision.get_image(1)  # frontal
        new_state2 = self.vision.get_image(2)  # lateral
        new_state3 = self.vision.get_image(3)  # top
        red_centers1 = self.vision.track_collor(new_state3[2], 0)
        red_centers2 = self.vision.track_collor(new_state2[2], 0)
        blue_center1 = self.vision.track_collor(new_state3[2], 1)
        blue_center2 = self.vision.track_collor(new_state2[2], 1)
        if (len(blue_center1) > 0) and (len(blue_center2) > 0) and \
                (len(red_centers1) > 0) and (len(red_centers2) > 0):
            red1 = red_centers1[0]
            red2 = red_centers2[0]
            blue1 = blue_center1[0]
            blue2 = blue_center2[0]
            _3d_red = (red1[0], red1[1], red2[1])
            _3d_blue = (blue1[0], blue1[1], blue2[1])
            # print(_3d_red, _3d_blue)
            distance = math.sqrt((_3d_red[0] - _3d_blue[0])**2 +
                                 (_3d_red[1] - _3d_blue[1])**2 +
                                 (_3d_red[2] - _3d_blue[2])**2)
            base = 150
            if distance >= 11.0:
                done = 0
                reward = -2 * abs(distance) if distance >= 50.0 else base - distance
            else:
                self.done_counter += 1
                done = 1
                reward = 10 * base
        else:
            self.step_lost_counter += 1
            reward = 0.0
            done = 0
        return new_state1, new_state2, new_state3, reward, done
    def skipTable(self):
        self.controller.click(1048, 195)

    def startGame(self):
        self.controller.click(1048, 488)

    def restartGame(self):
        self.controller.click(892, 160)

    def navigate(self):
        while True:
            self.scene = self.vision.getScene()
            print(self.scene)
            if self.scene == 'turn_table':
                self.skipTable()
            elif self.scene == 'menu':
                self.startGame()
            elif self.scene == 'game':
                continue
            elif self.scene == 'game_over':
                self.restartGame()
            else:
                print('****')

v = Vision()
g = Game(v)
g.navigate()
from Movement import Movement
from Vision import Vision
from Neopixel import Neopixel as Neo
import signal
import sys

neo = Neo()
movement = Movement()
vision = Vision(movement, neo)

def signal_handler(signal, frame):
    print('you pressed Ctrl-C')
    global vision
    vision.end()
    neo.end()
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
vision.runFaceFollower()
class FaceIdentifyService:
    def __init__(self):
        self.key = vision_config.IDENTIFY_QUEUE
        self.rd = redis.StrictRedis(host='localhost', port=6379)
        self.rd.delete(self.key)
        try:
            database = Database(host=vision_config.DB_HOST,
                                user=vision_config.DB_USER,
                                passwd=vision_config.DB_PASSWD,
                                database_name=vision_config.DB_NAME)
        except:
            raise ConnectionError('Cannot connect to database')
        self.vision_object = Vision(mode='only_identify', database=database)
        threading.Thread(target=self.refetch_db).start()

    def refetch_db(self):
        self.rd.set(vision_config.FLAG_REFETCH_DB, b"0")
        while True:
            flag_value = self.rd.get(vision_config.FLAG_REFETCH_DB)
            if flag_value is not None and flag_value == b"1":
                self.rd.set(vision_config.FLAG_REFETCH_DB, b"0")
                self.vision_object.update_new_database()
            time.sleep(0.5)

    def run(self):
        logging.info('Service identify is running ...')
        while True:
            faces = []
            IDs = []
            modes = []
            while True:
                msg = self.rd.rpop(self.key)
                if msg is None:
                    break
                len_ID = int(msg[0])
                ID = msg[1:1 + len_ID].decode('utf-8')
                mode = msg[len_ID + 1:len_ID + 2].decode('utf-8')
                face = np.frombuffer(msg[2 + len_ID:], dtype=np.uint8)
                face = cv2.imdecode(face, cv2.IMREAD_COLOR)
                if face.shape == (160, 160, 3):
                    IDs.append(ID)
                    modes.append(mode)
                    # print(mode)
                    faces.append(face)
                if len(faces) == vision_config.BATCH:
                    break
            if len(faces) > 0:
                embedding_list, predictions = self.vision_object.identify_person_by_img(faces)
                for i in range(len(IDs)):
                    if modes[i] == vision_config.IDEN_MOD:
                        if predictions[i] is not None:
                            msg = modes[i].encode("utf-8") + str(predictions[i].id).encode("utf-8")
                            # print(predictions[i].id)
                            self.rd.lpush(IDs[i], msg)
                        else:
                            msg = modes[i].encode("utf-8") + b"-1"
                            self.rd.lpush(IDs[i], msg)
                    else:
                        content = manage_data.convert_embedding_vector_to_bytes(embedding_list[i])
                        msg = modes[i].encode("utf-8") + content
                        self.rd.lpush(IDs[i], msg)

# if __name__ == '__main__':
#     fis = FaceIdentifyService()
#     fis.run()
from User import User
from Vision import Vision

u = User()
u.setFromJson('{"password": "******", "id": 1, "email": "*****@*****.**"}')
print u.email
print u.dateCreated

v = Vision()
v.setFromJson('{"id":1, "userId": 1}')
print v.id
print v.userId
#!/usr/bin/python
# _*_ coding: utf-8 _*_

from Nao import Nao
from Vision import Vision

if __name__ == "__main__":
    Robot = Nao('192.168.1.137', 9559)
    Detect = Vision(Robot)
    Robot.StartPosition()
    if Detect.identifyColor():
        Robot.setFinish()
def __init__(self):
    self._vision = Vision()
    self._controller = Controller()
    self._behaviours = []
def vision(self, inquiringUser):
    from Vision import Vision
    return Vision.getById(self.visionId(), inquiringUser)
from Guide import Guide
from Vision import Vision
from DoBlue import doBlue
from DoGreen import doGreen
from DoRed import doRed
import rospy
import threading

def wallFollowingUntilColor(myGuide, myVision):
    while True:
        color = myVision.recognizeColor()
        myGuide.wallFollowing()
        if color is not None:
            return color
        else:
            rospy.sleep(0.05)

myGuide = Guide()
myVision = Vision()
rospy.sleep(1)
color = wallFollowingUntilColor(myGuide, myVision)
if color == 'blue':
    doBlue(myGuide, myVision)
elif color == 'green':
    doGreen(myGuide, myVision)
elif color == 'red':
    doRed(myGuide, myVision)
def identify():
    global vision_object, multi_tracker, database
    global flag_training_online, flag_take_photo, name_interface, RUNNING
    tim_elapsed = 0.4
    time_take_photo = 2
    bbox_list_online = []
    img_list_online = []
    info_pack = None
    database_changed = False
    timer = time.time()
    bbox_faces = None
    while RUNNING:
        train_area = vision_config.TRAINING_AREA
        frame, bbox_faces = q_faces.get()
        try:
            multi_tracker.update_bounding_box(bbox_faces, database)
            unidentified_tracker, identified_tracker = multi_tracker.cluster_trackers()
            if len(unidentified_tracker) > 0:
                img_faces, predictions = vision_object.identify_person_by_tracker(
                    frame, unidentified_tracker)
                multi_tracker.update_identification(unidentified_tracker,
                                                    predictions,
                                                    img_list=img_faces)
            multi_tracker.show_info(frame)
        except:
            continue
        if flag_training_online:
            if info_pack is None:
                name = input('Name: ')
                age = int(input('Age: '))
                gender = input('Gender: ')
                idCode = input('Id Code: ')
                idCam = 1
                info_pack = (name, age, gender, idCode, idCam)
            if time_take_photo > 0.2:
                cv2.rectangle(frame, (train_area[0], train_area[1]),
                              (train_area[2], train_area[3]),
                              vision_config.NEG_COLOR,
                              vision_config.LINE_THICKNESS + 1)
                cv2.putText(frame, 'Time Remaining: ' + str(round(time_take_photo, 1)) + 's',
                            (train_area[0] + 10, train_area[1] + 30), cv2.FONT_HERSHEY_SIMPLEX,
                            vision_config.FONT_SIZE + 0.1, vision_config.NEG_COLOR,
                            vision_config.LINE_THICKNESS * 2, cv2.LINE_AA)
            else:
                cv2.putText(frame, 'System is being trained .... ',
                            (100, 100), cv2.FONT_HERSHEY_SIMPLEX,
                            vision_config.FONT_SIZE * 2, vision_config.NEG_COLOR,
                            vision_config.LINE_THICKNESS * 2, cv2.LINE_AA)
            if flag_take_photo:
                if time.time() - timer >= 0.1:
                    timer = time.time()
                    time_take_photo = max(0, time_take_photo - 0.1)
                    time_take_photo = round(time_take_photo, 1)
                if timer - tim_elapsed >= 0.4 and time_take_photo > 0:
                    tim_elapsed = time.time()
                    training_bbox_faces, training_img_faces = Vision.face_in_training_area(
                        frame, bbox_faces, train_area)
                    if len(training_bbox_faces) == 1:
                        bbox_list_online.append(training_bbox_faces[0])
                        img_list_online.append(training_img_faces[0])
                        logging.info('Take successfully')
                    else:
                        logging.info('No face')
                if time_take_photo == 0:
                    logging.info('Time_take_photo out')
                    flag_take_photo = False
                    if len(bbox_list_online) == 0:
                        info_pack = None
                        bbox_list_online.clear()
                        img_list_online.clear()
                        flag_training_online = False
                        name_interface = None
                        time_take_photo = 2
            if flag_take_photo == False and len(bbox_list_online) > 0:
                learning.online_learning(bbox_list_online, img_list_online,
                                         info_pack, vision_object, multi_tracker, database)
                info_pack = None
                bbox_list_online.clear()
                img_list_online.clear()
                flag_training_online = False
                name_interface = None
                time_take_photo = 2
        if q_identify.full():
            q_identify.get()
        q_identify.put(frame)