Example #1
 def end_ship_event(self):
     time.sleep(0.05)
     tempDetection = Detection(self.args)
     tempDetection.serialId, tempDetection.x, tempDetection.y = 0, [], []
     self.fire_and_forget()
     if not self.args["TEST"]:
         string_to_send = """
            mutation{
                endShipEvent(eventId: 
                  %d
                ){ 
                id,
                  }
                }
            """ % (self.id)
         request = requests.post(self.args["URL"],
                                 json={'query': string_to_send})
         if self.args["INFO"]:
             print(
                 f"close event {self.id} type {self.eventType} in camera {self.cameraId} in time {datetime.datetime.now()}"
             )
         if request.status_code != 200 and self.args["ERRORS"]:
             if self.args["WARNINGS"]:
                 logging.warning(
                     f"Query failed to run by returning code of {request.status_code} in end_ship_event"
                 )
     else:
         if self.args["INFO"]:
             print(
                 f"close event {self.id} type {self.eventType} in camera {self.cameraId} in time {datetime.datetime.now()}"
             )
     self.open = False
     self.published = False
     self.closedTime = time.time()
Example #2
class Vision:

    def __init__(self, name_viewer):
        self.viewer = name_viewer
        self.detector_viewer = Detection(name_viewer)

    def view_screen(self, main_queue):
        # capture the screen to detect monsters and return their positions
        image = np.array(ImageGrab.grab())
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)

        # model detection
        rectangles = self.detector_viewer.detect_monster(image)

        # draw detection result
        detection_image = self.detector_viewer.draw_rectangles(
            image, rectangles)

        # show picture detection
        # cv.imshow('Result', detection_image)
        # cv.waitKey()

        # put monster positions into the queue
        if rectangles == ():
            print('No monsters found')
        else:
            print('Monsters found')
            list_detect = {'type': 'monster', 'rectangles': rectangles}
            main_queue.put(list_detect)
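A minimal driver sketch for the Vision class above; the viewer name and queue setup here are hypothetical, not part of the original example:

from multiprocessing import Queue

vision = Vision('monster_model')  # hypothetical viewer/model name forwarded to Detection
main_queue = Queue()

# poll the screen once; any detections are pushed onto the queue
vision.view_screen(main_queue)
if not main_queue.empty():
    print(main_queue.get())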
Example #3
class DetectionWindow(QMainWindow):
	def __init__(self):
		super(DetectionWindow, self).__init__()	
		loadUi('UI/detection_window.ui', self)

		self.stop_detection_button.clicked.connect(self.close)

	# Creates a detection instance
	def create_detection_instance(self, token, location, receiver):
		self.detection = Detection(token, location, receiver)

	# Assigns detection output to the label in order to display detection output
	@pyqtSlot(QImage)
	def setImage(self, image):
		self.label_detection.setPixmap(QPixmap.fromImage(image))

	# Starts detection
	def start_detection(self):
		self.detection.changePixmap.connect(self.setImage)
		self.detection.start()
		self.show()

	# When closed
	def closeEvent(self, event):
		self.detection.running = False
		event.accept()
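A hypothetical launch sequence for DetectionWindow, assuming PyQt5 and that token, location, and receiver are supplied by the caller:

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
window = DetectionWindow()
window.create_detection_instance(token, location, receiver)  # caller-supplied values
window.start_detection()  # connects changePixmap to setImage, starts the thread, shows the window
sys.exit(app.exec_())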
Example #4
    def test_metrics_bad_confidence(self):
        # [2 2 10 20; 80 80 30 40]
        true1 = BoundingBox(1, 'A', (2, 2), (12, 22))
        true2 = BoundingBox(1, 'A', (80, 80), (110, 120))

        labels = [true1, true2]

        # [4 4 10 20; 50 50 30 10; 90 90 40 50];
        pred1 = BoundingBox(1, 'A', (4, 4), (14, 24), confidence=0.1)
        pred2 = BoundingBox(1, 'A', (50, 50), (80, 60), confidence=0.1)
        pred3 = BoundingBox(1, 'A', (80, 80), (110, 120), confidence=0.1)

        predictions = [pred1, pred2, pred3]

        det = Detection(labels=labels, predictions=predictions)

        pr, re, _ = det.metrics()

        self.assertEqual(pr, 0.0)
        self.assertEqual(re, 0.0)

        pr, re, _ = det.metrics(confidence_threshold=0.1)

        self.assertEqual(pr, 2.0 / 3.0)
        self.assertEqual(re, 1.000)
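The expected values follow from the geometry: with the default threshold the low-confidence (0.1) predictions are evidently filtered out, so both metrics are 0; at confidence_threshold=0.1, pred1 and pred3 overlap the two labels (two true positives) while pred2 matches nothing (one false positive). A quick check of the arithmetic:

tp, fp, fn = 2, 1, 0
precision = tp / (tp + fp)  # 2/3, as asserted
recall = tp / (tp + fn)     # 1.0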
Example #5
    def learn_face(self, nb_image, user):
        d = Detection(self.proto, self.model)
        face_images = d.get_face(nb_image, 0.8)
        # place where images to train the recognizer on are stored
        path_to_images = "temp/dataset/%s" % user
        if not os.path.exists(path_to_images):
            os.makedirs(path_to_images)
        i = 0
        for image in face_images:
            image_name = path_to_images + "/" + str(i) + ".jpg"
            i += 1
            # cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cv2.imwrite(image_name, image)
        ExtractEmbeddings.main(self.dataset, self.embeddings, self.proto,
                               self.model, self.embedding_model)
        TrainModel.main(self.embeddings, self.recognizer, self.le)

        path_to_user_images = self.dataset + "/" + user
        shutil.rmtree(path_to_user_images)
        # zip output so it can be sent easily
        zipname = "%s_frdata.zip" % user
        output_zip = zipfile.ZipFile(zipname, 'w')
        # for folder, subfolders, files in os.walk("output"):
        #     for file in files:
        #         output_zip.write(os.path.join(folder, file), os.path.relpath(os.path.join(folder, file), "output"),
        #                          compress_type=zipfile.ZIP_DEFLATED)
        output_zip.write("output/le.pickle", "le.pickle")
        output_zip.write("output/recognizer.pickle", "recognizer.pickle")

        output_zip.close()
        return zipname
Example #6
 def __init__(self):
     self.c = Communicate()
     self.d = Detection()
     self.m = Measure()
     self.msg_list = MSG_LIST  # dictionary of prompt messages
     self.cmd_dict = {}  # dictionary of commands
     self._func_dict()  # populate cmd_dict: register methods whose names match the suffix '_(\d+)'
Example #7
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 51000))

    detecter = Detection()
    dp = Display()
    ad = Audio()
    thread = Thread(target=dp.start)
    thread.daemon = True
    thread.start()

    state_before = 0
    while True:
        motor_power, target_dist = detecter.detect()

        # send to the driving program
        sock.sendall(motor_power.to_bytes(2, 'big') + target_dist.to_bytes(2, 'big'))
        byte_data = sock.recv(4)
        #print(byte_data)
        state = int.from_bytes(byte_data[:2], 'big')
        ad_flag = int.from_bytes(byte_data[2:], 'big')

        #print(state)
        if state != state_before:
            dp.changeImage(state)
            if ad_flag == 1:
                if state_before == 0:
                    ad.play(0)
                elif state == 5:
                    ad.play(1)
                elif state_before == 5:
                    ad.play(2)
        state_before = state
Example #8
def main():

    img = parseInput()
    d = Detection()
    link_coord = d.run(img)
    laser_coord = getLaserCoords(img)
    print("Link Coordinates", link_coord)
    print("Laser Coordinates", laser_coord)
Example #9
def go_particle():
    img     = Image()
    detect  = Detection()
    found = []
    initialState = None
    pf = None
    histograms = []
    detections = []
    hist_ref = None
    # Data collection
    while True:
        img.get()

        # Information-gathering / learning phase
        if len(histograms) < 10:
            print(len(histograms))
            #Detect faces
            found = detect.faces(img.image)
            if len(found) > 0:
                hist_ref = img.getColorHistogram(found[0]).ravel()
                histograms.append(hist_ref)
                img.show_hist(hist_ref)
                hist_ref=hist_ref/float(hist_ref.sum())
        else:  # learning finished
            # start the filter, using the last ROI/found as initialState
            # print(len(hist_ref))

            if initialState is None:
                x,y,w,h = found[0]
                hist = img.getColorHistogram(found[0]).ravel()
                dx = random.uniform(0,5)
                dy = random.uniform(0,5)
                dw = random.uniform(0,.5)
                dh = random.uniform(0,.5)
                initialState = np.array([x,y,w,h,dx,dy,dw,dh])
                ceroState = np.random.uniform(0,img.size[1],8)
                cov = np.cov(np.asarray([initialState,ceroState]).T)
                pf  = ParticleFilter(initialState,cov,hist_ref,10.,100)
                u=np.zeros((100,4))
                for rect in pf.states:
                    rect = rect[:4]
                    img.draw_roi([rect])
                # pf.update(img)

            else:  # the filter is initialized; now update and predict
                old=pf.states
                pf.predict(img.size[0],img.size[1],u)
                pf.update(img)
                for rect in pf.states:
                    rect = rect[:4]
                    img.draw_roi([rect])
                u=(old-pf.states)[:,-4:]
        img.show()

        if 0xFF & cv2.waitKey(5) == 27:
            break
Example #10
    def __init__(self):
        self.webcam = Webcam()
        self.webcam.start()

        self.detection = Detection()

        self.x_axis = 0.0
        self.y_axis = 0.0
        self.z_axis = 0.0
        self.z_pos = -7.0
Example #11
 def __init__(self, text_to_speech, speech_to_text):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
     Emotion.__init__(self)
     self.speech_to_text = speech_to_text
     self.background_image = np.array([])
     self.detection_image = np.array([])
     self.detection = Detection()
     self.reels = [None, None, None]
     self.holds = [None, None]
     self.coins = 100
Example #12
def main():

    # Instantiate the detection and UI classes
    detection = Detection()
    user_interface = UserInterface()
    # Build the layouts
    layout_crowd_detection = user_interface.layout_crowd_detection()
    layout_define_threshold = user_interface.layout_define_threshold()
    # Define threshold
    threshold = define_threshold(layout_define_threshold)
    # Crowd detection
    if threshold != 'exit':
        detection.crowd_detection(threshold, layout_crowd_detection)

    print("Exit system")
Example #13
    def update(self, bbox_xcycwh, confidences, ori_img, update_fg=True):
        self.height, self.width = ori_img.shape[:2]
        # generate detections
        if update_fg or len(bbox_xcycwh) != len(self.feature_tmp):
            features = self._get_features(bbox_xcycwh, ori_img)
            self.feature_tmp = features
        else:
            features = self.feature_tmp
        detections = [Detection(bbox_xcycwh[i], conf, features[i])
                      for i, conf in enumerate(confidences) if conf > self.min_confidence]

        # run non-maximum suppression
        # boxes = np.array([d.tlwh for d in detections])
        # scores = np.array([d.confidence for d in detections])
        # indices = non_max_suppression( boxes, self.nms_max_overlap, scores)
        # detections = [detections[i] for i in indices]

        # update tracker
        self.tracker.predict()
        self.tracker.update(detections)
        # output bbox identities
        outputs = []
        for track in self.tracker.tracks:
            #print('time_s',track.time_since_update)
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            box = track.to_tlwh()
            x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
            track_id = track.track_id
            fg = self.check_in_out(track)
            outputs.append(np.array([x1, y1, x2, y2, track_id, fg], dtype=int))
        if len(outputs) > 0:
            outputs = np.stack(outputs, axis=0)
        return outputs
Example #14
    def get_objects(self, frame):
        """Converts bytes into Detection objects.
        
        Arguments:
            frame {Bytes} -- Raw data to parse
        
        Returns:
            [Detection], int -- list of Detections, frame taken time
        """
        time = int.from_bytes(frame[0:4], byteorder='little')
        size = len(frame)
        detected_objects = []
        # each record is six 4-byte little-endian fields following the 4-byte timestamp
        for i in range((size - 4) // 24):
            start_idx = 4 + 24 * i
            class_id = int.from_bytes(
                frame[start_idx:start_idx+4], byteorder='little')
            x = int.from_bytes(
                frame[start_idx+4:start_idx+8], byteorder='little')
            y = int.from_bytes(
                frame[start_idx+8:start_idx+12], byteorder='little')
            w = int.from_bytes(
                frame[start_idx+12:start_idx+16], byteorder='little')
            h = int.from_bytes(
                frame[start_idx+16:start_idx+20], byteorder='little')
            frame_id = int.from_bytes(
                frame[start_idx+20:start_idx+24], byteorder='little')

            class_name = self._dict.id_to_class_name(class_id)
            new_obj = Detection(x, y, w, h, time, frame_id, \
                class_name=class_name, class_id=class_id)
            detected_objects.append(new_obj)

        return detected_objects, time
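A sketch of the wire format get_objects appears to assume: a 4-byte little-endian timestamp followed by 24-byte records of six little-endian uint32 fields, in the order inferred from the parsing code:

import struct

def build_frame(time_ms, records):
    # records: iterable of (class_id, x, y, w, h, frame_id) tuples
    frame = struct.pack('<I', time_ms)
    for rec in records:
        frame += struct.pack('<6I', *rec)
    return frame

# one detection of class 1 at (10, 20), 30x40 pixels, frame 7
frame = build_frame(1000, [(1, 10, 20, 30, 40, 7)])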
Example #15
def get_costs_matrix(actual_blobs, detections, threshold):
    # the cost matrix must be at least as wide as it is tall
    rows_count = len(actual_blobs)

    if rows_count > len(detections):
        columns_count = rows_count
        for i in range(0, len(actual_blobs) - len(detections)):
            detections = np.append(detections, Detection())
    else:
        columns_count = len(detections)

    costs_matrix = np.zeros(shape=(rows_count, columns_count), dtype=float)

    for i, blob in enumerate(actual_blobs):
        for j, detection in enumerate(detections):
            if detection.position is None:
                costs_matrix[i][j] = INFINITE
            else:
                distance = euclidean_distance(
                    blob_center(blob), blob_center(detection.position)
                )

                costs_matrix[i][j] = \
                    distance if distance <= threshold else INFINITE

    return costs_matrix, detections
Example #16
    def run(self):

        detect = Detection(
            graph=PATH_TO_GRAPH,
            labels=PATH_TO_LABELS,
            classes=NUM_CLASSES
        )
        try:
            while True:
                connection, addr = self.socket.accept()
                client_thread = threading.Thread(
                    target=self.process,
                    args=(connection, detect)
                )  # spawn a new thread for each new connection
                client_thread.start()
        except Exception:
            detect.terminate()
            print('Exception occurred!')
Example #17
    def test_precision_one_class(self):
        # [2 2 10 20; 80 80 30 40]
        true1 = BoundingBox(1, 'A', (2, 2), (12, 22))
        true2 = BoundingBox(1, 'A', (80, 80), (110, 120))

        labels = [true1, true2]

        # [4 4 10 20; 50 50 30 10; 90 90 40 50];
        pred1 = BoundingBox(1, 'A', (4, 4), (14, 24))
        pred2 = BoundingBox(1, 'A', (50, 50), (80, 60))
        pred3 = BoundingBox(1, 'A', (80, 80), (110, 120))

        predictions = [pred1, pred2, pred3]

        det = Detection(labels=labels, predictions=predictions)

        pr, re, _ = det.metrics()

        self.assertEqual(pr, 2.0 / 3.0)
        self.assertEqual(re, 1.000)
Example #18
    def __init__(self):
        # self.state = 'start'
        self.state = 'intent'
        self.exit = False
        self.curr_msg = None
        self.menu = {}
        self.create_menu_graph()
        self.browser = 'chrome'
        # print(self.menu)
        self.spell_check_model = SpellCorrectionModel(language='en')
        self.domain_spell_check_model = SpellCorrectionModel(language='en')
        self.spell_check_model.load('en.pkl')
        self.domain_spell_check_model.load('custom_model.pkl')
        with open('data.json', 'r') as f:
            self.db = json.load(f)
        # with open('words.json') as f:
        #     self.all_words = json.load(f)
        # f.close()

        with open('final.pkl', 'rb') as pk_file:
            answers = pickle.load(pk_file)
        self.answer_detector = Detection(answers=answers,
                                         detection_type='answer')

        intent_answers = []
        for each in INTENT_DATA:
            for i in INTENT_DATA[each]['data']:
                intent_answers.append(
                    (clean_text(i), INTENT_DATA[each]['number']))
        self.intent_detector = Detection(answers=intent_answers,
                                         detection_type='intent')

        port = 465  # For SSL
        smtp_server = "smtp.gmail.com"
        self.sender_email = "*****@*****.**"
        self.receiver_email = "*****@*****.**"
        password = "******"
        context = ssl.create_default_context()
        self.server = smtplib.SMTP_SSL(smtp_server, port, context=context)
        self.server.login(self.sender_email, password)
Example #19
    def __init__(self):
        super(MainGUI, self).__init__()

        self.initiate_shared_variables()

        # Create detection and load model
        self.detection = Detection(shared_variables=self.shared_variables)

        self.threadpool = QThreadPool()

        print("Multithreading with maximum %d threads" %
              self.threadpool.maxThreadCount())
        """
        self.timer = QTimer()
        self.timer.setInterval(10)
        self.timer.timeout.connect(self.print_output)
        self.timer.start()
        """

        # Start Detection thread
        self.start_worker()
Example #20
def convert_detections(detections, features, appearance_features,
                       detections_3d):
    detection_list = []
    if detections_3d is None:
        detections_3d = [None] * len(detections)
    for detection, feature, appearance_feature, detection_3d in zip(
            detections, features, appearance_features, detections_3d):
        x1, y1, x2, y2, conf, _, _ = detection
        box_2d = [x1, y1, x2 - x1, y2 - y1]
        if detection_3d is not None:
            x, y, z, l, w, h, theta = detection_3d
            box_3d = [x, y, z, l, w, h, theta]
        else:
            box_3d = None
        if feature is None:
            detection_list.append(
                Detection(box_2d, None, conf, appearance_feature, feature))
        else:
            detection_list.append(
                Detection(box_2d, box_3d, conf, appearance_feature, feature))

    return detection_list
Example #21
 def detect(self, image, return_image=False):
     data = []
     if self.detector is None:
         ids, corners, centers, Hs = pyAprilTag.find(image)
         for i in range(len(ids)):
             # reversing corners since that's the order they were in for the older library
             corners_list = corners[i].tolist()
             corners_list.reverse()
             data.append(
                 Detection(ids[i], centers[i], corners_list,
                           util.compute_angle(corners_list)))
     else:
         data = [
             Detection(detection.tag_id, detection.center,
                       detection.corners,
                       util.compute_angle(detection.corners))
             for detection in self.detector.detect(image, False)
         ]
     data.sort(key=lambda entry: entry.tag_id)
     if return_image:
         return data, image
     else:
         return data
Example #22
def detections_from_csv(csv_filepath: str) -> List[Detection]:
    """
    reads a detection list from csv file
    :param csv_filepath: csv file path
    :return: detections list
    """
    detections = list()
    with open(csv_filepath, newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            top_right_coord = Coordinate(int(row[X_TOP_RIGHT_KEY]),
                                         int(row[Y_TOP_RIGHT_KEY]))
            bb = BoundingBox(top_right_coord, int(row[BB_W]), int(row[BB_H]))
            detection = Detection(bb, float(row[SCORE]))
            detections.append(detection)
    return detections
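A hypothetical input for detections_from_csv, assuming the X_TOP_RIGHT_KEY, Y_TOP_RIGHT_KEY, BB_W, BB_H, and SCORE constants resolve to the column names used below:

csv_text = (
    "x_top_right,y_top_right,width,height,score\n"
    "100,50,40,80,0.92\n"
    "210,60,35,70,0.87\n"
)
with open('detections.csv', 'w') as f:
    f.write(csv_text)

detections = detections_from_csv('detections.csv')  # two Detection objects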
Example #23
class DetectAPI():
    '''
    Description: DetectAPI detects objects (cars in this case)
    '''
    def __init__(self):
        self.objdetect = Detection()

    def get_object(self, image):
        '''
        Description: Detect object from a given image
        Arguments: input image
        Returns: cropped object from the image
        '''
        logging.info('Detecting Objects')
        bboxes, centers, objects = self.objdetect.detect_object(image)
        return bboxes, centers, objects
Example #24
    def predicted_position_as_real(self, time):
        """When no coordinates has been linked appends
        predicted position to history.

        Arguments:
            time {int} -- image download time
        """

        previous_box = self._boxes_history[-1]
        prediced_state = self._filter.x
        time = time % 0xffffffff
        box = Detection(prediced_state[0], prediced_state[2], int(prediced_state[4]),
                        int(prediced_state[5]), time, previous_box.frame_id + 1, \
                        class_name=self._class_name)
        self._boxes_history.append(box)
        self.time_since_update += 1
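# Note: the state indices used above (0, 2, 4, 5) suggest a Kalman state laid
# out roughly as [x, vx, y, vy, w, h]; this layout is inferred from the code,
# not documented in the original.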
Example #25
def runCAMprocess():
    d = Detection()

    while True:

        img_str = ""
        while True:
            data = camera_conn.recv(1024)
            img_str += data
            if "breakbreakbreak" in str(data):
                break
        print("Received Image")

        received_file = 'received_image.jpg'
        resized_file = 'resized_image.jpg'

        decoded_img = base64.b64decode(
            img_str.replace("breakbreakbreak", "") + "===")

        with open(received_file, 'wb') as f:
            f.write(decoded_img)
        img = cv2.imread(received_file)

        #resize_img = cv2.resize(img, (1280, 960))
        resize_img = cv2.resize(img, (960, 720))
        cv2.imwrite(resized_file, resize_img)

        b64_str = ""
        with open(resized_file, "rb") as imageFile:
            b64_str = base64.b64encode(imageFile.read())
        b64_str = b64_str + "breakbreakbreak"
        num_chunks = len(b64_str) // 1024
        for i in range(0, num_chunks - 1):
            web_conn.send(b64_str[1024 * i:1024 * (i + 1)])
        web_conn.send(b64_str[1024 * (num_chunks - 1):])
        print("Sent Image to Dashboard")

        location_data1, location_data2 = getLocations(d, img)
        controls_conn.send("clean" + location_data1)
        print(location_data1)
        camera_conn.send(location_data1)
        web_conn.send(location_data2)
        print("Sent Position Data to Pi 3 and Dashboard")
        print(" ")
Example #26
def get_box_from_object_detection(yolo, image):
    (h, w) = image.shape[:2]
    center_point = (int(w / 2), int(h / 2))
    final_detections = []

    image = Image.fromarray(image[..., ::-1])  # bgr to rgb
    boxes, confidence, classes = yolo.detect_image(image)

    detections = [
        Detection(bbox, confidence, cls)
        for bbox, confidence, cls in zip(boxes, confidence, classes)
    ]

    # Run non-maxima suppression.
    boxes = np.array([d.tlwh for d in detections])
    scores = np.array([d.confidence for d in detections])
    classes = np.array([d.cls for d in detections])
    indices = non_max_suppression(boxes, 1.0, scores)
    detections = [detections[i] for i in indices]

    # cv2.imshow("Bounding Box", cv2.resize(after_registration_object, (800, 500)))
    # cv2.waitKey(1)

    if len(detections) == 0:
        print("No human object detection!")
        return None

    for detection in detections:
        start_x, _, end_x, _ = detection.to_tlbr()

        # Registration only on the center area of image
        if end_x < center_point[0] or start_x > center_point[0]:
            continue
        final_detections.append(detection)

    # get biggest bounding boxes (closest object)
    if len(final_detections) >= 1:
        final_detections = sorted(final_detections,
                                  key=sort_by_bb_area,
                                  reverse=True)
    else:
        return None

    return final_detections[0].tlwh
Example #27
def load_detections(dataset, detector, boat_class, min_conf):
    text_file_path = "detections_no_desc/%s/%s.txt" % (dataset, detector)
    f = open(text_file_path, "r")
    line = f.readline()
    detections = {}
    comps = []
    while (line):

        line = line.replace("\n", "")
        comps = line.split(",")

        if (int(comps[2]) == boat_class and float(comps[3]) > min_conf):

            if (not comps[0] in detections):
                detections[comps[0]] = []
            if (not (int(comps[4]) > 270 and int(comps[4]) < 740
                     and int(comps[5]) > 540 and int(comps[6]) > 580)):

                detections[comps[0]].append(
                    Detection(comps[3], comps[4:8], comps[8:]))

        line = f.readline()

    f.close()
    detections_after = {}
    for k in detections.keys():
        cur = detections[k]
        detections_after[k] = []
        #print('there was ',len(cur),' detections')
        for item in cur:
            contained = False
            for item2 in cur:
                overlap = get_overlap_to_self(item, item2)

                if (overlap > 0.5):
                    #print('overlap is ',overlap,' discarding...')
                    contained = True
                    break
            if not contained:
                detections_after[k].append(item.copy())
        #print('now there are ', len(detections_after[k]),' detections')
    return detections_after
Example #28
    def convert_single(self, detected_object, frame_millis):
        """Converts single DetectedObject object into Detection.
        
        Arguments:
            detected_object {DetectedObject} -- Object to convert.
            frame_millis {int} -- Analyzed picture taken time.
        
        Returns:
            Detection -- Target object.
        """
        frame_millis = frame_millis % 0xffffffff

        return Detection(
            detected_object.x,
            detected_object.y,
            detected_object.w,
            detected_object.h,
            frame_millis,
            detected_object.frame_id,
            class_name=detected_object.class_name,
            class_id=detected_object.class_id)
Example #29
def setup():

    global camera_conn, camera_addr, controls_conn, controls_addr, web_conn, web_addr

    camera_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    controls_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    web_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print('Sockets created.')

    camera_socket.bind((HOST, CAMERA_PORT))
    controls_socket.bind((HOST, CONTROLS_PORT))
    web_socket.bind((HOST, WEB_PORT))
    print('Socket binds complete.')

    camera_socket.listen(10)
    controls_socket.listen(10)
    web_socket.listen(10)
    print('Sockets now listening.')

    camera_conn, camera_addr = camera_socket.accept()
    controls_conn, controls_addr = controls_socket.accept()
    web_conn, web_addr = web_socket.accept()

    d = Detection()
Example #30
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import cv2
from detection import Detection

sign_detect = Detection()
stream = cv2.VideoCapture(0)
fps = FPS().start()

while True:
	
	(grabbed, frame) = stream.read()
	if not grabbed:
		break
	sign_detect.signDetected(frame)
	cv2.imshow("Frame", frame)
	if cv2.waitKey(10) == 27:
		break
	fps.update()

fps.stop()
stream.release()
cv2.destroyAllWindows()
Example #31
    def create_network(self,
                       net_info,
                       input_width=None,
                       input_height=None,
                       input_channels=None):
        models = OrderedDict()
        blob_channels = dict()
        blob_width = dict()
        blob_height = dict()

        layers = net_info['layers']
        props = net_info['props']
        layer_num = len(layers)

        blob_channels['data'] = 3
        if input_channels != None:
            blob_channels['data'] = input_channels
        blob_height['data'] = 1
        if input_height != None:
            blob_height['data'] = input_height
        blob_width['data'] = 1
        if input_width != None:
            blob_width['data'] = input_width
        if props.has_key('input_shape'):
            blob_channels['data'] = int(props['input_shape']['dim'][1])
            blob_height['data'] = int(props['input_shape']['dim'][2])
            blob_width['data'] = int(props['input_shape']['dim'][3])

            self.width = int(props['input_shape']['dim'][3])
            self.height = int(props['input_shape']['dim'][2])
        elif props.has_key('input_dim'):
            blob_channels['data'] = int(props['input_dim'][1])
            blob_height['data'] = int(props['input_dim'][2])
            blob_width['data'] = int(props['input_dim'][3])

            self.width = int(props['input_dim'][3])
            self.height = int(props['input_dim'][2])

        if input_width != None and input_height != None:
            blob_width['data'] = input_width
            blob_height['data'] = input_height
            self.width = input_width
            self.height = input_height

        i = 0
        while i < layer_num:
            layer = layers[i]
            lname = layer['name']
            if layer.has_key('include') and layer['include'].has_key('phase'):
                phase = layer['include']['phase']
                lname = lname + '.' + phase
            ltype = layer['type']
            tname = layer['top']
            if ltype in ['Data', 'AnnotatedData']:
                if not self.omit_data_layer:
                    models[lname] = CaffeData(layer.copy())
                    data, label = models[lname].forward()
                    data_name = tname[0] if type(tname) == list else tname
                    blob_channels[data_name] = data.size(
                        1)  # len(layer['transform_param']['mean_value'])
                    blob_height[data_name] = data.size(
                        2
                    )  #int(layer['transform_param']['resize_param']['height'])
                    blob_width[data_name] = data.size(
                        3
                    )  #int(layer['transform_param']['resize_param']['width'])
                    self.height = blob_height[data_name]
                    self.width = blob_width[data_name]
                i = i + 1
                continue
            bname = layer['bottom']
            if ltype == 'Convolution':
                convolution_param = layer['convolution_param']
                channels = blob_channels[bname]
                out_filters = int(convolution_param['num_output'])
                kernel_size = int(convolution_param['kernel_size'])
                stride = int(convolution_param['stride']
                             ) if convolution_param.has_key('stride') else 1
                pad = int(convolution_param['pad']
                          ) if convolution_param.has_key('pad') else 0
                group = int(convolution_param['group']
                            ) if convolution_param.has_key('group') else 1
                dilation = 1
                if convolution_param.has_key('dilation'):
                    dilation = int(convolution_param['dilation'])
                bias = True
                if convolution_param.has_key(
                        'bias_term'
                ) and convolution_param['bias_term'] == 'false':
                    bias = False
                models[lname] = nn.Conv2d(channels,
                                          out_filters,
                                          kernel_size=kernel_size,
                                          stride=stride,
                                          padding=pad,
                                          dilation=dilation,
                                          groups=group,
                                          bias=bias)
                blob_channels[tname] = out_filters
                blob_width[tname] = (blob_width[bname] + 2 * pad -
                                     kernel_size) / stride + 1
                blob_height[tname] = (blob_height[bname] + 2 * pad -
                                      kernel_size) / stride + 1
                i = i + 1
            elif ltype == 'BatchNorm':
                momentum = 0.9
                if layer.has_key('batch_norm_param') and layer[
                        'batch_norm_param'].has_key('moving_average_fraction'):
                    momentum = float(
                        layer['batch_norm_param']['moving_average_fraction'])
                channels = blob_channels[bname]
                models[lname] = nn.BatchNorm2d(channels,
                                               momentum=momentum,
                                               affine=False)
                blob_channels[tname] = channels
                blob_width[tname] = blob_width[bname]
                blob_height[tname] = blob_height[bname]
                i = i + 1
            elif ltype == 'Scale':
                channels = blob_channels[bname]
                models[lname] = Scale(channels)
                blob_channels[tname] = channels
                blob_width[tname] = blob_width[bname]
                blob_height[tname] = blob_height[bname]
                i = i + 1
            elif ltype == 'ReLU':
                inplace = (bname == tname)
                if layer.has_key('relu_param') and layer['relu_param'].has_key(
                        'negative_slope'):
                    negative_slope = float(
                        layer['relu_param']['negative_slope'])
                    models[lname] = nn.LeakyReLU(negative_slope=negative_slope,
                                                 inplace=inplace)
                else:
                    models[lname] = nn.ReLU(inplace=inplace)
                blob_channels[tname] = blob_channels[bname]
                blob_width[tname] = blob_width[bname]
                blob_height[tname] = blob_height[bname]
                i = i + 1
            elif ltype == 'Pooling':
                kernel_size = int(layer['pooling_param']['kernel_size'])
                stride = int(layer['pooling_param']['stride'])
                padding = 0
                if layer['pooling_param'].has_key('pad'):
                    padding = int(layer['pooling_param']['pad'])
                pool_type = layer['pooling_param']['pool']
                if pool_type == 'MAX':
                    models[lname] = nn.MaxPool2d(kernel_size,
                                                 stride,
                                                 padding=padding,
                                                 ceil_mode=True)
                elif pool_type == 'AVE':
                    models[lname] = nn.AvgPool2d(kernel_size,
                                                 stride,
                                                 padding=padding,
                                                 ceil_mode=True)

                blob_width[tname] = int(
                    math.ceil((blob_width[bname] + 2 * padding - kernel_size) /
                              float(stride))) + 1
                blob_height[tname] = int(
                    math.ceil(
                        (blob_height[bname] + 2 * padding - kernel_size) /
                        float(stride))) + 1
                blob_channels[tname] = blob_channels[bname]
                i = i + 1
            elif ltype == 'Eltwise':
                operation = 'SUM'
                if layer.has_key('eltwise_param') and layer[
                        'eltwise_param'].has_key('operation'):
                    operation = layer['eltwise_param']['operation']
                bname0 = bname[0]
                bname1 = bname[1]
                models[lname] = Eltwise(operation)
                blob_channels[tname] = blob_channels[bname0]
                blob_width[tname] = blob_width[bname0]
                blob_height[tname] = blob_height[bname0]
                i = i + 1
            elif ltype == 'InnerProduct':
                filters = int(layer['inner_product_param']['num_output'])
                if blob_width[bname] != -1 or blob_height[bname] != -1:
                    channels = blob_channels[bname] * blob_width[
                        bname] * blob_height[bname]
                    models[lname] = nn.Sequential(FCView(),
                                                  nn.Linear(channels, filters))
                else:
                    channels = blob_channels[bname]
                    models[lname] = nn.Linear(channels, filters)
                blob_channels[tname] = filters
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'Dropout':
                channels = blob_channels[bname]
                dropout_ratio = float(layer['dropout_param']['dropout_ratio'])
                models[lname] = nn.Dropout(dropout_ratio, inplace=True)
                blob_channels[tname] = blob_channels[bname]
                blob_width[tname] = blob_width[bname]
                blob_height[tname] = blob_height[bname]
                i = i + 1
            elif ltype == 'Normalize':
                channels = blob_channels[bname]
                scale = float(layer['norm_param']['scale_filler']['value'])
                models[lname] = Normalize(channels, scale)
                blob_channels[tname] = blob_channels[bname]
                blob_width[tname] = blob_width[bname]
                blob_height[tname] = blob_height[bname]
                i = i + 1
            elif ltype == 'LRN':
                local_size = int(layer['lrn_param']['local_size'])
                alpha = float(layer['lrn_param']['alpha'])
                beta = float(layer['lrn_param']['beta'])
                models[lname] = LRN(local_size, alpha, beta)
                blob_channels[tname] = blob_channels[bname]
                blob_width[tname] = blob_width[bname]
                blob_height[tname] = blob_height[bname]
                i = i + 1
            elif ltype == 'Permute':
                orders = layer['permute_param']['order']
                order0 = int(orders[0])
                order1 = int(orders[1])
                order2 = int(orders[2])
                order3 = int(orders[3])
                models[lname] = Permute(order0, order1, order2, order3)
                shape = [
                    1, blob_channels[bname], blob_height[bname],
                    blob_width[bname]
                ]
                blob_channels[tname] = shape[order1]
                blob_height[tname] = shape[order2]
                blob_width[tname] = shape[order3]
                i = i + 1
            elif ltype == 'Flatten':
                axis = int(layer['flatten_param']['axis'])
                models[lname] = Flatten(axis)
                blob_channels[tname] = blob_channels[bname] * blob_width[
                    bname] * blob_height[bname]
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'Slice':
                axis = int(layer['slice_param']['axis'])
                assert (axis == 1)
                assert (type(tname) == list)
                slice_points = layer['slice_param']['slice_point']
                assert (type(slice_points) == list)
                assert (len(slice_points) == len(tname) - 1)
                slice_points = [int(s) for s in slice_points]
                shape = [
                    1, blob_channels[bname], blob_height[bname],
                    blob_width[bname]
                ]
                slice_points.append(shape[axis])
                models[lname] = Slice(axis, slice_points)
                prev = 0
                for idx, tn in enumerate(tname):
                    blob_channels[tn] = slice_points[idx] - prev
                    blob_width[tn] = blob_width[bname]
                    blob_height[tn] = blob_height[bname]
                    prev = slice_points[idx]
                i = i + 1
            elif ltype == 'Concat':
                axis = 1
                if layer.has_key('concat_param'
                                 ) and layer['concat_param'].has_key('axis'):
                    axis = int(layer['concat_param']['axis'])
                models[lname] = Concat(axis)
                if axis == 1:
                    blob_channels[tname] = 0
                    for bn in bname:
                        blob_channels[tname] += blob_channels[bn]
                        blob_width[tname] = blob_width[bn]
                        blob_height[tname] = blob_height[bn]
                elif axis == 2:
                    blob_channels[tname] = blob_channels[bname[0]]
                    blob_width[tname] = 1
                    blob_height[tname] = 0
                    for bn in bname:
                        blob_height[tname] += blob_height[bn]
                i = i + 1
            elif ltype == 'PriorBox':
                min_size = float(layer['prior_box_param']['min_size'])
                max_size = -1
                if layer['prior_box_param'].has_key('max_size'):
                    max_size = float(layer['prior_box_param']['max_size'])
                aspects = []
                if layer['prior_box_param'].has_key('aspect_ratio'):
                    print(layer['prior_box_param']['aspect_ratio'])
                    aspects = layer['prior_box_param']['aspect_ratio']
                    aspects = [float(aspect) for aspect in aspects]
                clip = (layer['prior_box_param']['clip'] == 'true')
                flip = False
                if layer['prior_box_param'].has_key('flip'):
                    flip = (layer['prior_box_param']['flip'] == 'true')
                step = int(layer['prior_box_param']['step'])
                offset = float(layer['prior_box_param']['offset'])
                variances = layer['prior_box_param']['variance']
                variances = [float(v) for v in variances]
                models[lname] = PriorBox(min_size, max_size, aspects, clip,
                                         flip, step, offset, variances)
                blob_channels[tname] = 1
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'DetectionOutput':
                num_classes = int(
                    layer['detection_output_param']['num_classes'])
                bkg_label = int(
                    layer['detection_output_param']['background_label_id'])
                top_k = int(
                    layer['detection_output_param']['nms_param']['top_k'])
                keep_top_k = int(layer['detection_output_param']['keep_top_k'])
                conf_thresh = float(
                    layer['detection_output_param']['confidence_threshold'])
                nms_thresh = float(layer['detection_output_param']['nms_param']
                                   ['nms_threshold'])
                models[lname] = Detection(num_classes, bkg_label, top_k,
                                          conf_thresh, nms_thresh, keep_top_k)
                blob_channels[tname] = 1
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'MultiBoxLoss':
                num_classes = int(layer['multibox_loss_param']['num_classes'])
                overlap_threshold = float(
                    layer['multibox_loss_param']['overlap_threshold'])
                prior_for_matching = layer['multibox_loss_param'][
                    'use_prior_for_matching'] == 'true'
                bkg_label = int(
                    layer['multibox_loss_param']['background_label_id'])
                neg_mining = True
                neg_pos = float(layer['multibox_loss_param']['neg_pos_ratio'])
                neg_overlap = float(
                    layer['multibox_loss_param']['neg_overlap'])
                models[lname] = MultiBoxLoss(num_classes,
                                             overlap_threshold,
                                             prior_for_matching,
                                             bkg_label,
                                             neg_mining,
                                             neg_pos,
                                             neg_overlap,
                                             use_gpu=True)
                blob_channels[tname] = 1
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'Crop':
                axis = int(layer['crop_param']['axis'])
                offset = int(layer['crop_param']['offset'])
                models[lname] = Crop(axis, offset)
                blob_channels[tname] = blob_channels[bname[0]]
                blob_width[tname] = blob_width[bname[0]]
                blob_height[tname] = blob_height[bname[0]]
                i = i + 1
            elif ltype == 'Deconvolution':
                #models[lname] = nn.UpsamplingBilinear2d(scale_factor=2)
                #models[lname] = nn.Upsample(scale_factor=2, mode='bilinear')
                in_channels = blob_channels[bname]
                out_channels = int(layer['convolution_param']['num_output'])
                group = int(layer['convolution_param']['group'])
                kernel_w = int(layer['convolution_param']['kernel_w'])
                kernel_h = int(layer['convolution_param']['kernel_h'])
                stride_w = int(layer['convolution_param']['stride_w'])
                stride_h = int(layer['convolution_param']['stride_h'])
                pad_w = int(layer['convolution_param']['pad_w'])
                pad_h = int(layer['convolution_param']['pad_h'])
                kernel_size = (kernel_h, kernel_w)
                stride = (stride_h, stride_w)
                padding = (pad_h, pad_w)
                bias_term = layer['convolution_param']['bias_term'] != 'false'
                models[lname] = nn.ConvTranspose2d(in_channels,
                                                   out_channels,
                                                   kernel_size=kernel_size,
                                                   stride=stride,
                                                   padding=padding,
                                                   groups=group,
                                                   bias=bias_term)
                blob_channels[tname] = out_channels
                blob_width[tname] = 2 * blob_width[bname]
                blob_height[tname] = 2 * blob_height[bname]
                i = i + 1
            elif ltype == 'Reshape':
                reshape_dims = layer['reshape_param']['shape']['dim']
                reshape_dims = [int(item) for item in reshape_dims]
                models[lname] = Reshape(reshape_dims)
                blob_channels[tname] = 1
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'Softmax':
                axis = 1
                if layer.has_key('softmax_param'
                                 ) and layer['softmax_param'].has_key('axis'):
                    axis = int(layer['softmax_param']['axis'])
                models[lname] = Softmax(axis)
                blob_channels[tname] = blob_channels[bname]
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'Accuracy':
                models[lname] = Accuracy()
                blob_channels[tname] = 1
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            elif ltype == 'SoftmaxWithLoss':
                models[lname] = SoftmaxWithLoss()
                blob_channels[tname] = 1
                blob_width[tname] = 1
                blob_height[tname] = 1
                i = i + 1
            else:
                print('create_network: unknown type #%s#' % ltype)
                i = i + 1
            input_width = blob_width[bname] if type(
                bname) != list else blob_width[bname[0]]
            input_height = blob_height[bname] if type(
                bname) != list else blob_height[bname[0]]
            input_channels = blob_channels[bname] if type(
                bname) != list else blob_channels[bname[0]]
            output_width = blob_width[tname] if type(
                tname) != list else blob_width[tname[0]]
            output_height = blob_height[tname] if type(
                tname) != list else blob_height[tname[0]]
            output_channels = blob_channels[tname] if type(
                tname) != list else blob_channels[tname[0]]
            print('create %-20s (%4d x %4d x %4d) -> (%4d x %4d x %4d)' %
                  (lname, input_channels, input_height, input_width,
                   output_channels, output_height, output_width))

        return models
Example #32
def compare_distributions():
    img     = Image()
    detect  = Detection()
    histograms = []
    detections = []
    while True:
        img.get()
        #Detect faces
        found = detect.faces(img.image)
        if len(found) > 0:
            # Set reference histogram from roi
            # roi is the first face detected assuming one face in the portview
            hist_ref = img.getColorHistogram(found[0]).ravel()
            histograms.append(hist_ref)
            detections.append(found)
            img.show_hist(hist_ref)
            hist_ref=hist_ref/float(hist_ref.sum())
        if 0xFF & cv2.waitKey(5) == 27:
            break
    print(np.asmatrix(histograms).shape)
    alpha_hat,it= fit_betabinom_minka_alternating(histograms)

    gauss_data_f = []
    gauss_data_n = []
    polya_data_f = []
    polya_data_n = []
    while True:
        img.get()
        found = detect.faces(img.image)
        if len(found) > 0:
            img.draw_roi([found[0]])
            hist=img.getColorHistogram(found[0]).ravel()
            img.show_hist(hist)
            hist_norm=hist/float(hist.sum())
            # For a face
            D=bhattacharyya(hist_ref,hist_norm)
            G=np.exp(-20.*D**2)
            P=np.exp(log_like_polya(alpha_hat,np.array([hist])))

            # For a non-face

            x = 0#random.sample([random.uniform(0,img.size[0]) for i in range(100)],1)[0]
            y = 0#random.sample([random.uniform(0,img.size[1]) for i in range(100)],1)[0]
            w = found[0][2]#random.sample([random.uniform(0,200) for i in range(100)],1)[0]
            h = found[0][3]#random.sample([random.uniform(0,200) for i in range(100)],1)[0]
            rect = (x,y,w,h)

            hist = img.getColorHistogram(rect).ravel()
            img.show_hist(hist,'NO FACE')
            hist_norm=hist/float(hist.sum())
            D2=bhattacharyya(hist_ref,hist_norm)
            G2=np.exp(-20.*D2**2)
            P2=np.exp(log_like_polya(alpha_hat,np.array([hist])))
            print('B={0}, B={1} | G={2}, G={3} | P={4}, P={5}'.format(D, D2, G, G2, P, P2))
            gauss_data_n.append(G2)
            gauss_data_f.append(G)
            polya_data_n.append(P2)
            polya_data_f.append(P)
        img.show()
        if 0xFF & cv2.waitKey(5) == 27:
            cv2.destroyAllWindows()
            break
    gauss_data_f = np.asarray(gauss_data_f)        
    gauss_data_f = gauss_data_f[~np.isnan(gauss_data_f)]
    gauss_data_n = np.asarray(gauss_data_n)
    gauss_data_n = gauss_data_n[~np.isnan(gauss_data_n)]
    d            =  np.concatenate((gauss_data_f,gauss_data_n))
    v = np.concatenate(( np.ones(len(gauss_data_f)), np.zeros(len(gauss_data_n))))
    
    fpr, tpr, thresholds = roc_curve(v, d)
    roc_auc = auc(fpr, tpr)
    print("Area under the ROC curve : %f" % roc_auc)
    
    polya_data_f = np.asarray(polya_data_f)        
    polya_data_f = polya_data_f[~np.isnan(polya_data_f)] #Remove Nan elements
    polya_data_n = np.asarray(polya_data_n)
    polya_data_n = polya_data_n[~np.isnan(polya_data_n)] #Remove Nan elements
    d            =  np.concatenate((polya_data_f,polya_data_n))
    v = np.concatenate(( np.ones(len(polya_data_f)), np.zeros(len(polya_data_n))))
    fpr2, tpr2, thresholds = roc_curve(v, d)
    roc_auc2 = auc(fpr2, tpr2)
    print("Area under the ROC curve : %f" % roc_auc2)
    
    pl.subplot(212)
    pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    pl.plot([0, 1], [0, 1], 'k--')
    pl.xlim([0.0, 1.0])
    pl.ylim([0.0, 1.0])
    pl.xlabel('False Positive Rate')
    pl.ylabel('True Positive Rate')
    pl.title('ROC for Gaussian')
    pl.subplot(211)
    pl.plot(fpr2, tpr2, label='ROC curve (area = %0.2f)' % roc_auc2)
    pl.plot([0, 1], [0, 1], 'k--')
    pl.xlim([0.0, 1.0])
    pl.ylim([0.0, 1.0])
    pl.xlabel('False Positive Rate')
    pl.ylabel('True Positive Rate')
    pl.title('ROC for Polya')
    pl.show()
Example #33
class Vision:

    def __init__(self, pitch_num, stdout, reset_pitch_size, reset_thresh,
                 scale, colour_order, render_tlayers, file_input=None):
        self.running = True
        self.connected = False
        self.scale = scale
        self.stdout = stdout
        self._logger = Logger('vision_errors.log')

        if file_input is None:
            self.cam = Camera(prop_set = {"width": 720, "height": 540})
        else:
            file_type = 'video'
            if file_input.endswith(('jpg', 'png')):
                file_type = 'image'
            self.cam = VirtualCamera(file_input, file_type)

        try:
            calibration_path = os.path.join('calibration', 'pitch{0}'.format(pitch_num))
            self.cam.loadCalibration(os.path.join(sys.path[0], calibration_path))
        except TypeError:
            error_msg = 'Calibration file not found.'
            self._logger.log(error_msg)
            print(error_msg)

        self.cropper = Cropper(pitch_num=pitch_num, reset_pitch=reset_pitch_size)
        self.processor = Processor(pitch_num, reset_pitch_size, reset_thresh, scale)
        if self.cropper.is_ready():
            self.gui = Gui(self.cropper.pitch_size)
        else:
            self.gui = Gui()
        self.threshold_gui = ThresholdGui(self.processor, self.gui, pitch_num = pitch_num)
        self.detection = Detection(self.gui, self.processor, colour_order, scale, pitch_num,
                                   render_tlayers=render_tlayers)
        self.event_handler = self.gui.get_event_handler()
        self.event_handler.add_listener('q', self.quit)

        while self.running:
            try:
                if not self.stdout:
                    self.connect()
                else:
                    self.connected = True
                if self.cropper.is_ready():
                    #self.output_pitch_size()
                    self.detection.set_coord_rect(self.cropper.get_coord_rect())
                    self.detection.set_pitch_dims(self.cropper.pitch_size)
                    self.processor.set_crop_rect(self.cropper.get_crop_rect())
                    self.gui.set_show_mouse(False)
                else:
                    self.event_handler.set_click_listener(self.set_next_pitch_corner)
                while self.running:
                    self.process_frame()
            except socket.error:
                self.connected = False
                # If the rest of the system is not up yet/gets quit,
                # just wait for it to come available.
                time.sleep(1)
                error_msg = 'Connection error, sleeping 1s...'
                self._logger.log(error_msg)
                print(error_msg)
                self.process_frame()

        if not self.stdout:
            self.socket.close()

    def process_frame(self):
        """Get frame, detect objects and display frame
        """
        # This is where calibration comes in
        if self.cam.getCameraMatrix() is None:
            frame = self.cam.getImage()
        else:
            frame = self.cam.getImageUndistort()

        self.processor.preprocess(frame, self.scale)
        if self.cropper.is_ready():
            self.gui.update_layer('raw', self.processor.get_bgr_frame())
        else:
            self.gui.update_layer('raw', frame)

        if self.cropper.is_ready():
            entities = self.detection.detect_objects()
            self.output_entities(entities)

        self.gui.process_update()

    def set_next_pitch_corner(self, where):

        self.cropper.set_point(where)

        if self.cropper.is_ready():
            #self.output_pitch_size()
            self.processor.set_crop_rect(self.cropper.get_crop_rect())
            self.detection.set_pitch_dims(self.cropper.pitch_size)
            self.detection.set_coord_rect(self.cropper.get_coord_rect())
            self.gui.draw_crosshair(self.cropper.get_coord_rect()[0], 'corner1')
            self.gui.draw_crosshair(self.cropper.get_coord_rect()[1], 'corner2')
            self.gui.set_show_mouse(False)
            self.gui.update_layer('corner', None)
        else:
            self.gui.draw_crosshair(where, 'corner')

    def output_pitch_size(self):
        self.send('{0} {1}\n'.format(PITCH_SIZE_BIT, self.processor.pitch_points_string))
        

    def output_entities(self, entities):

        if not self.connected or not self.cropper.is_ready():
            return

        self.send('{0} '.format(ENTITY_BIT))

        for i in range(4):  # the four tracked entities; the ball is sent separately
            entity = entities[i]
            x, y = entity.get_coordinates()
            angle = -1 if entity.get_angle() is None else entity.get_angle()
            self.send('{0} {1} {2} '.format(x, y, angle))

        x, y = entities[BALL].get_coordinates()
        self.send('{0} {1} '.format(x, y))
        self.send(str(int(time.time() * 1000)) + "\n")

    def send(self, string):
        if self.stdout:
            sys.stdout.write(string)
        else:
            self.socket.send(string)

    def connect(self):
        print('Attempting to connect...')
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((HOST, PORT))
        self.connected = True
        print('Successfully connected.')

    def quit(self):
        self.running = False
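
For reference, each record written by output_entities above is a single space-separated line: the ENTITY_BIT tag, an "x y angle" triple for each of the four tracked entities, the ball's "x y" pair, and a millisecond timestamp. A minimal receiver-side parser under that reading could look like the sketch below (the function name and return shape are illustrative, not part of the original code):

def parse_entity_line(line):
    """Parse one line produced by output_entities (format inferred above)."""
    fields = line.split()
    tag = fields[0]                       # ENTITY_BIT
    entities = []
    for i in range(4):                    # four "x y angle" triples
        x, y, angle = fields[1 + 3 * i:4 + 3 * i]
        entities.append((float(x), float(y), float(angle)))
    ball = (float(fields[13]), float(fields[14]))
    timestamp_ms = int(fields[15])        # milliseconds since the epoch
    return tag, entities, ball, timestamp_ms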
Example #35
def detect_class(projdir, detmodel, classmodel, detsession, clssession, dataset, detargs, classargs):
    
    # Inference only: restore the pretrained detection and classification
    # networks and run them over the test set.
    # FOLDER VARIABLES
    detsessiondir = projdir + 'nets/' + detmodel + '_' + detsession + '/'
    clssessiondir = projdir + 'nets/' + classmodel + '_' + clssession + '/'
    resultsdir = projdir + 'testresults/' + detmodel + '_' + classmodel +  '_' + clssession + '/'
    datadir = projdir + 'data/' + dataset
    test_dir = datadir+'/testing'
    
    if not os.path.exists(resultsdir):
        os.mkdir(resultsdir)
        
    
    # Detection model: build graph and restore weights
    detgraph = tf.Graph()
    with detgraph.as_default():
        detsess = tf.Session()
        detx = tf.placeholder("float", shape=[None, None, None, 3])
        #xsize = tf.placeholder(tf.int32, shape=[2])
        dety = network(detx,detmodel, detargs)
        
        detsaver = tf.train.Saver()
        if os.path.isfile(detsessiondir+'checkpoint'):
            detsaver.restore(detsess, tf.train.latest_checkpoint(detsessiondir))
        else:
            print('Detection model not pretrained')
    
    
    clsgraph = tf.Graph()
    with clsgraph.as_default():
        clssess = tf.Session()
        
        # Classification model: build graph and restore weights
        clsx = tf.placeholder("float", shape=[None, None, None, 3])
        #xsize = tf.placeholder(tf.int32, shape=[2])
        clsy = network(clsx, classmodel, classargs)    
        clssaver = tf.train.Saver()
        if os.path.isfile(clssessiondir+'checkpoint'):
            clssaver.restore(clssess, tf.train.latest_checkpoint(clssessiondir))
        else:
            print('Classification model not pretrained')
         
    
    detobj = Detection()
    #clsobj = Classification()
    
    # Read test data
    det_data, filenames = detobj.read_testing_sets(test_dir)
    testdata = preprocess(det_data.testdata)
    
    print('GO')
    # for each test image:
    for j in range(testdata.shape[0]):
        print(j)
        # per-image detections grouped by class: class index -> ([ys], [xs])
        classifications = {}

        for i in range(classargs['nouts']):
            classifications[i] = ([], [])
        #imagename = str(j) + '.jpg'        
        
        # run detection net on test image
        with detgraph.as_default():
            detres = detsess.run([dety], feed_dict={detx: testdata[j:j+1]})
        image = detres[0][0,:,:,0]
        image = scipy.ndimage.gaussian_filter(image, sigma=(1, 1), order=0)
        #fig = plt.figure(frameon=False)
        #ax = fig.add_subplot(111)
        #ax.imshow(image, aspect='normal')
        #fig.savefig(resultsdir+str(j)+'_detout.jpg')
        plt.imsave(resultsdir+filenames[j]+'_detout.jpg', image)
        #plt.clf()
        # put detections on image
        nmy, nmx = detection.nonmaxsuppresion(image)
        #print detargs['patchsize']
        patches = detection.getpatches(nmy, nmx, testdata[j], detargs['patchsize'])
        #patches = detection.getpatches(nmy, nmx, det_data.testdata[j], detargs['patchsize'])
        # for each detection in image:
        for i in range(len(nmy)):
            # extract patch around detection
            patch = patches[(nmy[i],nmx[i])]
            #patch = preprocess(np.expand_dims(patch,0))
            patch = np.expand_dims(patch,0)
            # run classification net on patch
            with clsgraph.as_default():
                # weights were already restored above; reloading the
                # checkpoint for every patch is unnecessary
                clsres = clssess.run([clsy], feed_dict={clsx: patch})
            classification = np.argmax(clsres[0], 1)[0]
            #print clsres[0]
            ylist, xlist = classifications[classification]
            ylist.append(nmy[i])
            xlist.append(nmx[i])
            classifications[classification] = (ylist,xlist)
            
            # save coordinates, class and test image entry in file
            #string = imagename + '\t' + str(nmy[i]) + '\t' + str(nmx[i]) + '\t' + str(classification)
            
        
        # save image with detections overlaid in different colors
        
        #plt.imshow(original)
        
        colors = ['b', 'g', 'r']  # assumes classargs['nouts'] <= 3
        #ax = plt.Axes(fig, [0., 0., 1., 1.])
        #ax.set_axis_off()
        #fig.add_axes(ax)
        fig = plt.figure(frameon=False)
        ax = fig.add_subplot(111)
        ax.imshow(det_data.testdata[j], aspect='auto')
        for i in range(classargs['nouts']):
            cly, clx = classifications[i]
            ax.scatter(clx, cly, c=colors[i], s=20, marker='+')
            print(len(cly))  # number of detections assigned to this class
        fig.savefig(resultsdir+filenames[j]+'_detclassifications.jpg')
        plt.clf()
        #ax.clear()
        plt.close('all')
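
The nonmaxsuppresion and getpatches helpers come from the project's detection module and are not shown here. For orientation, a minimal sketch of non-maximum suppression over a 2-D response map like the one above might look as follows (window size and threshold are assumed values, not taken from the original code):

import numpy as np
from scipy.ndimage import maximum_filter

def nonmax_suppression(response, size=11, threshold=0.5):
    """Return (ys, xs) of local maxima of response above threshold."""
    # a pixel is a peak if it equals the maximum over its neighbourhood
    peaks = (maximum_filter(response, size=size) == response) & (response > threshold)
    ys, xs = np.nonzero(peaks)
    return ys, xs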
Example #36
class FruitMachine(Feature, Speaking, Emotion):

    def __init__(self, text_to_speech, speech_to_text):
        Feature.__init__(self)
        Speaking.__init__(self, text_to_speech)
        Emotion.__init__(self)
        self.speech_to_text = speech_to_text
        self.background_image = np.array([])
        self.detection_image = np.array([])
        self.detection = Detection()
        self.reels = [None, None, None]
        self.holds = [None, None]
        self.coins = 100

    # start thread
    def start(self, args=None):
        Feature.start(self, args)
        self.background_image = args
        self.detection_image = args.copy()

        # draw holds
        self.background_image = draw_holds(self.holds, self.background_image)

        # rotate and draw reels
        self.reels = rotate_reels(self.reels)
        draw_reels(self.reels)

    # stop thread
    def stop(self):
        Feature.stop(self)
        self.background_image = np.array([])

    # run thread
    def _thread(self, args):

        # check player has coins
        if self.coins == 0:
            self._text_to_speech("Sorry dude, you're all out of cash")
            return

        # on occasion, allow player to hold reels
        if (None not in self.reels) and (randint(0, 2) == 0):

            # croupier tells player that one or two reels can be held
            self._text_to_speech("If you want to hold one or two fruits, press them now")

            # player selects holds
            self.detection.set_previous_image(self.detection_image)

            for i, hold in enumerate(self.holds):
                timeout = time.time() + 5

                while True:
                    active_cell = self.detection.get_active_cell(self.detection_image)

                    if (active_cell is not None) and (active_cell not in self.holds):
                        self.holds[i] = active_cell
                        break

                    if time.time() > timeout:
                        break

                if self.holds[i] is None:
                    break

        # croupier asks player if ready to spin reels
        self._text_to_speech("Just say the word Start, and I'll spin the fruits")

        # wait until player says "start"
        while self.speech_to_text.convert() != "start": continue

        # refresh reels
        self.reels = refresh_reels(self.reels, self.holds)

        # wait while reels rotate
        while is_reels_rotating(self.reels):
            time.sleep(1)

        # clear any holds
        self.holds = [None, None]

        # determine if player has won or lost
        if is_reels_win(self.reels):
            self.coins += 50
            self._text_to_speech("Wow, you won! You now have {} coins".format(self.coins))
            self._display_emotion(HAPPY)
        else:
            self.coins -= 10
            self._text_to_speech("Damn, you lost! You now have {} coins".format(self.coins))
            self._display_emotion(SAD)
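
The reel helpers (rotate_reels, refresh_reels, is_reels_win, and friends) are imported from elsewhere and not shown. Assuming each reel ultimately holds a single fruit symbol, the win check could be as small as the sketch below; the payout logic above then adds 50 coins on a win and deducts 10 on a loss:

def is_reels_win(reels):
    # assumed rule: all three reels show the same (non-empty) symbol
    return reels[0] is not None and reels[0] == reels[1] == reels[2]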