Example #1
    def _handle_input_data(self):
        if self.engine_id == "LEGO_SLOW":
            import lego_cv as lc
        else:
            import lego_cv_fast as lc

        # receive data from control VM
        header_size = struct.unpack("!I", self._recv_all(4))[0]
        data_size = struct.unpack("!I", self._recv_all(4))[0]
        header_str = self._recv_all(header_size)
        image_data = self._recv_all(data_size)
        #header = json.loads(header_str)

        ## symbolic representation extraction
        img = zc.raw2cv_image(image_data)
        stretch_ratio = float(16) / 9 * img.shape[0] / img.shape[1]
        # img.shape is (height, width, channels)
        if img.shape != (config.IMAGE_HEIGHT, config.IMAGE_WIDTH, 3):
            img = cv2.resize(img, (config.IMAGE_WIDTH, config.IMAGE_HEIGHT),
                             interpolation=cv2.INTER_AREA)
        zc.check_and_display('input',
                             img,
                             display_list,
                             wait_time=config.DISPLAY_WAIT_TIME,
                             resize_max=config.DISPLAY_MAX_PIXEL)

        # get bitmap for current image
        rtn_msg, bitmap = lc.process(img, stretch_ratio, display_list)
        if rtn_msg['status'] != 'success':
            print(rtn_msg['message'])
            result_str = "None"
        else:
            result_str = json.dumps(bitmap.tolist())

        packet = struct.pack("!I%dsI%ds" % (len(header_str), len(result_str)),
                             len(header_str), header_str, len(result_str),
                             result_str)
        self.sock.sendall(packet)
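
The handler above relies on a _recv_all() helper that is not part of the snippet. A minimal sketch of what it could look like, assuming self.sock is a blocking TCP socket (the implementation shown is an assumption, not the project's actual code):

    # Assumed helper: read exactly recv_size bytes from the socket, looping
    # because a single recv() may return fewer bytes than requested.
    def _recv_all(self, recv_size):
        data = b''
        while len(data) < recv_size:
            more = self.sock.recv(recv_size - len(data))
            if not more:
                raise Exception("socket closed with %d bytes still expected"
                                % (recv_size - len(data)))
            data += more
        return data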
Example #2
    def _handle_img(self, img):
        if self.is_first_frame and not config.RECOGNIZE_ONLY: # do something special when the task begins
            result, img_guidance = self.task.get_first_guidance()
            zc.check_and_display('guidance', img_guidance, display_list,
                                 wait_time=config.DISPLAY_WAIT_TIME,
                                 resize_max=config.DISPLAY_MAX_PIXEL)
            self.is_first_frame = False
            result['state_index'] = 0  # first step
            return json.dumps(result)

        result = {'status' : "nothing"} # default

        stretch_ratio = float(16) / 9 * img.shape[0] / img.shape[1]
        # img.shape is (height, width, channels)
        if img.shape != (config.IMAGE_HEIGHT, config.IMAGE_WIDTH, 3):
            img = cv2.resize(img, (config.IMAGE_WIDTH, config.IMAGE_HEIGHT),
                             interpolation=cv2.INTER_AREA)

        ## get bitmap for current image
        zc.check_and_display('input', img, display_list,
                             wait_time=config.DISPLAY_WAIT_TIME,
                             resize_max=config.DISPLAY_MAX_PIXEL)
        rtn_msg, bitmap = lc.process(img, stretch_ratio, display_list)
        if rtn_msg['status'] != 'success':
            print(rtn_msg['message'])
            if rtn_msg['message'] == "Not confident about reconstruction, maybe too much noise":
                self.counter['not_confident'] += 1
            return json.dumps(result)

        self.counter['confident'] += 1

        ## try to commit bitmap
        state_change = False
        if not bm.bitmap_same(self.commited_bitmap, bitmap):
            current_time = time.time()
            if not bm.bitmap_same(self.temp_bitmap['bitmap'], bitmap):
                self.temp_bitmap['bitmap'] = bitmap
                self.temp_bitmap['first_time'] = current_time
                self.temp_bitmap['count'] = 0
                self.counter['diff_from_prev'] += 1
            else:
                self.counter['same_as_prev'] += 1
            self.temp_bitmap['count'] += 1
            if (current_time - self.temp_bitmap['first_time'] > config.BM_WINDOW_MIN_TIME
                    or self.temp_bitmap['count'] >= config.BM_WINDOW_MIN_COUNT):
                self.commited_bitmap = self.temp_bitmap['bitmap']
                state_change = True
        #print "\n\n\n\n\n%s\n\n\n\n\n" % self.counter

        bitmap = self.commited_bitmap
        if 'lego_syn' in display_list and bitmap is not None:
            img_syn = bm.bitmap2syn_img(bitmap)
            zc.display_image('lego_syn', img_syn,
                             wait_time=config.DISPLAY_WAIT_TIME, resize_scale=50)

        if config.RECOGNIZE_ONLY:
            return json.dumps(result)

        ## now user has done something, provide some feedback
        img_guidance = None
        if state_change:
            self.task.update_state(bitmap)
            result, img_guidance = self.task.get_guidance()

            if self.task.is_final_state():
                step_idx = len(self.task.states)
            else:
                # get current step
                step_idx = self.task.state2idx(self.task.current_state)
                # report -1 whenever the index lookup fails; otherwise shift the
                # task's internal index (which starts at 0) up by 1 so it can be
                # distinguished from the default initial step, which is reported
                # as index 0
                step_idx = -1 if step_idx < 0 else step_idx + 1
            result['state_index'] = step_idx

        if img_guidance is not None:
            zc.check_and_display('guidance', img_guidance, display_list,
                                 wait_time=config.DISPLAY_WAIT_TIME,
                                 resize_max=config.DISPLAY_MAX_PIXEL)

        return json.dumps(result)
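
bm.bitmap_same() is used here purely as an equality test between the committed bitmap (possibly None) and the freshly detected one. A hedged sketch of such a check, assuming the bitmaps are NumPy arrays (an illustration, not the actual bitmap-module code):

import numpy as np

# Illustrative stand-in for bm.bitmap_same(): two bitmaps count as the same
# only if both exist, have identical shape, and match element-wise.
def bitmap_same(bm1, bm2):
    if bm1 is None or bm2 is None:
        return bm1 is bm2
    return bm1.shape == bm2.shape and np.array_equal(bm1, bm2)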
Example #3

# load test image
input_file = parse_arguments()
img = cv2.imread(input_file)
stretch_ratio = float(16) / 9 * img.shape[0] / img.shape[1]
# img.shape is (height, width, channels)
if img.shape != (config.IMAGE_HEIGHT, config.IMAGE_WIDTH, 3):
    img = cv2.resize(img, (config.IMAGE_WIDTH, config.IMAGE_HEIGHT),
                     interpolation=cv2.INTER_AREA)

zc.check_and_display("input",
                     img,
                     display_list,
                     resize_max=config.DISPLAY_MAX_PIXEL,
                     wait_time=config.DISPLAY_WAIT_TIME)

# process image and get the symbolic representation
rtn_msg, bitmap = lc.process(img, stretch_ratio, display_list)
if 'lego_syn' in display_list and bitmap is not None:
    img_syn = bm.bitmap2syn_img(bitmap)
    zc.display_image('lego_syn',
                     img_syn,
                     wait_time=config.DISPLAY_WAIT_TIME,
                     resize_scale=50)

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt as e:
    sys.stdout.write("user exits\n")
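
parse_arguments() is not shown in this example. A minimal stand-in that returns the path of the test image could be written with argparse (assumed implementation, for illustration only):

import argparse

# Hypothetical replacement for the parse_arguments() helper used above; it
# only needs to return the path of the image to process.
def parse_arguments():
    parser = argparse.ArgumentParser(
        description="Run the Lego detector on a single test image")
    parser.add_argument("input_file", help="path to the test image")
    return parser.parse_args().input_file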
Example #4
    def _handle_img(self, img):
        if self.is_first_frame and not config.RECOGNIZE_ONLY:  # do something special when the task begins
            result, img_guidance = self.task.get_first_guidance()
            zc.check_and_display('guidance',
                                 img_guidance,
                                 display_list,
                                 wait_time=config.DISPLAY_WAIT_TIME,
                                 resize_max=config.DISPLAY_MAX_PIXEL)
            result['image'] = b64encode(zc.cv_image2raw(img_guidance))
            result.pop('animation', None)
            self.is_first_frame = False
            return json.dumps(result)

        result = {'status': "nothing"}  # default

        stretch_ratio = float(16) / 9 * img.shape[0] / img.shape[1]
        # img.shape is (height, width, channels)
        if img.shape != (config.IMAGE_HEIGHT, config.IMAGE_WIDTH, 3):
            img = cv2.resize(img, (config.IMAGE_WIDTH, config.IMAGE_HEIGHT),
                             interpolation=cv2.INTER_AREA)

        ## get bitmap for current image
        zc.check_and_display('input',
                             img,
                             display_list,
                             wait_time=config.DISPLAY_WAIT_TIME,
                             resize_max=config.DISPLAY_MAX_PIXEL)
        rtn_msg, bitmap = lc.process(img, stretch_ratio, display_list)

        if gabriel.Debug.TIME_MEASUREMENT:
            result[gabriel.Protocol_measurement.JSON_KEY_APP_SYMBOLIC_TIME] = time.time()

        if rtn_msg['status'] != 'success':
            print(rtn_msg['message'])
            if rtn_msg['message'] == "Not confident about reconstruction, maybe too much noise":
                self.counter['not_confident'] += 1
            return json.dumps(result)

        self.counter['confident'] += 1

        ## try to commit bitmap
        state_change = False
        if not bm.bitmap_same(self.commited_bitmap, bitmap):
            current_time = time.time()
            if not bm.bitmap_same(self.temp_bitmap['bitmap'], bitmap):
                self.temp_bitmap['bitmap'] = bitmap
                self.temp_bitmap['first_time'] = current_time
                self.temp_bitmap['count'] = 0
                self.counter['diff_from_prev'] += 1
            else:
                self.counter['same_as_prev'] += 1
            self.temp_bitmap['count'] += 1
            if (current_time - self.temp_bitmap['first_time'] > config.BM_WINDOW_MIN_TIME
                    or self.temp_bitmap['count'] >= config.BM_WINDOW_MIN_COUNT):
                self.commited_bitmap = self.temp_bitmap['bitmap']
                state_change = True
        #print "\n\n\n\n\n%s\n\n\n\n\n" % self.counter

        bitmap = self.commited_bitmap
        if 'lego_syn' in display_list and bitmap is not None:
            img_syn = bm.bitmap2syn_img(bitmap)
            zc.display_image('lego_syn',
                             img_syn,
                             wait_time=config.DISPLAY_WAIT_TIME,
                             resize_scale=50)

        if config.RECOGNIZE_ONLY:
            return json.dumps(result)

        ## now user has done something, provide some feedback
        img_guidance = None
        if state_change:
            self.task.update_state(bitmap)
            sym_time = result[gabriel.Protocol_measurement.JSON_KEY_APP_SYMBOLIC_TIME]
            result, img_guidance = self.task.get_guidance()
            result[gabriel.Protocol_measurement.JSON_KEY_APP_SYMBOLIC_TIME] = sym_time
            result['image'] = b64encode(zc.cv_image2raw(img_guidance))
            result.pop('animation', None)

        if img_guidance is not None:
            zc.check_and_display('guidance',
                                 img_guidance,
                                 display_list,
                                 wait_time=config.DISPLAY_WAIT_TIME,
                                 resize_max=config.DISPLAY_MAX_PIXEL)

        return json.dumps(result)
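
Unlike Example #2, this handler also embeds the rendered guidance image directly in the JSON result (the 'image' key, base64-encoded via zc.cv_image2raw()). A hedged client-side sketch of recovering that image, assuming cv_image2raw() returns encoded image bytes (e.g. JPEG) that cv2.imdecode() can parse:

import json
from base64 import b64decode

import cv2
import numpy as np

# Illustrative only: decode the guidance image embedded by _handle_img() above.
# The 'image' key matches the handler; the decoding path is an assumption about
# the raw byte format, not something confirmed by the snippet itself.
def decode_guidance_image(result_str):
    result = json.loads(result_str)
    if 'image' not in result:
        return None
    raw = b64decode(result['image'])
    return cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_COLOR)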