Example #1
import errno
import os
from collections import OrderedDict

import torch

# `args` (parsed command-line arguments), `mapping` (a configparser.ConfigParser)
# and `model` are assumed to be defined earlier in the script.
action = {}
if os.path.isfile(args.mapping):
    mapping.read(args.mapping)

    # Each MAPPING entry maps a gesture name to "<fn>,<key>[,<key>...]",
    # where fn selects the keyboard action to perform (hotkey, press or typewrite).
    for m in mapping['MAPPING']:
        val = mapping['MAPPING'][m].split(',')
        action[m] = {'fn': val[0], 'keys': val[1:]}

else:
    # print('[ERROR] Mapping file for gestures to keyboard keys is not found at ' + args.mapping)
    raise FileNotFoundError(
        errno.ENOENT, os.strerror(errno.ENOENT), args.mapping)


if args.use_gpu:
    model.cuda()  # move the model parameters to the GPU
    # model = torch.nn.DataParallel(model, device_ids=args.gpus).to(device)

if os.path.isfile(args.checkpoint):
    # if (verbose>0): print("=> loading checkpoint '{}'".format(resume))
    checkpoint = torch.load(args.checkpoint, map_location='cpu')

    if 'state_dict' in checkpoint:
        # Weights saved through torch.nn.DataParallel prefix every parameter
        # name with 'module.'; strip it so the plain model can load them.
        new_state_dict = OrderedDict()
        for name, param in checkpoint['state_dict'].items():
            new_state_dict[name[7:]] = param  # drop the leading 'module.'
        checkpoint['state_dict'] = new_state_dict
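
The snippet stops right after rewriting the parameter names; the repaired state dict would then be loaded into the network before inference, roughly as in the short sketch below (the `model` and `checkpoint` names refer to the snippet above, and the call order mirrors Example #2 below).

# Minimal sketch, assuming `model` and `checkpoint` as built above.
model.load_state_dict(checkpoint['state_dict'])
model.eval()  # switch to inference mode before making predictions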
Example #2
import errno
import os
import queue
import time
from collections import OrderedDict
from threading import Thread
from typing import Optional

import cv2 as cv
import torch
from torchvision.transforms import (CenterCrop, Compose, Normalize,
                                    ToPILImage, ToTensor)

from model import ConvColumn  # the project's 3D-CNN column; adjust to your module layout


class GestureDetectorThread(Thread):
    SWIPE_LEFT = 'Swiping Left'
    SWIPE_RIGHT = 'Swiping Right'
    SWIPE_UP = 'Swiping Up'
    SWIPE_DOWN = 'Swiping Down'
    THUMB_OK = 'Thumb Up'
    THUMB_NOT = 'Thumb Down'

    NO_GESTURE = 'No gesture'
    OTHER_GESTURE = 'Doing other things'

    def __init__(self, fps=12, width=176, height=100, use_gpu=True, model_data="model_best.pth.tar"):
        Thread.__init__(self)
        self.isRunning = True

        self._capture = cv.VideoCapture(0)          # default webcam
        self._target_frame_size = (width, height)
        self._sleeping_time = 1 / fps               # seconds between grabbed frames

        self._event_queue = queue.Queue()             # recognized gestures, read via get_event()
        self._frame_queue = queue.Queue(maxsize=18)   # sliding 18-frame clip fed to the network
        self._predict_queue = queue.Queue(maxsize=3)  # recent confident predictions (vote buffer)

        self._model = ConvColumn(8)  # 8 output classes, matching self._gestures below
        if use_gpu and torch.cuda.is_available():
            self._model.cuda()

        if os.path.isfile(model_data):
            last_checkpoint = torch.load(model_data, map_location='cpu')

            if 'state_dict' in last_checkpoint:
                # Weights saved through torch.nn.DataParallel prefix every
                # parameter name with 'module.'; strip it so the plain model
                # can load them.
                new_state_dict = OrderedDict()
                for name, param in last_checkpoint['state_dict'].items():
                    new_state_dict[name[7:]] = param  # drop the leading 'module.'
                last_checkpoint['state_dict'] = new_state_dict

            self._model.load_state_dict(last_checkpoint['state_dict'])
        else:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), model_data)

        # Per-frame preprocessing: center-crop to 84x84 and normalize with
        # ImageNet mean/std statistics.
        self._transform = Compose([
            ToPILImage(),
            CenterCrop(84),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225])
        ])
        self._device = torch.device("cuda" if use_gpu and torch.cuda.is_available() else "cpu")

        # Bidirectional lookup table: class index <-> gesture label.
        self._gestures = {0: GestureDetectorThread.SWIPE_LEFT,
                          1: GestureDetectorThread.SWIPE_RIGHT,
                          2: GestureDetectorThread.SWIPE_DOWN,
                          3: GestureDetectorThread.SWIPE_UP,
                          4: GestureDetectorThread.THUMB_OK,
                          5: GestureDetectorThread.THUMB_NOT,
                          6: GestureDetectorThread.NO_GESTURE,
                          7: GestureDetectorThread.OTHER_GESTURE,
                          GestureDetectorThread.SWIPE_LEFT: 0,
                          GestureDetectorThread.SWIPE_RIGHT: 1,
                          GestureDetectorThread.SWIPE_DOWN: 2,
                          GestureDetectorThread.SWIPE_UP: 3,
                          GestureDetectorThread.THUMB_OK: 4, 
                          GestureDetectorThread.THUMB_NOT: 5,
                          GestureDetectorThread.NO_GESTURE: 6,
                          GestureDetectorThread.OTHER_GESTURE: 7}

        self.THRESHOLD = 0.7  # minimum softmax confidence needed to accept a prediction

    def run(self):
        while self.isRunning:
            start_time = time.time()

            ret, frame = self._capture.read()
            if not ret:
                continue
            frame = cv.resize(frame, self._target_frame_size)

            try:
                self._frame_queue.put_nowait(frame)
            except queue.Full:
                # The clip buffer is full: drop the oldest frame, append the new
                # one and run the network on the resulting 18-frame clip.
                _ = self._frame_queue.get()
                self._frame_queue.put_nowait(frame)

                frames = [torch.unsqueeze(self._transform(img), 0) for img in list(self._frame_queue.queue)]

                # Stack to (1, C, T, H, W): channels first, then the time axis.
                data = torch.cat(frames)
                data = data.permute(1, 0, 2, 3)
                data = data[None, :, :, :, :]
                data = data.to(self._device)

                self._model.eval()
                with torch.no_grad():
                    nn_output = self._model(data)
                    nn_output = torch.nn.functional.softmax(nn_output, dim=1)
                pred, class_index = nn_output.max(1)
                pred = pred.item()
                class_index = class_index.item()

                g = self._gestures[class_index]
                if pred > self.THRESHOLD and g not in (GestureDetectorThread.OTHER_GESTURE, GestureDetectorThread.NO_GESTURE):

                    try:
                        self._predict_queue.put_nowait((pred, g))
                    except queue.Full:
                        # Enough consecutive confident predictions: keep the most
                        # recent ones and emit the highest-scoring gesture as an event.
                        self._predict_queue.get()
                        self._predict_queue.put_nowait((pred, g))

                        predictions = sorted(list(self._predict_queue.queue))
                        print(predictions)

                        g = predictions[-1][1]
                        self._event_queue.put(g)

                        # Clear queues
                        while not self._frame_queue.empty():
                            self._frame_queue.get_nowait()
                        while not self._predict_queue.empty():
                            self._predict_queue.get_nowait()

                else:
                    # Low confidence or a background class: reset the vote buffer.
                    while not self._predict_queue.empty():
                        self._predict_queue.get_nowait()


            # Keep roughly the requested capture rate.
            time_diff = time.time() - start_time
            time.sleep(max(0.0, self._sleeping_time - time_diff))

        self._capture.release()
    
    def get_event(self) -> Optional[str]:
        """Return the next recognized gesture label, or None if no event is pending."""
        try:
            return self._event_queue.get(block=False)
        except queue.Empty:
            return None

    def stop_detector(self):
        self.isRunning = False
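
A minimal usage sketch for the class above; the driver loop, the printing and the 0.05 s poll interval are illustrative assumptions, not part of the original snippet.

# Hypothetical driver loop for GestureDetectorThread (sketch only, placed in the same file).
if __name__ == '__main__':
    detector = GestureDetectorThread(fps=12, use_gpu=False)
    detector.start()
    try:
        while True:
            gesture = detector.get_event()   # a label such as 'Swiping Left', or None
            if gesture is not None:
                print('Detected:', gesture)  # here the gesture would be mapped to a keyboard action
            time.sleep(0.05)
    except KeyboardInterrupt:
        detector.stop_detector()
        detector.join()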