    def segment(self, in_root, ou_root):
        """
            Extract trials from video

            Args:
                in_root: string - path to input trial folder
                ou_root: string - path to output folder root
        """
        # list containing history of anchor points
        progression = [0]
        folders = []
        try:
            frames, N = self._load_images(in_root)
            n = 0
            key = ''
            feedback = ''
            with tqdm(total=N) as pbar:
                while key != ord('+'):
                    self.main_frm[:, :, :] = frames[n, :, :, ::-1]
                    if feedback == 'fwd':
                        # if saved segment in forward dataset:
                        #    display green circle for feedback
                        self._feedback((0, 255, 0))
                    if feedback == 'bwd':
                        # if saved segment in backward dataset:
                        #    display magenta circle for feedback
                        self._feedback((255, 0, 220))
                    if feedback == 'skip':
                        # if new anchor point set without save:
                        #    display blue circle for feedback
                        self._feedback((255, 0, 0))
                    if feedback == 'undo':
                        # if undo:
                        #    display red circle for feedback
                        self._feedback((0, 0, 255))

                    # render and await user input
                    Tools.render(self.main_frm)
                    feedback = ''
                    key = self.stdscr.getch()
                    self.stdscr.addch(20, 25, key)
                    self.stdscr.refresh()

                    # process user input
                    n = self._process_nav(key, n, N, progression)
                    save = self._process_save(key, in_root, ou_root, folders,
                                              progression[-1], n, progression)
                    if save:
                        feedback = save
                    if self._process_skip(key, folders, n, progression):
                        feedback = 'skip'
                    n, undo = self._process_undo(key, progression, folders, n)
                    if undo:
                        feedback = 'undo'
                    pbar.update(n - pbar.n)

        except Exception as e:
            self._shutdown('Shutting down due to exception')
            raise e
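    # `_feedback` is not part of this snippet. Judging by the colored feedback
    # circles drawn directly in `segment_video` further down, it most likely
    # does something along these lines (a sketch, not the original helper):
    #
    #     def _feedback(self, color):
    #         # filled circle in the top-left corner of the frame being shown
    #         cv2.circle(self.main_frm, (10, 10), 10, color, -1)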
    def _plot(self, path):
        appendage = '/steady' if 'pouring' in self.trial_root else '/left'
        frames = Tools.list_files(self.trial_root + appendage)
        a_fldr = self.anchor_root + appendage

        for ii in range(len(path)):
            frame = cv2.imread(frames[ii])
            ancho = cv2.imread(os.path.join(a_fldr, path[ii][0]))
            # Tools.debug(frame.shape, ancho.shape)
            Tools.render(np.concatenate((frame, ancho), axis=1))
    def _heatmap(self, matrix, path=None):
        """
        Draw a heatmap of a distance matrix or accumulated cost matrix with
        the selected path.
        """
        fig, ax = plt.subplots()
        plt.imshow(matrix, interpolation='nearest', cmap='terrain')
        plt.gca().invert_yaxis()
        plt.xlabel("X")
        plt.ylabel("Y")
        plt.grid()
        plt.colorbar()
        if path is not None:
            plt.plot([point[0] for point in path],
                     [point[1] for point in path], 'r')
        fig.canvas.draw()
        # convert the rendered figure to a BGR image for cv2-based display
        data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
        data = data.reshape(fig.canvas.get_width_height()[::-1] +
                            (3, ))[:, :, ::-1]
        plt.close()
        Tools.render(data)
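    # Usage sketch (illustrative names only): `cost` would be a 2-D numpy
    # array such as a DTW accumulated cost matrix and `path` a list of
    # (x, y) index pairs into it:
    #
    #     self._heatmap(cost)                # matrix only
    #     self._heatmap(cost, path=warping)  # overlay the selected path in red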
    def _heatmap(self, matrix, path=None):
        """
        draw a heatmap of distance matrix or accumulated cost matrix with
        selected path
        """

        img = np.copy(matrix)
        img -= np.min(img)
        img /= np.max(img)

        if path is None:
            img = cv2.resize(img, (360, 360))

            return img[::-1, :]
        else:
            img = (np.stack((img, ) * 3, axis=-1) * 255).astype(np.uint8)
            appendage = '/steady' if 'pouring' in self.trial_root else '/left'
            frames = Tools.list_files(self.trial_root + appendage)
            anchors = Tools.list_files(self.anchor_root + appendage)
            # print(len(frames), len(path))
            for ii in range(len(path) - 2, -1, -1):
                p0 = path[ii]
                p1 = path[ii + 1]

                img = cv2.line(img, p0, p1, (0, 0, 255), 2)
                frame = cv2.imread(frames[p0[0]])
                ancho = cv2.imread(anchors[p0[1]])

                frame = cv2.resize(frame, (240, 240))
                ancho = cv2.resize(ancho, (240, 240))

                if not ii % 3:
                    img_ = np.copy(img)
                    img_ = cv2.resize(img_, (240, 240))
                    Tools.render(np.concatenate(
                        (img_[::-1, :, :], frame, ancho), axis=1),
                                 waitkey=50)
            return img[::-1, :, :]
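    # The accumulated cost matrix and path visualised by `_heatmap` are not
    # computed in this snippet. Below is a minimal dynamic-time-warping-style
    # sketch of how they could be produced; the helper name and the (i, j)
    # path convention are assumptions, and `np` is the module-level numpy
    # import used by the rest of this code.
    @staticmethod
    def _dtw_sketch(dist):
        """Return the accumulated cost matrix and warping path for a 2-D
        numpy distance matrix `dist`."""
        n, m = dist.shape
        acc = np.full((n, m), np.inf)
        acc[0, 0] = dist[0, 0]
        for i in range(n):
            for j in range(m):
                if i == 0 and j == 0:
                    continue
                acc[i, j] = dist[i, j] + min(
                    acc[i - 1, j] if i else np.inf,
                    acc[i, j - 1] if j else np.inf,
                    acc[i - 1, j - 1] if i and j else np.inf)

        # backtrack from the bottom-right corner to recover the optimal path
        i, j = n - 1, m - 1
        path = [(i, j)]
        while i or j:
            steps = []
            if i and j:
                steps.append((acc[i - 1, j - 1], i - 1, j - 1))
            if i:
                steps.append((acc[i - 1, j], i - 1, j))
            if j:
                steps.append((acc[i, j - 1], i, j - 1))
            _, i, j = min(steps)
            path.append((i, j))
        return acc, path[::-1]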
    def extract_trajectory(indir):
        trajectory = {}

        # load dictionaries of camera projections
        D_l = Prep.init_joint_dict(os.path.join(
            indir, '0', '3d', 'alphapose-results.json'))
        D_r = Prep.init_joint_dict(os.path.join(
            indir, '2', '3d', 'alphapose-results.json'))

        # paths to the original color recordings; they serve as keys into
        # `xyz`, the per-frame dictionary of real camera positions (left as
        # None here and expected to be populated before the wrist positions
        # can be transformed)
        lvid_path = os.path.join(indir.replace(
            '/grabbing/', '/kinect-recordings-3/'), 'color-recording-left.avi')
        rvid_path = os.path.join(indir.replace(
            '/grabbing/', '/kinect-recordings-3/'), 'color-recording-right.avi')
        xyz = None

        # load image paths of video
        img_paths_l = sorted(Tools.list_files(os.path.join(indir, '0')))
        img_paths_r = sorted(Tools.list_files(os.path.join(indir, '2')))

        # if the videos are not of equal length, cut frames from the start
        # and end to make them equal
        img_paths_l, img_paths_r = Prep.make_equal_length(
            (img_paths_l, img_paths_r))

        # loop over all frames
        for img_pth_l, img_pth_r in zip(img_paths_l, img_paths_r):
            # left and right frame have same name (but in different folder)
            img_name = img_pth_l.split('/')[-1]
            frame = str(int(img_name.split('.')[0]))

            # for testing: display images
            img_l = cv2.imread(img_pth_l)
            img_r = cv2.imread(img_pth_r)

            # get person on couch in left view (if any)
            try:
                peoples = [person for person in D_l[img_name] if
                           Prep.on_couch(person['KP']['LHip'], 0) or
                           Prep.on_couch(person['KP']['RHip'], 0)]
                if len(peoples) == 0:
                    lperson = None
                else:
                    lperson_idx = max(range(len(peoples)),
                                      key=lambda x: peoples[x]['score'])
                    lperson = peoples[lperson_idx]
            except KeyError:
                # no detections for this frame in the left view
                lperson = None

            # get person on couch in right view (if any)
            try:
                peoples = [person for person in D_r[img_name] if
                           Prep.on_couch(person['KP']['LHip'], 2) or
                           Prep.on_couch(person['KP']['RHip'], 2)]
                if len(peoples) == 0:
                    rperson = None
                else:
                    rperson_idx = max(range(len(peoples)),
                                      key=lambda x: peoples[x]['score'])
                    rperson = peoples[rperson_idx]
            except KeyError:
                # no detections for this frame in the right view
                rperson = None

            # do not add frame to trajectory if both are None
            if lperson is not None or rperson is not None:
                # left wrist
                if lperson is None:  # so rperson is not None
                    # get left wrist from right frame
                    lwrist = Prep.affine_transform(
                        xyz[rvid_path][frame]['xyz'][rperson_idx * 2 + 1], 'R')
                    cv2.circle(
                        img_r,
                        (int(rperson['KP']['LWrist'][0]),
                         int(rperson['KP']['LWrist'][1])),
                        10, (0, 255, 0), -1)

                    # get right wrist from right frame
                    rwrist = Prep.affine_transform(
                        xyz[rvid_path][frame]['xyz'][rperson_idx * 2], 'R')
                    cv2.circle(
                        img_r,
                        (int(rperson['KP']['RWrist'][0]),
                         int(rperson['KP']['RWrist'][1])),
                        10, (0, 255, 0), -1)

                elif rperson is None:  # so lperson is not None
                    # get left wrist from left frame
                    lwrist = Prep.affine_transform(
                        xyz[lvid_path][frame]['xyz'][lperson_idx * 2 + 1], 'L')
                    cv2.circle(
                        img_l,
                        (int(lperson['KP']['LWrist'][0]),
                         int(lperson['KP']['LWrist'][1])),
                        10, (0, 255, 0), -1)

                    # get right wrist from left frame
                    rwrist = Prep.affine_transform(
                        xyz[lvid_path][frame]['xyz'][lperson_idx * 2], 'L')
                    cv2.circle(
                        img_l,
                        (int(lperson['KP']['RWrist'][0]),
                         int(lperson['KP']['RWrist'][1])),
                        10, (0, 255, 0), -1)

                else:
                    if rperson['KP']['LWrist'][2] > lperson['KP']['LWrist'][2]:
                        # get left wrist from right frame
                        lwrist = Prep.affine_transform(
                            xyz[rvid_path][frame]['xyz'][rperson_idx * 2 + 1],
                            'R')
                        cv2.circle(
                            img_l,
                            (int(lperson['KP']['LWrist'][0]),
                             int(lperson['KP']['LWrist'][1])),
                            10, (0, 0, 255), -1)
                        cv2.circle(
                            img_r,
                            (int(rperson['KP']['LWrist'][0]),
                             int(rperson['KP']['LWrist'][1])),
                            10, (0, 255, 0), -1)
                    else:
                        # get left wrist from left frame
                        lwrist = Prep.affine_transform(
                            xyz[lvid_path][frame]['xyz'][lperson_idx * 2 + 1],
                            'L')
                        cv2.circle(
                            img_l,
                            (int(lperson['KP']['LWrist'][0]),
                             int(lperson['KP']['LWrist'][1])),
                            10, (0, 255, 0), -1)
                        cv2.circle(
                            img_r,
                            (int(rperson['KP']['LWrist'][0]),
                             int(rperson['KP']['LWrist'][1])),
                            10, (0, 0, 255), -1)

                    if rperson['KP']['RWrist'][2] > lperson['KP']['RWrist'][2]:
                        # get right wrist from right frame
                        rwrist = Prep.affine_transform(
                            xyz[rvid_path][frame]['xyz'][rperson_idx * 2], 'R')
                        cv2.circle(
                            img_l,
                            (int(lperson['KP']['RWrist'][0]),
                             int(lperson['KP']['RWrist'][1])),
                            10, (0, 0, 255), -1)
                        cv2.circle(
                            img_r,
                            (int(rperson['KP']['RWrist'][0]),
                             int(rperson['KP']['RWrist'][1])),
                            10, (0, 255, 0), -1)
                    else:
                        # get right wrist from left frame
                        rwrist = Prep.affine_transform(
                            xyz[lvid_path][frame]['xyz'][lperson_idx * 2], 'L')
                        cv2.circle(
                            img_l,
                            (int(lperson['KP']['RWrist'][0]),
                             int(lperson['KP']['RWrist'][1])),
                            10, (0, 255, 0), -1)
                        cv2.circle(
                            img_r,
                            (int(rperson['KP']['RWrist'][0]),
                             int(rperson['KP']['RWrist'][1])),
                            10, (0, 0, 255), -1)
                trajectory[img_name] = [lwrist[0], lwrist[1], lwrist[2],
                                        rwrist[0], rwrist[1], rwrist[2]]

            # resize for easier rendering
            img_l = cv2.resize(
                img_l, (img_l.shape[1] // 2, img_l.shape[0] // 2))
            img_r = cv2.resize(
                img_r, (img_r.shape[1] // 2, img_r.shape[0] // 2))

            # display video
            Tools.render('DEBUG', np.concatenate((img_l, img_r), axis=1))
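        # at this point `trajectory` maps each processed frame name to the
        # six wrist coordinates [lx, ly, lz, rx, ry, rz] (left, then right)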
    def segment_video(self, in_folder, ou_folder):
        """
            Extract trials from video

            Args:
                in_folder: string - path to input video folder
                ou_folder: string - path to output folder root
        """
        try:
            l_color = self._load_videos(in_folder)[0]

            img = np.copy(l_color[0])
            img = cv2.resize(img, (240, 160))
            Tools.render(img[:, :, ::-1])

        except Exception as e:
            print(in_folder)
            print(e)
            time.sleep(1000)
            raise e

        progression = [0]
        created_folders = []
        ii = 0
        key = ''
        try:
            with tqdm(total=len(l_color)) as pbar:
                while key != ord('+'):
                    feedback = ''
                    key = self.stdscr.getch()
                    self.stdscr.addch(20, 25, key)
                    self.stdscr.refresh()
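                    # key map: d / e / 3 step forward by 1 / 5 / 25 frames,
                    # a / q / 1 step backward by 1 / 5 / 25 (never past the
                    # last anchor point), ']' stores the current segment,
                    # 'u' skips it, '.' undoes the last action, '+' quits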
                    if key == ord('d'):
                        # move forwards slow
                        ii = min(ii + 1, len(l_color) - 1)
                    elif key == ord('e'):
                        # move forwards medium
                        ii = min(ii + 5, len(l_color) - 1)
                    elif key == ord('3'):
                        # move forwards fast
                        ii = min(ii + 25, len(l_color) - 1)

                    elif key == ord('a'):
                        # move backwards slow
                        ii = max(progression[-1], ii - 1)
                    elif key == ord('q'):
                        # move backwards medium
                        ii = max(progression[-1], ii - 5)
                    elif key == ord('1'):
                        # move backwards fast
                        ii = max(progression[-1], ii - 25)

                    elif key == ord(']'):
                        # STORE SEGMENT
                        # make new folder
                        store_folder_number = len(
                            [f for f in created_folders if f is not None])
                        savepath = os.path.join(
                            ou_folder,
                            Tools.fname(in_folder) + '_' +
                            str(store_folder_number).zfill(3))
                        self._makedirs(savepath)

                        # store images
                        self._store_imgs(l_color, savepath + '/left',
                                         progression[-1], ii)

                        # update progression data
                        progression.append(ii)
                        created_folders.append(savepath)

                        feedback = 'store'

                    elif key == ord('u'):
                        # skip
                        progression.append(ii)
                        created_folders.append(None)

                        feedback = 'skip'
                    elif key == ord('.'):
                        # undo
                        if len(progression) > 1:
                            if created_folders[-1] is not None:
                                shutil.rmtree(created_folders[-1])

                            progression.pop(-1)
                            created_folders.pop(-1)
                            ii = progression[-1]

                        feedback = 'back'

                    pbar.update(ii - pbar.n)
                    img = np.copy(l_color[ii])
                    img = cv2.resize(img, (240, 160))

                    if feedback == 'store':
                        cv2.circle(img, (10, 10), 10, (0, 255, 0), -1)
                    if feedback == 'skip':
                        cv2.circle(img, (10, 10), 10, (255, 0, 0), -1)
                    if feedback == 'back':
                        cv2.circle(img, (10, 10), 10, (0, 0, 255), -1)

                    Tools.render(img[:, :, ::-1])
        except Exception as e:
            print(e)
            time.sleep(1000)
            raise e

        with open('./res/finished_files.txt', 'a+') as f:
            f.write(in_folder + '\n')