def slot_export_npy_and_label(self):
     poses = PlayingUnit.only_ins.pose_model._fdata
     logger.debug(list(poses.keys()))
     labels = self.mw.table_labeled.get_all_labels()
     labels = [l for l in labels if l.pose_index != -1]
     logger.debug(len(labels))
     npy = []
     label_len_max = 0
     for label in labels:
         one_action_pose = []
         label_len_max = max(label_len_max, label.end - label.begin + 1)
         for frame_index in range(label.begin, label.end + 1):
             one_action_pose.append(
                 np.asarray(poses[str(frame_index)][int(label.pose_index)]))
         npy.append(np.asarray(one_action_pose))
     logger.debug([len(o_a_p) for o_a_p in npy])
     logger.debug(label_len_max)
     # pad shorter action sequences in place with zeros up to the longest label length
     for one_action_pose in npy:
         one_action_pose.resize((label_len_max, *one_action_pose.shape[1:]),
                                refcheck=False)
     npy = np.asarray(npy)
     # add a singleton axis after the first dimension, then repeat it 3 times
     npy = np.expand_dims(npy, axis=1)
     npy = np.repeat(npy, 3, axis=1)
     logger.info(npy.shape)
     time_stamp = int(time.time())
     CommonUnit.save_ndarray(npy, f'train_data_{time_stamp}.npy')
     CommonUnit.save_pkl(
         ([f'name{i}' for i in range(len(npy))],
          [l.action_id for l in labels]),
         f'train_label_{time_stamp}.pkl')
Example #2
 def drawBboxes(self, bbox_entities: List[Tuple[Rect, str]], copy=True):
     """
     bbox_entities: List[Tuple[rect, label]]
     """
     # Log.debug(bbox_entities)
     img = self.org().copy() if copy else self.org()
     for i, bbox_entity in enumerate(bbox_entities):
         bbox, label = bbox_entity
         logger.info(label)
         # color = tuple(np.random.randint(256, size=3))
         xywh = tuple(bbox.toInt().xywh)
         color = tuple(np.random.choice(range(40, 256), size=3))
         color_int = tuple(map(int, color))[::-1]
         label_w = len(label) * 12
         label_h = 25
         thickness = 3
         cv2.rectangle(img, rec=xywh, color=color_int, thickness=thickness)
         cv2.rectangle(img,
                       rec=(
                           xywh[0] - thickness // 2 - 1,
                           max(0, xywh[1] - label_h),
                           label_w,
                           label_h,
                       ),
                       color=color_int,
                       thickness=-1)
         cv2.putText(img,
                     label,
                     org=(xywh[0], max(22, xywh[1] - 5)),
                     fontFace=cv2.FONT_HERSHEY_TRIPLEX,
                     fontScale=0.6,
                     color=(0, 0, 0),
                     lineType=cv2.LINE_AA)
     return self.__class__(img,
                           f'{self.title}:draw_bboxes') if copy else self
Example #3
 def slot_eval(self):
     eval_content = self.mw.eval_input.toPlainText()
     try:
         resp = eval(eval_content)
         logger.info(f'{eval_content} -> {resp}')
     except Exception as e:
         resp = e.__str__()
         logger.error(f'{eval_content} -> {resp}')
     self.mw.eval_output.setText(str(resp))
Example #4
 def slot_schedule(self, jump_to, bias, stop_at, emitter):
     # The connected signal is declared with int parameters, so a None passed through it
     # gets cast to an arbitrary large integer (e.g. 146624904); hence None is replaced with -1 here.
     logger.info(f'{jump_to}, {bias}, {stop_at}, {emitter}')
     if self.media_model is None:
         return
     if jump_to != -1:
         bias = None
     self.media_model.schedule(jump_to, bias, stop_at, emitter)
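The -1 convention above is needed because a Qt signal declared with int parameters cannot carry None. A minimal illustrative sketch (assuming PyQt5; ScheduleEmitter and its signal are hypothetical, not part of the project):

from PyQt5.QtCore import QObject, pyqtSignal

class ScheduleEmitter(QObject):
    # int-typed signal: None cannot travel through it reliably
    sig_schedule = pyqtSignal(int, int, int, str)

    def request(self, jump_to=None, bias=None, stop_at=None, emitter=''):
        # encode "no value" as -1 on the sender side; the receiving slot maps -1 back to None
        self.sig_schedule.emit(-1 if jump_to is None else jump_to,
                               -1 if bias is None else bias,
                               -1 if stop_at is None else stop_at,
                               emitter)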
Example #5
 def grayDiff(self, another):
     logger.info('===============diffing image===============')
     if isinstance(another, np.ndarray):
         another = self.__class__(another)
     elif not isinstance(another, type(self)):
         assert False, f'Another should be a numpy.ndarray or {type(self)}'
     if self.org().shape != another.org().shape:
         logger.warn('Comparing images with different shapes')
     diff_ = cv2.absdiff(self.gray().org(), another.gray().org())
     return self.__class__(diff_, imshow_params={'cmap': 'Greys_r'})
Example #6
 def slot_play_toggle(self):
     if self.media_model is None:
         logger.debug('media_model is None.')
         return
     if self.media_model.is_playing():
         logger.info('pause.')
         self.slot_pause()
     else:
         logger.info('start.')
         self.slot_start()
Example #7
 def _fillMissingPose(self, posescore_list, target_num):
     exists_boxer_num = len({p_s.boxer_id for p_s in posescore_list})
     missing_num = target_num - exists_boxer_num
     if missing_num <= 0: return posescore_list
     logger.info(f'fill missing pose keypoints num: {missing_num}')
     for _ in range(missing_num):
         new_pose = self._pose_estimator.pose_type.newZeroPose()
         posescore_list.append(
             PoseScore(new_pose, None, 0, 0, 0, 0, 1, False))
     return posescore_list
Example #8
    def buildGreenVersion(cls, mode='GPU'):
        assert not os.path.exists(cls.OPENPOSE_GREEN_PATH), 'already exists!'
        cls._prepare()

        clone_openpose = f'git clone -q --depth 1 https://github.com/CMU-Perceptual-Computing-Lab/openpose.git {cls.OPENPOSE_GREEN_RPATH}'
        cls.checkCall(clone_openpose)
        cmake_c, tar_file_name = cls._getModeEnv(mode)
        build = f'cd {cls.OPENPOSE_GREEN_PATH} && rm -rf build || true && mkdir build ' \
                f'&& cd build && {cmake_c} && make -j`nproc` ' \
                f'&& cd /content && tar -czf {tar_file_name} {cls.OPENPOSE_GREEN_RPATH}'
        cls.checkCall(build)
        logger.info('finished.')
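cls.checkCall is used throughout these helpers but not shown here; a plausible minimal equivalent, written as a plain function and assuming it only runs the command string through a shell and raises on a non-zero exit status:

import subprocess

def checkCall(cmd: str):
    # run the shell command string; raises subprocess.CalledProcessError on non-zero exit
    subprocess.check_call(cmd, shell=True)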
Example #9
def test_file_logger():
    import time
    from zdl.utils.io.log import logger, addFileLogger

    log_file = 'test/this_is_a_pytest_file.log'
    log_content = f'{time.time()} succeed'

    addFileLogger(log_file)
    logger.info(log_content)

    with open(log_file) as f:
        assert f.read().endswith(log_content + '\n')
Example #10
 def diff(self, another):
     logger.info('===============diffing image===============')
     if isinstance(another, np.ndarray):
         assert self.org().shape == another.shape, 'The shapes should be the same!'
         another = self.__class__(another)
     elif isinstance(another, type(self)):
         assert self.org().shape == another.org().shape, 'The shapes should be the same!'
     else:
         assert False, f'Another should be a numpy.ndarray or {type(self)}'
     diff_ = cv2.absdiff(self.org(), another.org())
     return self.__class__(diff_, self.imshow_params)
Example #11
    def slot_open_file(self):
        # TODO: remove native directory
        all_types_filter = f'*{" *".join(Settings.video_exts + Settings.image_exts + Settings.plotting_exts)}'
        file_uri = CommonUnit.get_open_name(
            filter_=f"Media Files ({all_types_filter})")
        # got = '/Users/zdl/Downloads/下载-视频/poses.json'
        # got = '/Users/zdl/Downloads/下载-视频/金鞭溪-张家界.mp4'
        logger.info(file_uri)
        if not file_uri:
            return
        ext = os.path.splitext(file_uri)[1]
        if ext in Settings.video_exts:
            self.mw.tab_media.setCurrentIndex(0)
            video_model = Video(file_uri) \
                .set_viewer(self.mw.label_show)
            video_model.fps = video_model.get_info()['fps'] * float(
                self.mw.combo_speed.currentText())
            video_model.file = FileInfo(file_uri)
            self.video_model = video_model
            self.set_model(video_model)

            self.mw.table_timeline.set_column_num(
                video_model.get_info()['frame_c'])
            self.mw.video_textBrowser.append(file_uri)
        elif ext in Settings.plotting_exts:
            self.mw.tab_media.setCurrentIndex(2)
            file = JsonFilePoses.load(file_uri)
            plotter = self.mw.graphics_view.main_plotter
            plotter.set_range([0, int(file['video_info.w'])],
                              [0, int(file['video_info.h'])])
            pose_model = PosePlotting(file['info.pose_type']) \
                .set_data(file['poses']) \
                .set_viewer(plotter)
            pose_model.file = file
            self.pose_model = pose_model
            self.set_model(pose_model)

            self.mw.table_timeline.set_column_num(
                int(pose_model.indices[-1]) + 1)
            self.mw.plotting_textBrowser.append(file_uri)
        else:
            logger.warn(f'{file_uri} type {ext} not supported.')
            return
        self.media_model.signals.flushed.connect(
            self.mw.table_timeline.slot_follow_to)
        self.media_model.signals.flushed.connect(self.slot_follow_to)
        self.slot_start()
Example #12
    def _prepare(cls):
        # assert os.path.exists(cls.DRIVE_PACKAGE_PATH)
        os.makedirs(cls.LOCAL_PACKAGE_PATH, exist_ok=True)

        # see: https://github.com/CMU-Perceptual-Computing-Lab/openpose/issues/949
        # install new CMake because of CUDA10
        logger.info('installing dependency cmake...')
        cmake_package = 'cmake-3.13.0-Linux-x86_64.tar.gz'
        install_cmake = f'cd {cls.LOCAL_PACKAGE_PATH} ' \
                        f'&& ls {cmake_package} ' \
                        f'|| wget -q https://cmake.org/files/v3.13/{cmake_package} ' \
                        f'&& tar xfz {cmake_package} --strip-components=1 -C /usr/local'
        install_dependencies = f'apt-get -qq install -y libatlas-base-dev libprotobuf-dev libleveldb-dev ' \
                               f'libsnappy-dev libhdf5-serial-dev protobuf-compiler libgflags-dev libgoogle-glog-dev ' \
                               f'liblmdb-dev opencl-headers ocl-icd-opencl-dev libviennacl-dev'
        cls.checkCall(install_cmake)
        cls.checkCall(install_dependencies)
Example #13
 def _del_selected_label_cells(self):
     logger.info('here')
     label_cells = {}  # key:row,value:cols list
     for qindex in self.selectedIndexes():
         r, c = qindex.row(), qindex.column()
         item = self.model().item(r, c)
         if item is None or item.background() == Qt.white:
             continue
         item.setBackground(Qt.white)
         item.setWhatsThis(None)
         item.setToolTip(None)
         if r in label_cells:
             label_cells[r].append(c)
         else:
             label_cells[r] = [c]
     self.unselect_all()
     return label_cells
Example #14
    def slot_export_xml(self):
        logger.debug('')
        labels = self.mw.table_labeled.get_all_labels()  # type: List[ActionLabel]
        labels.sort(key=lambda l: l.begin)
        actions = self.mw.table_action.get_all_actions()  # type:List[Action]
        id_action_dict = {a.id: a for a in actions}  # type:Dict[int,Action]
        framespan = int(self.mw.line_framespan.text())
        overlap = int(self.mw.line_overlap.text())
        logger.debug(f'{framespan}, {overlap}, {labels}')

        anno = AnnotationXml()
        file_num = 0
        abandoned = []
        while labels:
            trans = overlap * (0 if file_num == 0 else -1)
            range_ = (file_num * framespan + trans + 1,
                      (file_num + 1) * framespan + trans)
            anno.new_file(f'runtime/xmldemo_{range_}.xml')
            anno.set_tag('folder', 'runtime')
            anno.set_tag('filename', f'runtime/xmldemo_{range_}.png')
            anno.set_tag('width', framespan)
            anno.set_tag('height', 200)
            anno.set_tag('depth', 100)
            cursor = 0
            # bound the cursor explicitly so it cannot run past the end of the list
            while cursor < len(labels):
                if labels[cursor].begin >= range_[0] and labels[cursor].end <= range_[1]:
                    label = labels.pop(cursor)
                    action = id_action_dict[label.action_id]
                    anno.append_action(label.action, label.begin, label.end,
                                       action.xml_ymin, action.xml_ymax)
                elif labels[cursor].begin < range_[0]:
                    abandoned.append(labels.pop(cursor))
                elif labels[cursor].begin > range_[1]:
                    break
                else:
                    cursor += 1
            anno.dump()
            file_num += 1
        logger.info(f'labels abandoned: {abandoned}')
Example #15
 def show(self, title=None, figsize=None, **params):
     if params.get('show') is False:  # return early but keep the call chain
         return self
     logger.info('===============showing image============')
     self.info()
     info = self.getInfo()
     assert info['width'] and info['height'], 'img shape error!'
     params = dict(self.imshow_params, **params)
     logger.info(params)
     logger.info(f'{title or self.title}:')
     if self.isColor():
         if params['cmap'] == 'hsv':
             logger.warn('HSV format should be converted back to RGB!')
         img = self.org()
         if self.CHANNELS_ORDER == ('b', 'g', 'r'):
             img = img[..., ::-1]  # swap B and R channels for plt show
     else:
         if params['cmap'] == 'viridis':
             logger.warn(
                 "Single-channel image, falling back to cmap 'Greys_r'")
             params['cmap'] = 'Greys_r'
             img = self.gray().org()
         else:
             img = self.org()
     fig = pylab.figure(figsize=figsize or FIGSIZE)
     x_major_locator = int(img.shape[1] / 10)
     x_minor_locator = x_major_locator / 4
     ax = plt.gca()
     ax.xaxis.set_major_locator(pylab.MultipleLocator(x_major_locator))
     ax.xaxis.set_minor_locator(pylab.MultipleLocator(x_minor_locator))
     ax.yaxis.set_minor_locator(pylab.MultipleLocator(x_minor_locator))
     pylab.imshow(img, **params)
     pylab.show()
     return self
Example #16
 def channelInfo(self):
     for i, c in enumerate(cv2.split(self.org())):
         logger.info(f"channel {i}:")
         logger.info(f"  mean:", c.mean())
         sum_, full = c.sum(), c.size * 255
         logger.info("  sum: {}/{} {:.2f}%".format(sum_, full,
                                                   sum_ / full * 100))
     return self
Example #17
    def test(cls, imagepath_or_obj, params=None):
        if params is None:
            params = {}
        if isinstance(imagepath_or_obj, str):
            img = cv2.imread(imagepath_or_obj)
        else:
            img = imagepath_or_obj

        import sys
        sys.path.append('/usr/local/python')
        # sys.path.append('/usr/local/lib')
        from openpose import pyopenpose as opp

        model_path = (cls.OPENPOSE_GREEN_PATH if os.path.exists(
            cls.OPENPOSE_GREEN_PATH) else cls.OPENPOSE_PATH) + '/models/'
        logger.info(f'using model path: {model_path}')
        full_params = {
            'model_folder': model_path,
            'model_pose': 'BODY_25',
            'number_people_max': 3,
            # 'net_resolution': '-1x368', # it is default value
            'logging_level': 3,
            'display': 0,
            'alpha_pose': 0.79,
            # 'face': 1,
            # 'hand': 1,
        }
        full_params.update(params)

        op_wrapper = opp.WrapperPython()
        op_wrapper.configure(full_params)
        op_wrapper.start()
        datum = opp.Datum()
        datum.cvInputData = img
        op_wrapper.emplaceAndPop([datum])
        logger.debug(datum.poseKeypoints)
        return datum
Example #18
    def _getModeEnv(cls, mode, debug=True):
        if mode == 'GPU':
            _, gpu_info = subprocess.getstatusoutput(
                'nvidia-smi --query-gpu=gpu_name --format=csv')
            logger.info(f'GPU info: {gpu_info}')
            if gpu_info.find('failed') >= 0:
                raise Exception('No GPU found!!!')
            elif 'P100' in gpu_info or 'P4' in gpu_info:
                source_file = cls.GPU_PACKAGE
            elif 'K80' in gpu_info:
                source_file = cls.GPU_PACKAGE_K80
            elif 'T4' in gpu_info:
                # T4 version modified openpose/cmake/Cuda.cmake file, removed ${TURING}.
                # refer to: https://github.com/CMU-Perceptual-Computing-Lab/openpose/issues/1232
                source_file = cls.GPU_PACKAGE_T4
            else:
                raise Exception('No pre-compiled openpose package for this GPU card!')

            logger.debug(f'{source_file} file selected.')
            cmake_c = cls.CMAKE_DEBUG_CMD if debug else cls.CMAKE_CMD
        else:
            source_file = cls.CPU_PACKAGE
            cmake_c = cls.CMAKE_CPU_DEBUG_CMD if debug else cls.CMAKE_CPU_CMD
        return cmake_c, source_file
Example #19
        def slot_dialog_btn_new_clicked(self):
            logger.debug('')
            action_name = self.combo_action_names.currentText()
            if not action_name:
                logger.info('Please add action first!')
                return
            action = self.actions[action_name]
            begin = self.line_begin.text() and int(self.line_begin.text())
            end = (self.line_end.text() or None) and int(self.line_end.text())
            label = ActionLabel(action.name, action.id, action.color, begin,
                                end, None)
            bresult = label.is_valid(['action', 'begin'])
            if not bresult:
                self.parent.status_prompt(bresult, True)
                return
            if end is None:
                self.labels_unfinished.append(label)
            else:
                self._commit_label(label)

            if self.checkb_autoclose.isChecked():
                self.buttonBox.accepted.emit()
            else:
                self._load_unfinished()
Example #20
    def installGreenVersion(cls, mode='GPU'):
        if exists(cls.OPENPOSE_GREEN_PATH):
            # if input("File exists, delete and republish? [y/n]:") == 'y':
            #     !rm -rf $OPENPOSE_GREEN_PATH
            # else:
            #     return
            logger.info(f'{cls.OPENPOSE_GREEN_PATH} already exists.')
            return

        cls._prepare()

        _, source_file = cls._getModeEnv(mode)

        logger.info(f'copy file...{source_file}')
        copy_pre_compiled_file = f'cp -rp {cls.DRIVE_PACKAGE_PATH}/{source_file} {cls.LOCAL_PACKAGE_PATH}/'
        cls.checkCall(copy_pre_compiled_file)

        logger.info('untar openpose...')
        decompression = f'tar xfz {cls.LOCAL_PACKAGE_PATH}/{source_file} --one-top-level={cls.OPENPOSE_GREEN_RPATH} --strip-components 1'
        cls.checkCall(decompression)

        logger.info('deploy openpose...')
        make_install = f'cd {cls.OPENPOSE_GREEN_PATH}/build/ && make install -j`nproc`'
        cls.checkCall(make_install)
Example #21
 def downloadCocoModel(cls, dst_path):
     logger.info('downloading coco model...')
     down = f'wget -N {cls.COCO_MODEL_URL} -P {dst_path}'
     cls.checkCall(down)
     logger.info('finished.')
Example #22
 def modelInfo(self):
     logger.info(f'=====model info=====\n'
                 f'inputs: {self._detector.inputs}\n'
                 f'output_dtypes: {self._detector.output_dtypes}\n'
                 f'output_shapes: {self._detector.output_shapes}\n'
                 f'label_map: {self._getLabelMap()}')
Example #23
 def info(self):
     logger.info(self.getInfo())
     return self