Code example #1
 def save(self):
     save_path = QFileDialog.getSaveFileName(self, "Save as", self.DEFAULT_TFRECORDS_SAVE_DIR, "TFRecords (*.tfrecords)")
     save_path = os.path.splitext(save_path[0])[0]
     if not save_path:
         return
     config_file = save_path + ".pkl"
     pbtxt_text = self.pbtxt_text.text()
     if pbtxt_text and os.path.isfile(pbtxt_text):
         self.pbtxt_path = pbtxt_text
     else:
         self.pbtxt_path = save_path + '.pbtxt'
         label_list = self.get_label_list()
         with open(self.pbtxt_path, 'w') as f:
             for index, label in enumerate(label_list):
                 f.write("item {\n  id: %s\n  name: '%s'\n}\n\n\n" % (index + 1, label))
     itemsTextList = [str(self.list.item(i).text().split(self.DELIMITER)[0]) for i in range(self.list.count())]
     valid_items = 0
     dataset_configs = []
     for item in itemsTextList:
         path = str(item)
         if os.path.exists(path):
             annotation_path = self.get_roi_annotation_dir(path)
             roi = DatasetDetailsDialog.getROIfromAnnotationsPath(annotation_path)
             if roi is None:
                 roi = [-1, -1, -1, -1]
             else:
                 roi = [roi['xmin'], roi['ymin'], roi['xmax'], roi['ymax']]
             frames_list_path = os.path.join(annotation_path, '.frames_lists.pkl')
             if os.path.exists(frames_list_path):
                 frames_list = set()
                 with open(frames_list_path, 'rb') as f:
                     frames_list_rules = _pickle.load(f)
                 for start_frame, end_frame, interval in frames_list_rules:
                     frames_list.update(list(range(start_frame, end_frame + 1, interval)))
                 frames_list = sorted(list(frames_list))
             else:
                 frames_reader = get_frames_reader(path)
                 frames_list = list(range(0, frames_reader.num_frames, 1))
                 del frames_reader
             bbox_source_filter = []
             if self.use_ground_truth_bboxes.isChecked():
                 bbox_source_filter += ["ground_truth"]
             if self.use_detection_bboxes.isChecked():
                 bbox_source_filter += ["object_detector"]
             if self.use_tracking_bboxes.isChecked():
                 bbox_source_filter += ["tracker"]
             dataset_configs.append(dict(
                 frames_source=path,
                 annotation_source=annotation_path,
                 annotation_type='pascal_voc',
                 label_map_file=self.pbtxt_path,
                 mot_class_name='object',
                 frames_list=frames_list,
                 bbox_source_filter=bbox_source_filter,
                 roi=roi,
             ))
             valid_items += 1
     if valid_items:
         # write all dataset configs once, after processing every item
         with open(config_file, 'wb') as f:
             _pickle.dump(dataset_configs, f)
         self.save_path = save_path
         self.call_program(config_file, save_path + '.tfrecords')
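For reference, the config file written by save() is just a pickled list of per-dataset dicts, and the optional '.frames_lists.pkl' file it reads is a list of (start_frame, end_frame, interval) rules that get expanded into an explicit frame list. Below is a minimal sketch of writing such a rules file and inspecting a saved config; the file names are placeholders, and plain pickle is used in place of _pickle (the two produce compatible files):

import pickle

# frame-selection rules: every 5th frame of 0..99 plus all of 200..210
rules = [(0, 99, 5), (200, 210, 1)]
with open('.frames_lists.pkl', 'wb') as f:
    pickle.dump(rules, f)

# inspect a dataset config produced by save(); 'out.pkl' is a placeholder path
with open('out.pkl', 'rb') as f:
    dataset_configs = pickle.load(f)
for cfg in dataset_configs:
    print(cfg['frames_source'], cfg['annotation_type'], len(cfg['frames_list']))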
Code example #2
 def add_files(self):
     path = os.path.expanduser('~')
     opened_files = QFileDialog.getOpenFileNames(self, 'Add files', path, 'Video files (*)')
     for file in opened_files[0]:
         frames_reader = get_frames_reader(file)
         if frames_reader.num_frames > 0:
             self.addItem(file)
         del frames_reader
Code example #3
 def add_dirs(self):
     path = os.path.expanduser('~')
     opened_directory = QFileDialog.getExistingDirectory(self,
                                      'Add Directory', path, QFileDialog.ShowDirsOnly
                                      | QFileDialog.DontResolveSymlinks)
     if not opened_directory:
         return
     frames_reader = get_frames_reader(opened_directory)
     if frames_reader.num_frames > 0:
         self.addItem(opened_directory)
     del frames_reader
Code example #4
 def edit_dataset(self):
     selected_dataset = self.list.currentItem().text().split(self.DELIMITER)[0]
     self.currently_editing_dataset = selected_dataset
     self.currently_editing_index = self.list.currentRow()
     frames_reader = get_frames_reader(selected_dataset)
     num_frames = frames_reader.num_frames
     del frames_reader
     dataset_frame_selection_dialog = \
         DatasetDetailsDialog(parent=self, parent_window=self,
                              num_frames=num_frames, dataset_path=selected_dataset)
     dataset_frame_selection_dialog.show()
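The snippets above (and the ones that follow) all obtain their readers from the same get_frames_reader() factory and rely on a small surface of the returned object: num_frames, get_frame(frame_id), get_frame_by_name(), get_file_path(), setROI() and close(). A minimal sketch of iterating a sequence with that inferred interface; the path is a placeholder, and the save_as_bin keyword and close() call are taken from the Server.py and mask examples further down:

frames_reader = get_frames_reader('/data/some_sequence', save_as_bin=False)
try:
    for frame_id in range(frames_reader.num_frames):
        # get_frame() returns a numpy image and may raise IOError past the end of the sequence
        frame = frames_reader.get_frame(frame_id)
        # ... process frame ...
finally:
    frames_reader.close()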
Code example #5
File: Server.py  Project: fisis/PTF
    def patchTracking(self, request=None, img_path=''):

        request_path = request["path"]
        request_roi = request["roi"]
        id_number = request['id_number']
        # init_frame_id = request["frame_number"]
        init_frame_id = 0  # always start from frame 0; the requested frame number is ignored here
        init_bbox = request["bbox"]
        label = request['label']
        request_port = request["port"]

        if request_path != self.current_path:
            self.frames_reader = get_frames_reader(
                request_path, save_as_bin=self.params.save_as_bin)
            if request_roi is not None:
                self.frames_reader.setROI(request_roi)
            self.current_path = request_path

        n_frames = self.frames_reader.num_frames

        if self.params.end_frame_id >= init_frame_id:
            end_frame_id = self.params.end_frame_id
        else:
            end_frame_id = n_frames - 1

        for frame_id in range(init_frame_id, end_frame_id + 1):
            try:
                curr_frame = self.frames_reader.get_frame(frame_id)
            except IOError as e:
                print('{}'.format(e))
                break

            if request_port is not None:

                self.send(curr_frame, init_bbox, label, request_path, frame_id,
                          id_number, request_port)
            # self.single_object_tracking_results.append(tracking_result)

        sys.stdout.write('Closing tracker...\n')
        sys.stdout.flush()
Code example #6
def saveMasks(seq_path,
              xml_path,
              out_mask_size,
              out_border,
              fixed_ar,
              save_raw_mask,
              show_img,
              out_root_path='',
              save_test=1,
              save_train=1,
              frames_reader=None,
              masks_per_seq=0,
              enable_out_suffix=1,
              train_fnames=None,
              test_fnames=None,
              map_to_bbox=0,
              out_img_dir='',
              enable_xml_annotations=0,
              allow_skipping_images=0):
    global _pause, _exit

    if not xml_path or not os.path.isdir(xml_path):
        raise IOError(
            'Folder containing the loaded boxes does not exist: {}'.format(
                xml_path))

    files = glob.glob(os.path.join(xml_path, '*.xml'))
    n_files = len(files)
    if n_files == 0:
        raise IOError('No loaded boxes found')

    if frames_reader is None:
        frames_reader = get_frames_reader(seq_path, save_as_bin=False)

    min_dim = max_dim = 0
    out_w, out_h = out_mask_size
    print('out_mask_size: {}'.format(out_mask_size))

    if out_w == -1 and out_h == -1:
        out_w = out_h = 0

    if out_w == -1:
        max_dim = out_h
    elif out_h == -1:
        min_dim = out_w

    if fixed_ar:
        print('Using fixed aspect ratio: {}'.format(fixed_ar))

    print('out_border: {}'.format(out_border))

    def getint(fn):
        """Extract the numeric part of a filename for natural sorting."""
        basename = os.path.basename(fn)
        num = re.sub(r"\D", "", basename)
        try:
            return int(num)
        except ValueError:
            return 0

    if len(files) > 0:
        files = sorted(files, key=getint)

    print('Loading annotations from {:d} files'.format(n_files))
    file_id = 0
    n_boxes = 0

    seq_root_dir = os.path.dirname(seq_path)
    seq_name = os.path.basename(seq_path)
    if not out_root_path:
        out_root_path = os.path.join(seq_root_dir, 'masks')

    if not enable_out_suffix:
        out_seq_name = seq_name
    else:
        if map_to_bbox:
            out_seq_name = '{}_mapped'.format(seq_name)
        else:
            out_seq_name = '{}_{}x{}'.format(seq_name, out_w, out_h)
            if fixed_ar:
                out_seq_name = '{}_ar_{}'.format(out_seq_name, fixed_ar)
            else:
                out_seq_name = '{}_{}'.format(out_seq_name, out_border)

        out_seq_name = out_seq_name.replace('.', 'p')

    train_root_path = os.path.join(out_root_path, out_seq_name)

    if not save_test and not save_train:
        raise AssertionError('Either save_test or save_train must be on')

    # print('Saving output sequences to  {}'.format(out_root_path))

    if save_train:
        out_img_root_path = train_root_path
        if out_img_dir:
            out_img_root_path = os.path.join(out_img_root_path, out_img_dir)

        out_mask_root_path = os.path.join(train_root_path, 'labels')
        print('Saving training mask sequence to {}'.format(train_root_path))

        if not os.path.isdir(out_img_root_path):
            os.makedirs(out_img_root_path)

        if not os.path.isdir(out_mask_root_path):
            os.makedirs(out_mask_root_path)

        if enable_xml_annotations:
            out_xml_path = os.path.join(out_img_root_path, 'annotations')
            print('Saving xml_annotations to {}'.format(out_xml_path))
            if not os.path.isdir(out_xml_path):
                os.makedirs(out_xml_path)

    if save_test:
        out_test_seq_name = out_seq_name + '_test'
        test_img_root_path = os.path.join(out_root_path, out_test_seq_name)

        print('Saving unlabeled testing mask sequence to {}'.format(
            test_img_root_path))
        if not os.path.isdir(test_img_root_path):
            os.makedirs(test_img_root_path)

    win_name = 'patch and mask'

    disable_resizing = 0
    scale_x = scale_y = 1.0
    if out_w == 0 and out_h == 0:
        print('Resizing disabled')
        disable_resizing = 1

    csv_raw = []
    test_csv_raw = []

    n_files = len(files)

    if save_raw_mask:
        print('Saving raw labels')
        mask_pix_val = (1, 1, 1)
    else:
        mask_pix_val = (255, 255, 255)

    n_masks = 0

    _train_fnames = []
    _test_fnames = []

    _exit_seq = 0

    disp_img = None

    for file_id, file in enumerate(files):
        xml_reader = PascalVocReader(file)
        filename = os.path.basename(xml_reader.filename)
        filename_no_ext = os.path.splitext(filename)[0]
        # file_id = int(re.sub("\D", "", filename))

        # print('filename: {}'.format(filename))
        # print('file_id: {}'.format(file_id))

        img = frames_reader.get_frame_by_name(filename, convert_to_rgb=0)
        if img is None:
            print('image {} could not be read'.format(filename))
            continue

        img_h, img_w = img.shape[:2]

        mask_img = None

        shapes = xml_reader.getShapes()
        n_shapes = len(shapes)
        if n_shapes > 1:
            print('{} boxes found for {} in {}'.format(n_shapes, filename,
                                                       file))

        obj_id = 0

        img_written = 0
        for shape in shapes:
            label, points, _, _, difficult, bbox_source, id_number, score, mask, mask_img = shape
            if not mask:
                if not save_test:
                    continue

                xmin, ymin = points[0]
                xmax, ymax = points[2]
                img_root_path = test_img_root_path
            else:

                if not save_train:
                    continue

                mask_pts_list = Shape.getContourPts(mask, verbose=0)

                mask_pts = np.asarray(mask_pts_list)
                xmin, ymin = np.min(mask_pts, axis=0).astype(np.int32)
                xmax, ymax = np.max(mask_pts, axis=0).astype(np.int32)

                img_root_path = out_img_root_path

            if fixed_ar:
                w, h = xmax - xmin, ymax - ymin
                src_ar = float(w) / float(h)
                if fixed_ar > src_ar:
                    border_x = int((h * fixed_ar - w) / 2.0)
                    border_y = 0
                else:
                    border_y = int((w / fixed_ar - h) / 2.0)
                    border_x = 0
            else:
                border_x = border_y = out_border

            # start_row, start_col = max(0, ymin - border_y), max(0, xmin - border_x)
            # end_row, end_col = min(img_h - 1, ymax + border_y), min(img_w - 1, xmax + border_x)

            start_row, start_col = ymin - border_y, xmin - border_x
            end_row, end_col = ymax + border_y, xmax + border_x

            if start_row < 0 or start_col < 0 or end_row >= img_h or end_col >= img_w:
                msg = 'Invalid border {} for box {} in image {} of size {}'.format(
                    [border_x, border_y], [xmin, ymin, xmax, ymax], filename,
                    [img_w, img_h])
                if allow_skipping_images:
                    print('\n' + msg + '\n')
                    continue
                else:
                    raise AssertionError(msg)

            if mask:
                n_masks += 1

            w, h = end_col - start_col, end_row - start_row
            patch_img = img[start_row:end_row, start_col:end_col, :]

            if not disable_resizing:
                if max_dim > 0:
                    if w > h:
                        out_w = max_dim
                        out_h = 0
                    else:
                        out_h = max_dim
                        out_w = 0
                elif min_dim > 0:
                    if w < h:
                        out_w = min_dim
                        out_h = 0
                    else:
                        out_h = min_dim
                        out_w = 0
                else:
                    out_w, out_h = out_mask_size

                scale_x = float(out_w) / float(w)
                scale_y = float(out_h) / float(h)
                if scale_x == 0:
                    scale_x = scale_y
                    out_w = int(w * scale_x)
                elif scale_y == 0:
                    scale_y = scale_x
                    out_h = int(h * scale_y)
                try:
                    patch_img = cv2.resize(patch_img, (out_w, out_h))
                    # print('patch_img: {}'.format(patch_img.shape))
                except cv2.error as e:
                    print('patch_img: {}'.format(patch_img.shape))
                    print('out_size: {}, {}'.format(start_row, start_col))
                    print('out_size: {}, {}'.format(end_row, end_col))
                    print('out_size: {}, {}'.format(out_w, out_h))
                    raise cv2.error(e)
            else:
                out_w, out_h = w, h

            _label = label
            if id_number is None:
                id_number = -1
            if id_number > 0:
                _label = '{}_{}'.format(_label, id_number)

            if enable_out_suffix:
                out_fname = '{}_{}_{}'.format(filename_no_ext, obj_id, label)
            else:
                out_fname = filename_no_ext

            _xmin, _ymin = int((xmin - start_col) * scale_x), int(
                (ymin - start_row) * scale_y)
            _xmax, _ymax = int((xmax - start_col) * scale_x), int(
                (ymax - start_row) * scale_y)

            if map_to_bbox:
                if not img_written:
                    img_written = 1
                    out_img_path = os.path.join(img_root_path, filename)
                    cv2.imwrite(out_img_path, img)
                    if enable_xml_annotations:
                        imageShape = [xml_reader.height, xml_reader.width, 3]
                        xml_writer = PascalVocWriter(out_xml_path, filename,
                                                     imageShape)

                if mask:
                    if enable_xml_annotations:
                        bndbox = [xmin, ymin, xmax, ymax]
                        xml_writer.addBndBox(bndbox[0], bndbox[1], bndbox[2],
                                             bndbox[3], label, difficult,
                                             bbox_source, id_number, score,
                                             mask, mask_img)

                raw_data = {
                    'target_id': int(id_number),
                    'filename': filename,
                    'width': img_w,
                    'height': img_h,
                    'class': label,
                    'xmin': xmin,
                    'ymin': ymin,
                    'xmax': xmax,
                    'ymax': ymax
                }

                if show_img:
                    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0),
                                  2)
                    disp_img = img
            else:
                img_out_fname = out_fname + '.jpg'
                if mask:
                    out_img_path = os.path.join(img_root_path, img_out_fname)
                    cv2.imwrite(out_img_path, patch_img)

                    if enable_xml_annotations:
                        n_mask = len(mask)

                        _mask = []
                        for i in range(n_mask):
                            _mask.append([(mask[i][0] - start_col) * scale_x,
                                          (mask[i][1] - start_row) * scale_y,
                                          mask[i][2]])

                        imageShape = [xml_reader.height, xml_reader.width, 3]
                        xml_writer = PascalVocWriter(out_xml_path,
                                                     xml_reader.filename,
                                                     imageShape)
                        bndbox = [_xmin, _ymin, _xmax, _ymax]
                        xml_writer.addBndBox(_xmin, _ymin, _xmax, _ymax, label,
                                             difficult, bbox_source, id_number,
                                             score, _mask)
                raw_data = {
                    'target_id': int(id_number),
                    'filename': img_out_fname,
                    'width': out_w,
                    'height': out_h,
                    'class': label,
                    'xmin': _xmin,
                    'ymin': _ymin,
                    'xmax': _xmax,
                    'ymax': _ymax
                }

                if show_img:
                    cv2.rectangle(patch_img, (_xmin, _ymin), (_xmax, _ymax),
                                  (0, 255, 0), 2)
                    disp_img = patch_img

            if mask:
                if mask_img is None:
                    mask_img = np.zeros_like(img)
                # print('border_x: {}'.format(border_x))
                # print('border_y: {}'.format(border_y))
                # print('scale_x: {}'.format(scale_x))
                # print('scale_y: {}'.format(scale_y))
                #
                # print('xmin: {}'.format(xmin))
                # print('ymin: {}'.format(ymin))

                mask_pts = [[(x - xmin + border_x) * scale_x,
                             (y - ymin + border_y) * scale_y]
                            for x, y in mask_pts]
                curr_mask = np.zeros_like(patch_img, dtype=np.uint8)
                # print('mask_img: {}'.format(mask_img.shape))
                mask_out_fname = out_fname + '.png'

                # np.savetxt('mask_seq_mask_pts.txt', mask_pts, fmt='%.6f')

                curr_mask = cv2.fillPoly(
                    curr_mask, np.array([
                        mask_pts,
                    ], dtype=np.int32), mask_pix_val)

                # print('min: {} max: {}'.format(
                #     np.min(mask_img.flatten()),
                #     np.max(mask_img.flatten()))
                # )

                if map_to_bbox:
                    mask_img = map_mask_to_bbox(
                        (xmin, ymin, xmax, ymax), curr_mask, fixed_ar,
                        out_border, mask_img.shape, mask_img)
                else:
                    mask_img = curr_mask
                    out_mask_path = os.path.join(out_mask_root_path,
                                                 mask_out_fname)
                    cv2.imwrite(out_mask_path, mask_img)

                    _train_fnames.append((out_img_path, out_mask_path))

                    if show_img:
                        disp_mask_img = mask_img.copy()
                        if save_raw_mask:
                            disp_mask_img[disp_mask_img > 0] = 255
                        blended_img = np.asarray(
                            Image.blend(Image.fromarray(patch_img),
                                        Image.fromarray(disp_mask_img), 0.5))
                        disp_img = np.concatenate(
                            (disp_img, disp_mask_img, blended_img), axis=1)
                csv_raw.append(raw_data)
            else:
                test_csv_raw.append(raw_data)
                if not map_to_bbox:
                    _test_fnames.append(out_img_path)

            if show_img and not map_to_bbox:
                # if _pause:
                #     print('frame {} :: {}'.format(file_id, filename))
                cv2.imshow(win_name, disp_img)
                k = cv2.waitKey(1 - _pause)
                if k == ord('q'):
                    _exit = 1
                    break
                elif k == 27:
                    _exit_seq = 1
                    break
                elif k == 32:
                    _pause = 1 - _pause
            obj_id += 1

        if map_to_bbox and img is not None:
            out_img_path = os.path.join(out_img_root_path, filename)
            if save_train and mask_img is not None:
                mask_out_fname = filename_no_ext + '.png'
                out_mask_path = os.path.join(out_mask_root_path,
                                             mask_out_fname)
                cv2.imwrite(out_mask_path, mask_img)

                if enable_xml_annotations:
                    out_xml_file = os.path.join(out_xml_path,
                                                os.path.basename(file))
                    xml_writer.save(targetFile=out_xml_file)

                _train_fnames.append((out_img_path, out_mask_path))

                if show_img:
                    disp_mask_img = mask_img
                    if save_raw_mask:
                        disp_mask_img[disp_mask_img > 0] = 255
                    blended_img = np.asarray(
                        Image.blend(Image.fromarray(img),
                                    Image.fromarray(disp_mask_img), 0.5))
                    disp_img = np.concatenate(
                        (disp_img, disp_mask_img, blended_img), axis=1)

            elif save_test:
                out_img_path = os.path.join(test_img_root_path, filename)
                if out_img_path in _test_fnames:
                    raise IOError(
                        'Duplicate out_img_path: {}'.format(out_img_path))
                _test_fnames.append(out_img_path)

            if show_img and disp_img is not None:
                cv2.imshow(win_name, disp_img)
                k = cv2.waitKey(1 - _pause)
                if k == ord('q'):
                    _exit = 1
                    break
                elif k == 27:
                    break
                elif k == 32:
                    _pause = 1 - _pause
        if _exit:
            break

        sys.stdout.write(
            '\rDone {:d}/{:d} files {:s} ({:d} masks found)'.format(
                file_id + 1, n_files, filename, n_masks))
        sys.stdout.flush()

        if masks_per_seq > 0 and n_masks >= masks_per_seq:
            break

    sys.stdout.write('\n')
    sys.stdout.flush()

    if not _exit_seq and save_train and n_masks == 0:
        raise IOError('\nNo masks found for {}\n'.format(seq_path))

    train_csv_path = test_csv_path = ''
    if csv_raw:
        print('Saved {} labeled files in training sequence'.format(
            len(csv_raw)))
        train_csv_path = os.path.join(out_img_root_path, 'annotations.csv')
        pd.DataFrame(csv_raw).to_csv(train_csv_path)
    if test_csv_raw:
        print('Saved {} unlabeled files in test sequence'.format(
            len(test_csv_raw)))
        test_csv_path = os.path.join(test_img_root_path, 'annotations.csv')
        pd.DataFrame(test_csv_raw).to_csv(test_csv_path)

    if show_img:
        cv2.destroyWindow(win_name)

    if save_train and train_fnames is not None:
        train_fnames[
            out_seq_name] = _train_fnames, train_root_path, csv_raw, train_csv_path

    if save_test and test_fnames is not None:
        test_fnames[
            out_test_seq_name] = _test_fnames, test_img_root_path, test_csv_raw, test_csv_path

    return n_masks
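A hypothetical invocation of saveMasks(); all paths and sizes below are placeholders, not values taken from the project:

n_masks = saveMasks(
    seq_path='/data/seq_1',                # image sequence readable by get_frames_reader()
    xml_path='/data/seq_1/annotations',    # folder containing the PascalVOC .xml boxes/masks
    out_mask_size=(224, 224),              # fixed output size; use -1 for one dimension to keep the aspect ratio
    out_border=0,
    fixed_ar=0,                            # 0 disables the fixed-aspect-ratio crop
    save_raw_mask=0,                       # 0 writes 255-valued masks, 1 writes 1-valued label images
    show_img=0,
    save_train=1,
    save_test=0,
)
print('{} masks written'.format(n_masks))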
Code example #7
def visualize(vis_params,
              logger,
              img_path,
              csv_path,
              class_dict,
              init_frame_id=0,
              n_frames=0,
              request_roi=None,
              generator_mode=0,
              enable_masks=0,
              label='',
              only_boxes=0,
              crop_size=()):
    """

    :param vis_params:
    :param logger:
    :param img_path:
    :param csv_path:
    :param class_dict:
    :param init_frame_id:
    :param n_frames:
    :param request_roi:
    :param generator_mode:
    :return:
    """
    global _pause, _quit
    save_fname_templ = os.path.splitext(os.path.basename(img_path))[0]

    # csv_path = os.path.join(img_path, 'annotations.csv')

    df = pd.read_csv(csv_path)

    frames_reader = get_frames_reader(img_path, save_as_bin=False)
    if request_roi is not None:
        frames_reader.setROI(request_roi)
    class_labels = dict((v, k) for k, v in class_dict.items())

    if generator_mode:
        vis_params.show = 0
        vis_params.save = 0

    visualizer = Visualizer(vis_params, logger, class_labels)
    init_frame = frames_reader.get_frame(init_frame_id)

    height, width, _ = init_frame.shape
    frame_size = width, height
    visualizer.initialize(save_fname_templ, frame_size, _pause)

    if n_frames <= 0:
        n_frames = frames_reader.num_frames
    print('Reading {:d} images from {:s}...'.format(n_frames, img_path))

    for frame_id in range(init_frame_id, n_frames):
        try:
            curr_frame = frames_reader.get_frame(frame_id)
        except IOError as e:
            print('{}'.format(e))
            break

        if only_boxes:
            curr_frame = np.zeros_like(curr_frame)

        file_path = frames_reader.get_file_path()
        if file_path is None:
            print('Visualization is only supported on image sequence data')
            return

        filename = os.path.basename(file_path)
        multiple_instance = df.loc[df['filename'] == filename]
        # Total # of object instances in a file
        n_bboxes = len(multiple_instance.index)
        # Remove from df (avoids duplication)
        df = df.drop(multiple_instance.index[:n_bboxes])

        frame_data = []
        masks = []

        generic_target_id = -1

        if enable_masks:
            filename = os.path.basename(file_path)
            xml_path = os.path.join(img_path, 'annotations',
                                    os.path.splitext(filename)[0] + '.xml')
            if not os.path.isfile(xml_path):
                print('{} :: annotations xml file not found: {}'.format(
                    filename, xml_path))
                continue
            xml_reader = PascalVocReader(xml_path)
            shapes = xml_reader.getShapes()
            n_shapes = len(shapes)

            if n_shapes != n_bboxes:
                raise IOError(
                    'Mismatch between n_bboxes in xml: {} and csv: {}'.format(
                        n_shapes, n_bboxes))

        for box_id in range(n_bboxes):

            bbox = multiple_instance.iloc[box_id]
            try:
                target_id = bbox['target_id']
            except KeyError:
                target_id = generic_target_id
                generic_target_id -= 1

            xmin = bbox.loc['xmin']
            ymin = bbox.loc['ymin']
            xmax = bbox.loc['xmax']
            ymax = bbox.loc['ymax']
            class_name = bbox.loc['class']

            try:
                class_id = class_dict[str(class_name)]
            except KeyError:
                print('Ignoring annotation with invalid class: {}'.format(
                    class_name))
                continue

            width = xmax - xmin
            height = ymax - ymin

            curr_frame_data = [
                frame_id, target_id, xmin, ymin, width, height, class_id
            ]

            if enable_masks:
                mask = shapes[box_id][-2]
                if mask is not None:
                    _contour_pts = Shape.getContourPts(mask)
                    masks.append(_contour_pts)

            frame_data.append(curr_frame_data)

        frame_data = np.asarray(frame_data)
        res = visualizer.update(frame_id, curr_frame, frame_data, masks, label,
                                crop_size)
        if generator_mode:
            yield res
        # elif not res:
        #     break

    _quit = visualizer._quit
    _pause = visualizer._pause

    visualizer.close()
    frames_reader.close()
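Because of the yield statement, visualize() is a generator: nothing runs until the returned generator is iterated, even when generator_mode=0. A hedged usage sketch; the paths and the construction of vis_params / class_dict are placeholders:

# with generator_mode=1, each res is whatever Visualizer.update() returned for that frame
for res in visualize(vis_params, logger,
                     img_path='/data/seq_1',
                     csv_path='/data/seq_1/annotations.csv',
                     class_dict={'person': 1, 'object': 2},
                     generator_mode=1):
    pass  # ... consume res ...

# with generator_mode=0 the visualizer shows/saves frames itself, but the generator
# still has to be exhausted for anything to happen:
# for _ in visualize(...): pass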
Code example #8
File: Server.py  Project: fisis/PTF
    def visualize(self, request):
        request_path = request["path"]
        csv_path = request["csv_path"]
        class_dict = request["class_dict"]
        request_roi = request["roi"]
        init_frame_id = request["frame_number"]

        save_fname_templ = os.path.splitext(os.path.basename(request_path))[0]

        df = pd.read_csv(csv_path)

        if request_path != self.current_path:
            self.frames_reader = get_frames_reader(
                request_path, save_as_bin=self.params.save_as_bin)
            if request_roi is not None:
                self.frames_reader.setROI(request_roi)
            self.current_path = request_path
        class_labels = dict((v, k) for k, v in class_dict.items())

        # print('self.params.visualizer.save: ', self.params.visualizer.save)
        visualizer = Visualizer(self.params.visualizer, self.logger,
                                class_labels)
        init_frame = self.frames_reader.get_frame(init_frame_id)

        height, width, _ = init_frame.shape
        frame_size = width, height
        visualizer.initialize(save_fname_templ, frame_size)

        n_frames = self.frames_reader.num_frames
        for frame_id in range(init_frame_id, n_frames):
            try:
                curr_frame = self.frames_reader.get_frame(frame_id)
            except IOError as e:
                print('{}'.format(e))
                break

            file_path = self.frames_reader.get_file_path()
            if file_path is None:
                print('Visualization is only supported on image sequence data')
                return

            filename = os.path.basename(file_path)

            multiple_instance = df.loc[df['filename'] == filename]
            # Total # of object instances in a file
            no_instances = len(multiple_instance.index)
            # Remove from df (avoids duplication)
            df = df.drop(multiple_instance.index[:no_instances])

            frame_data = []

            for instance in range(0, len(multiple_instance.index)):
                target_id = multiple_instance.iloc[instance].loc['target_id']
                xmin = multiple_instance.iloc[instance].loc['xmin']
                ymin = multiple_instance.iloc[instance].loc['ymin']
                xmax = multiple_instance.iloc[instance].loc['xmax']
                ymax = multiple_instance.iloc[instance].loc['ymax']
                class_name = multiple_instance.iloc[instance].loc['class']
                class_id = class_dict[class_name]

                width = xmax - xmin
                height = ymax - ymin

                frame_data.append(
                    [frame_id, target_id, xmin, ymin, width, height, class_id])

            frame_data = np.asarray(frame_data)
            if not visualizer.update(frame_id, curr_frame, frame_data):
                break

        visualizer.close()
Code example #9
    def patchTracking(self, request=None, img_path=''):
        if self.params.mode == 2:
            sys.stdout.write('@@@ Starting tracker\n')
            sys.stdout.flush()
            cmd_args = sys.argv[1:]
        else:
            if request is not None:
                cmd_args = request['cmd_args']
            else:
                cmd_args = ''

        self.parseParams(self.parser, cmd_args)

        if request is not None:
            request_path = request["path"]
            request_roi = request["roi"]
            id_number = request['id_number']
            init_frame_id = request["frame_number"]
            init_bbox = request["bbox"]
            init_bbox_list = [
                int(init_bbox['xmin']),
                int(init_bbox['ymin']),
                int(init_bbox['xmax']),
                int(init_bbox['ymax']),
            ]
            label = request['label']
            request_port = request["port"]
        else:
            request_path = img_path if img_path else self.params.img_path
            request_roi = str2list(self.params.roi)
            id_number = self.params.id_number
            init_frame_id = self.params.init_frame_id

            if self.params.init_bbox:
                init_bbox_list = str2list(self.params.init_bbox)
                init_bbox = {
                    'xmin': init_bbox_list[0],
                    'ymin': init_bbox_list[1],
                    'xmax': init_bbox_list[2],
                    'ymax': init_bbox_list[3],
                }
            else:
                init_bbox = {}
            label = request_port = None

        gt_available = 0
        if request_path != self.current_path:
            self.frames_reader = get_frames_reader(request_path, save_as_bin=self.params.save_as_bin)
            if request_roi is not None:
                self.frames_reader.setROI(request_roi)
            self.current_path = request_path
            if not init_bbox:
                csv_path = os.path.join(request_path, 'annotations.csv')
                print('Reading annotations from {}'.format(csv_path))
                import pandas as pd
                df_gt = pd.read_csv(csv_path)
                _ = self.frames_reader.get_frame(init_frame_id)

                file_path = self.frames_reader.get_file_path()
                filename = os.path.basename(file_path)
                multiple_instance = df_gt.loc[df_gt['filename'] == filename]
                bbox = multiple_instance.iloc[0]
                xmin = bbox.loc['xmin']
                ymin = bbox.loc['ymin']
                xmax = bbox.loc['xmax']
                ymax = bbox.loc['ymax']
                init_bbox = {
                    'xmin': xmin,
                    'ymin': ymin,
                    'xmax': xmax,
                    'ymax': ymax,
                }
                gt_available = 1

        show_only = (self.params.mode == 1)
        tracker = PatchTracker(self.params.patch_tracker, self.logger, id_number, label, show_only=show_only)
        if not tracker.is_created:
            return

        init_frame = self.frames_reader.get_frame(init_frame_id)
        tracker.initialize(init_frame, init_bbox)
        if not tracker.is_initialized:
            self.logger.error('Tracker initialization was unsuccessful')
            return

        n_frames = self.frames_reader.num_frames

        if self.params.end_frame_id >= init_frame_id:
            end_frame_id = self.params.end_frame_id
        else:
            end_frame_id = n_frames - 1

        if self.params.mode == 1:

            if self.client is None:
                self.connectToExecutionServer()

            # print('init_bbox_list: ', init_bbox_list)
            remote_bbox = list2str(init_bbox_list)
            remote_img_path = os.path.join(self.params.remote_img_root_path, os.path.basename(request_path))
            cd_command = 'cd {:s}'.format(self.params.remote_path)
            exec_command = 'python3 Server.py --mode=2 ' \
                           '--cfg={:s} --img_path={:s} --id_number={:d} ' \
                           '--init_frame_id={:d} --init_bbox={:s}' \
                           ' --patch_tracker.tracker_type={:d}' \
                           ' --patch_tracker.cv_tracker_type={:d}' \
                           ' --patch_tracker.show=0' \
                           '' \
                           '\n'.format(
                self.params.remote_cfg,
                remote_img_path,
                id_number,
                init_frame_id,
                remote_bbox,
                self.params.patch_tracker.tracker_type,
                self.params.patch_tracker.cv_tracker_type,
            )
            # command = '{} && {}'.format(cd_command, exec_command)
            # if request_roi is not None:
            #     remote_roi = list2str(request_roi)
            #     command = '{:s} {:s}'.format(command, remote_roi)
            #
            print('Running:\n{:s}'.format(exec_command))
            curr_corners = np.zeros((2, 4), dtype=np.float64)

            # channel = self.client.invoke_shell(width=1000, height=3000)
            # _stdout = channel.makefile()

            self.channel.send(cd_command + '\n')

            # channel.send("sudo -s\n'''\n")
            # channel.send("'''" + '\n')

            self.channel.send(exec_command + '\n')
            # channel.send('exit' + '\n')

            # s = channel.recv(4096)
            # print(s)
            # client.close()
            # sys.exit()

            # _stdin, _stdout, _stderr = client.exec_command(command)
            # channel = _stdout.channel

            # print(_stdout.readlines())
            # while not _stderr.channel.recv_exit_status():
            #     if _stderr.channel.recv_ready():
            #         print(_stderr.read())

            pid = None
            tracking_started = 0
            # time.clock() was removed in Python 3.8; perf_counter() is the portable equivalent here
            wait_start_time = tracking_start_time = time.perf_counter()
            while not self.channel.exit_status_ready():
                if not self.channel.recv_ready():
                    wait_time = time.perf_counter() - wait_start_time
                    # sys.stdout.write('Waiting for stdout for {:f} secs\n'.format(wait_time))
                    # sys.stdout.flush()
                    if wait_time > self.params.wait_timeout:
                        print('Waiting time threshold exceeded')
                        break
                    continue

                wait_start_time = time.perf_counter()
                # sys.stdout.write('\n')
                # sys.stdout.flush()
                # remote_output = channel.recv(45).decode("utf-8")
                remote_output = self._stdout.readline().replace("^C", "")
                # print('remote_output: ', remote_output)

                if remote_output.startswith('@@@'):
                    tracking_started = 1
                    continue

                if not remote_output.startswith('###'):
                    sys.stdout.write(remote_output)
                    sys.stdout.flush()
                    continue

                if not tracking_started:
                    continue

                # sys.stdout.write(remote_output)
                # sys.stdout.flush()

                result_list = remote_output.strip().split()
                # print('remote_output: ', remote_output)
                # print('result_list: ', result_list)

                if len(result_list) != 8:
                    print('remote_output: ', remote_output)
                    print('result_list: ', result_list)
                    raise SystemError('Invalid output from the remote server')

                pid = int(result_list[1])
                frame_id = int(result_list[2])
                xmin = int(result_list[3])
                ymin = int(result_list[4])
                xmax = int(result_list[5])
                ymax = int(result_list[6])
                remote_fps = float(result_list[7])

                curr_corners[:, 0] = (xmin, ymin)
                curr_corners[:, 1] = (xmax, ymin)
                curr_corners[:, 2] = (xmax, ymax)
                curr_corners[:, 3] = (xmin, ymax)

                try:
                    curr_frame = self.frames_reader.get_frame(frame_id)
                except IOError as e:
                    print('{}'.format(e))
                    break

                end_time = time.perf_counter()
                fps = 1.0 / (end_time - tracking_start_time)

                tracker.show(curr_frame, curr_corners, frame_id, fps, remote_fps)

                out_bbox = dict(
                    xmin=xmin,
                    ymin=ymin,
                    xmax=xmax,
                    ymax=ymax,
                )
                self.send(curr_frame, out_bbox, label, request_path, frame_id, id_number, request_port)

                tracking_start_time = end_time
                if frame_id > end_frame_id or tracker.is_terminated:
                    # exit_client = paramiko.SSHClient()
                    # exit_client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
                    # exit_client.connect(self.params.hostname, username=self.params.username,
                    #                     password=self.params.password)
                    # _stdin, _stdout, _stderr = exit_client.exec_command()
                    # exit_client.close()
                    # self.channel.send(chr(3))
                    # self.channel.send(chr(3))
                    # self.channel.send(chr(3))

                    # self.channel.send('exit' + '\n')

                    # channel_closed = 1

                    # channel.close()
                    # self.channel.send('pkill -P {:d}\n'.format(pid))
                    break

            # while not channel.recv_ready():
            #     sys.stdout.write('\rwaiting')
            #     sys.stdout.flush()
            #     continue

            tracker.close()

            self.channel.send(chr(3))
            if pid is not None:
                self.channel.send('pkill -P {:d}\n'.format(pid))

            # self.flushChannel()

            # p = multiprocessing.Process(target=self.flushChannel)
            # p.start()
            # # Wait for 1 second or until process finishes
            # p.join(self.params.flush_timeout)
            # if p.is_alive():
            #     p.terminate()
            #     p.join()

            print('\ndone execution')

            # _stdout.close()
            # _stderr.close()

            # channel.send('exit' + '\n')
            # self.channel.close()
            # client.close()
            return

        # init_frame = self.frames_reader.get_frame(init_frame_id)
        # init_file_path = self.frames_reader.get_file_path()

        # try:
        #     self.runTracker(tracker, init_frame_id)
        # except KeyboardInterrupt:
        #     pass

        # self.logger.info('Tracking target {:d} in sequence with {:d} frames '
        #                  'starting from frame {:d}'.format(
        #     id_numbers[0], n_frames, init_frame_id + 1))

        save_path = ''
        if self.params.save_dir:
            file_path = self.frames_reader.get_file_path()
            save_path = os.path.join('log', self.params.save_dir, os.path.basename(os.path.dirname(file_path)))
            if not os.path.isdir(save_path):
                os.makedirs(save_path)
            save_csv_path = os.path.join(save_path, 'annotations.csv')
            print('Saving results csv to {}'.format(save_csv_path))

        if self.params.track_init_frame:
            start_frame_id = init_frame_id
        else:
            start_frame_id = init_frame_id + 1

        csv_raw = []
        if label is None:
            label = 'generic'
        for frame_id in range(start_frame_id, end_frame_id + 1):
            try:
                curr_frame = self.frames_reader.get_frame(frame_id)
            except IOError as e:
                print('{}'.format(e))
                break

            file_path = self.frames_reader.get_file_path()
            filename = os.path.basename(file_path)

            gt_bbox = None
            if gt_available:
                multiple_instance = df_gt.loc[df_gt['filename'] == filename]
                bbox = multiple_instance.iloc[0]
                xmin = bbox.loc['xmin']
                ymin = bbox.loc['ymin']
                xmax = bbox.loc['xmax']
                ymax = bbox.loc['ymax']
                label = bbox.loc['class']
                gt_bbox = {
                    'xmin': xmin,
                    'ymin': ymin,
                    'xmax': xmax,
                    'ymax': ymax,
                }
            try:
                fps = tracker.update(curr_frame, frame_id, gt_bbox=gt_bbox)
            except KeyboardInterrupt:
                break

            if tracker.out_bbox is None:
                # self.logger.error('Tracker update was unsuccessful')
                break

            if save_path:

                if tracker.curr_mask_cropped is not None:
                    mask_filename = os.path.splitext(filename)[0] + '.png'
                    save_mask_path = os.path.join(save_path, mask_filename)
                    curr_mask_norm = (tracker.curr_mask_cropped * 255.0).astype(np.uint8)
                    cv2.imwrite(save_mask_path, curr_mask_norm)

                    mask_filename_bin = os.path.splitext(filename)[0] + '.npy'
                    save_mask_path_bin = os.path.join(save_path, mask_filename_bin)
                    np.save(save_mask_path_bin, tracker.curr_mask_cropped)


                if self.params.save_csv:
                    orig_height, orig_width = curr_frame.shape[:2]
                    xmin = tracker.out_bbox['xmin']
                    xmax = tracker.out_bbox['xmax']
                    ymin = tracker.out_bbox['ymin']
                    ymax = tracker.out_bbox['ymax']

                    raw_data = {
                        'filename': filename,
                        'width': orig_width,
                        'height': orig_height,
                        'class': label,
                        'xmin': int(xmin),
                        'ymin': int(ymin),
                        'xmax': int(xmax),
                        'ymax': int(ymax),
                        'confidence': tracker.score
                    }
                    csv_raw.append(raw_data)

            if self.params.mode == 2 and not self.params.patch_tracker.show:
                sys.stdout.write('### {:d} {:d} {:d} {:d} {:d} {:d} {:5.2f}\n'.format(
                    self.pid, frame_id, tracker.out_bbox['xmin'], tracker.out_bbox['ymin'],
                    tracker.out_bbox['xmax'], tracker.out_bbox['ymax'], fps
                ))
                sys.stdout.flush()
                continue

            if request_port is not None:
                # only a path to the saved mask is sent; guard against frames where no mask was written
                mask = None
                if save_path and tracker.curr_mask_cropped is not None:
                    mask = os.path.abspath(save_mask_path_bin)
                # if tracker.curr_mask_cropped is not None:
                #     # mask = np.expand_dims(tracker.curr_mask, axis=0).tolist()
                #     mask = tracker.curr_mask_cropped.tolist()
                # else:
                #     mask = None

                self.send(curr_frame, tracker.out_bbox, label, request_path, frame_id,
                          id_number, request_port, masks=mask)
            # self.single_object_tracking_results.append(tracking_result)

            if tracker.is_terminated:
                break

        sys.stdout.write('Closing tracker...\n')
        sys.stdout.flush()

        tracker.close()

        if save_path and self.params.save_csv:
            df = pd.DataFrame(csv_raw)
            df.to_csv(save_csv_path)
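The remote tracking path in the last example uses a plain-text line protocol over the SSH channel: the remote side (mode 2) prints one '### pid frame_id xmin ymin xmax ymax fps' line per frame, a line starting with '@@@' signals that tracking has started, and everything else is forwarded to the local stdout. A hedged sketch of parsing one such status line, mirroring the consumer loop above; the values are made up:

line = '### 4242 17 100 50 220 180 31.40'
fields = line.strip().split()
assert fields[0] == '###' and len(fields) == 8
pid, frame_id = int(fields[1]), int(fields[2])
xmin, ymin, xmax, ymax = map(int, fields[3:7])
remote_fps = float(fields[7])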