Example #1
    def btn_save_clicked(self):
        self.update_anno()
        do_save = True
        if os.path.exists(self.anno_file):
            reply = QMessageBox.question(
                self, 'Save Annotations',
                'File already exists. Do you want to overwrite: %s' %
                self.anno_file)
            if reply == QMessageBox.No:
                do_save = False

        if do_save:
            json_dump(self.anno_file, self.anno, verbose=True)
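
All of the examples below call json_dump/json_load helpers, and several pass numpy arrays straight in, so the dump step presumably converts numpy types; a minimal sketch under that assumption (the project's real utilities may differ):

import json

import numpy as np


def json_dump(file_path, data, verbose=False):
    """Write data as JSON, converting numpy types on the way (assumed behavior)."""

    def _convert(obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, (np.integer, np.floating)):
            return obj.item()
        raise TypeError('%s is not JSON serializable' % type(obj))

    with open(file_path, 'w') as fo:
        json.dump(data, fo, default=_convert)
    if verbose:
        print('Dumped %s' % file_path)


def json_load(file_path):
    with open(file_path, 'r') as fi:
        return json.load(fi)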
Example #2
def detect_marker(marker_path,
                  data_path,
                  output_file=None,
                  cache=False,
                  verbose=0):
    # check if folder/image or video case
    if os.path.isdir(data_path):
        # folder case
        base_dir = data_path
    else:
        # video case
        base_dir = os.path.dirname(data_path)

    # check for existing detection file
    if output_file is not None:
        det_file = os.path.join(base_dir, output_file)
        if cache and os.path.exists(det_file):
            if verbose > 0:
                print('Loading detection from: %s' % det_file)
            return json_load(det_file)

    if verbose > 0:
        print('Detecting marker on:')
        print('\tData path: %s' % data_path)
        print('\tMarker file: %s' % marker_path)

    if os.path.isdir(data_path):
        if verbose > 0:
            print('\tAssuming: Folder of images.')
        points2d, point_ids, img_shape, files, base_dir = _detect_marker_img_folder(
            marker_path, data_path, verbose)

    else:
        if verbose > 0:
            print('\tAssuming: Video file.')
        points2d, point_ids, img_shape, files, base_dir = _detect_marker_video(
            marker_path, data_path)

    # save detections
    det = {
        'p2d': points2d,
        'pid': point_ids,
        'img_shape': img_shape,
        'files': files
    }

    if output_file is not None:
        json_dump(det_file, det, verbose=verbose > 0)

    return det
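
A hypothetical call to detect_marker with caching enabled; the marker definition file and data path are placeholders, not files from the original project:

det = detect_marker('apriltag_board.json',
                    '/data/recordings/cam0/',
                    output_file='detections.json',
                    cache=True,
                    verbose=1)
print('Got detections for %d frames' % len(det['files']))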
Example #3
def merge_all_index_files(model):
    # create output structures
    dataset_index_labeled, calib_all = defaultdict(list), list()

    # Iterate over all recordings and check which output files exist
    set_names = list()
    for db in model.datasets:
        ident = get_ident(db)
        out_path = os.path.join(model.preprocessing['data_storage'], ident)
        set_names.append(db['db_set'])

        # check for record file
        file_out_rec = os.path.join(
            out_path, model.preprocessing['index_file_name'] % db['db_set'])
        if os.path.exists(file_out_rec):
            data = json_load(file_out_rec)
            data = update_calib_id(data, len(calib_all))
            dataset_index_labeled[db['db_set']].extend(data)

        # calib file
        calib_file = os.path.join(out_path, model.preprocessing['calib_file'])
        if os.path.exists(calib_file):
            calib_all.extend(json_load(calib_file))

    # Save merged indices
    for set_name in set(set_names):
        file_out = model.preprocessing['index_file_name'] % set_name
        if len(dataset_index_labeled[set_name]) > 0:
            json_dump(
                os.path.join(model.preprocessing['data_storage'], file_out),
                dataset_index_labeled[set_name])
            print(
                'Saved %d samples to %s' %
                (len(dataset_index_labeled[set_name]),
                 os.path.join(model.preprocessing['data_storage'], file_out)))

    # Save merged cam calibs
    if len(calib_all) > 0:
        json_dump(
            os.path.join(model.preprocessing['data_storage'],
                         model.preprocessing['calib_file']), calib_all)
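
merge_all_index_files shifts the per-recording calibration references before merging; a hedged sketch of the update_calib_id helper it relies on, assuming each sample stores a 'calib_id' index into the merged calibration list (the key name is an assumption):

def update_calib_id(data, offset):
    # shift each sample's calibration reference by the number of calib
    # entries that were already merged before this recording
    for sample in data:
        sample['calib_id'] += offset
    return data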
Example #4
                        action='store_true',
                        help='If set, saves data.')

    args = parser.parse_args()

    # load model data
    model = Model(args.model)

    # sanity check input
    assert os.path.exists(
        args.pose_pred_file), 'Given pose prediction file was not found.'
    if args.plane:
        assert os.path.exists(
            args.plane_file), 'Given plane definition file was not found.'

    # output file to save results to
    output_file_name = os.path.join(os.path.dirname(args.pose_pred_file),
                                    args.file_out_name)
    print('Output file: %s' % output_file_name)

    # load pose data
    pose_pred = json_load(args.pose_pred_file)

    # run analysis
    variables = analyse(args, model, pose_pred)

    if args.save:
        print('Saving file...')
        # save calculated variables
        json_dump(output_file_name, variables, verbose=True)
Example #5
def calc_intrinsics(marker_path,
                    data_path,
                    det_file_name,
                    output_file=None,
                    estimate_dist=True,
                    dist_complexity=5,
                    cache=False,
                    verbose=0):
    if os.path.isdir(data_path):
        base_dir = data_path
    else:
        base_dir = os.path.dirname(data_path)

    # try to load precomputed
    if output_file is not None:
        calib_file = os.path.join(base_dir, output_file)
        if cache and os.path.exists(calib_file):
            if verbose > 0:
                print('Loading intrinsic calibration from: %s' % calib_file)
            calib = json_load(calib_file)
            return np.array(calib['K']), np.array(calib['dist'])

    if verbose > 0:
        print('Calculating intrinsic calibration for:')
        print('\tData path: %s' % data_path)
        print('\tMarker file: %s' % marker_path)

    # set up detector and estimator
    detector = BoardDetector(marker_path)

    if os.path.isdir(data_path):
        if verbose > 0:
            print('\tAssuming: Folder of images.')
    else:
        if verbose > 0:
            print('\tAssuming: Video file.')

    # check for detections
    if det_file_name is None:
        det = detect_marker(marker_path,
                            data_path,
                            cache=cache,
                            verbose=verbose - 1)
    else:
        detections_file = os.path.join(base_dir, det_file_name)

        if not os.path.exists(detections_file):
            if verbose > 1:
                print(
                    'Could not locate marker detections. Running detector now and saving them to folder.'
                )
            det = detect_marker(marker_path,
                                data_path,
                                det_file_name,
                                verbose=verbose - 1)

        else:
            det = json_load(detections_file)

    # give points unique ids
    max_num_pts = len(detector.object_points)
    p2d, pid, p3dm, fid = enumerate_points(detector, det['p2d'], det['pid'],
                                           max_num_pts)
    if verbose > 0:
        print('Found %d unique points to estimate intrinsics from.' %
              pid.shape[0])

    # estimate intrinsics
    K, dist = estimate_intrinsics(p2d,
                                  fid,
                                  p3dm,
                                  det['img_shape'],
                                  estimate_dist=estimate_dist,
                                  dist_complexity=dist_complexity,
                                  verbose=verbose)

    # save intrinsics
    if output_file is not None:
        calib = {'K': K, 'dist': dist}
        json_dump(calib_file, calib, verbose=verbose > 0)
    return K, dist
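
A hypothetical end-to-end intrinsic calibration run; the marker file and recording path are placeholders:

K, dist = calc_intrinsics('apriltag_board.json',
                          '/data/recordings/cam0/',
                          det_file_name='detections.json',
                          output_file='calib_intrinsic.json',
                          cache=True,
                          verbose=1)
print('K =', K)
print('dist =', dist)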
Example #6
    for offset, name in jobs:
        # open a new canvas
        c = canvas.canvas()

        # draw the board
        generateAprilBoard(c, parsed.n_cols, parsed.n_rows, name, offset,
                           parsed.tsize, parsed.tagspacing, parsed.tagfamily,
                           parsed.border)

        # write to file
        my_mkdir(output_name_pdf % name, is_file=True)
        c.writePDFfile(output_name_pdf % name)
        print('Created %s.pdf' % (output_name_pdf % name))

    tag_desc = dict()
    tag_desc['family'] = parsed.tagfamily
    tag_desc['border'] = parsed.border
    tag_desc['double'] = parsed.double
    tag_desc['tsize'] = parsed.tsize
    # actual tag spacing in meters
    tag_desc['tspace'] = parsed.tsize * parsed.tagspacing
    tag_desc['n_x'] = parsed.n_cols
    tag_desc['n_y'] = parsed.n_rows
    # offsets are unknown here; they depend on the manufacturing process
    tag_desc['offset'] = [0.0, 0.0, 0.0]

    json_dump(output_name_json, tag_desc)
    print('Created %s' % output_name_json)
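
The board generator (and the annotation writer further below) call my_mkdir before writing files; a minimal sketch of what that helper presumably does, creating the containing folder when is_file=True:

import os


def my_mkdir(path, is_file=False):
    # for file paths only the containing directory is created
    dir_path = os.path.dirname(path) if is_file else path
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)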
Example #7
            boxes[:, 2] /= orig_shapes[:, 0]
            boxes = np.expand_dims(boxes, 1)
            scores = np.ones_like(boxes[:, :, 0])

        # process boxes
        pred = post_process_detections(boxes, scores,
                                       K_list,
                                       M_list,
                                       imgs.shape[1:3],
                                       verbose=False)
        predictions.append(pred)

        if args.show:
            img_vis_list = list()
            for bid in range(imgs.shape[0]):
                root_uv = cl.project(cl.trafo_coords(pred['xyz'], M_list[bid]), K_list[bid])
                img = cv2.circle(imgs[bid].astype(np.uint8),
                                 (int(root_uv[0, 0]), int(root_uv[0, 1])),
                                 radius=5,
                                 color=(0, 255, 255),
                                 thickness=-1)
                img_vis_list.append(draw_bb(img,
                                            pred['boxes'][bid] * imgs.shape[1],
                                            mode='lrtb', color='g'))

            merge = StitchedImage(img_vis_list)
            cv2.imshow('img_bb_post', merge.image[:, :, ::-1])
            cv2.waitKey(100)

    json_dump(pred_file_name, predictions, verbose=True)
Example #8
def attribution_by_permutation(output_file, pose_names, pose_mat,
                               class_labels):
    """ Attribute the influence to some body variables. """

    # masking
    mask = np.abs(class_labels) > 0.5
    y = class_labels[mask]
    X = pose_mat[mask]
    n_classes = np.unique(y).size

    result_table = PrettyTable(['Experiment', 'p-value'])

    # # classify from all factors
    # v = _p_value_from_permutation(X, y)
    # result_table.add_row(
    #     ['all_factors', '%.3e' % v]
    # )

    # iterate single factors
    results = list()
    for i, name in tqdm(enumerate(pose_names),
                        desc='Single factors',
                        total=len(pose_names)):
        pv, fs = _p_value_from_permutation(X[:, i:(i + 1)], y)
        results.append((name, pv, fs))

    all_scores = np.array([x[1] for x in results])
    mean_score = np.mean(all_scores)
    result_table.add_row(['single_factor_mean_score', '%.3e' % mean_score])
    result_table.add_row(['---', '---'])

    sorted_inds = np.argsort(all_scores)[::-1]
    for i in sorted_inds[:10]:
        name = pose_names[i]
        v = all_scores[i]
        result_table.add_row([name, '%.3e' % v])
    result_table.add_row(['---', '---'])

    for i in sorted_inds[-10:]:
        name = pose_names[i]
        v = all_scores[i]
        result_table.add_row([name, '%.3e' % v])
    num_sig = np.sum(all_scores < 0.1)

    print('Attribution summary:')
    print('Number of significant factors: %d' % num_sig)
    print('Data set: %d samples' % pose_mat.shape[0])
    print('Data set: %d samples valid' % np.sum(mask))
    print('Data set: %d factors' % pose_mat.shape[1])
    print(result_table)

    # with open(output_file, 'w') as fo:
    #     fo.write('Attribution summary Attribution by permutation\n')
    #     fo.write('Number of significant factors: %d\n' % num_sig)
    #     fo.write('Data set: %d samples\n' % pose_mat.shape[0])
    #     fo.write('Data set: %d samples valid\n' % np.sum(mask))
    #     fo.write('Data set: %d factors\n' % pose_mat.shape[1])
    #     fo.write(str(result_table))

    summary = {
        'num_samples': pose_mat.shape[0],
        'num_samples_valid': np.sum(mask),
        'num_factors': pose_mat.shape[1],
        'num_factors_sig': num_sig,
        'results': results,
    }
    json_dump(output_file, summary)
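
A hedged sketch of the _p_value_from_permutation helper used above: a permutation test around a simple classifier that returns the p-value and the score on the unpermuted labels; the original may use a different estimator or scoring:

from sklearn.model_selection import permutation_test_score
from sklearn.svm import LinearSVC


def _p_value_from_permutation(X, y, n_permutations=100):
    score, _, p_value = permutation_test_score(LinearSVC(),
                                               X,
                                               y,
                                               scoring='f1_macro',
                                               n_permutations=n_permutations)
    return p_value, score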
Example #9
def attribution(cls_type,
                output_file,
                pose_names,
                pose_mat,
                class_labels,
                pose_mat_eval,
                class_labels_eval,
                output_file_pred,
                save_pred=False):
    """ Attribute the influence to some body variables. """

    # masking
    mask_train = np.abs(class_labels) > 0.5
    mask_eval = np.abs(class_labels_eval) > 0.5

    fmt = '%.3f'
    result_table = PrettyTable(['Experiment', 'p-value'])

    # classify from all factors
    if save_pred:
        ## anything more complex than linear did not seem worth the effort
        # # for c in ['svm_linear', 'svm_linear_w',  'sgd', 'svm_nl', 'svm_nl_w', 'mlp', 'mlp', 'mlp', 'mlp']:
        # for c in ['svm_linear', 'svm_nl', 'mlp', 'mlp', 'mlp', 'mlp']:
        #     pv, fs, acc, pred = _classify(c,#cls_type,
        #                                   class_labels[mask_train], pose_mat[mask_train],
        #                                   class_labels_eval[mask_eval], pose_mat_eval[mask_eval], return_pred=save_pred)
        #     print('CLASSIFIER', c)
        #     print('ALL FACTORS ACCURACY:', acc)
        #     print('ALL FACTORS F1:', fs)
        #     print('---------')
        # exit()

        pv, fs, acc, stat, dof, expected, pred = _classify(
            cls_type,
            class_labels[mask_train],
            pose_mat[mask_train],
            class_labels_eval[mask_eval],
            pose_mat_eval[mask_eval],
            return_pred=save_pred)
        json_dump(output_file_pred, pred, verbose=True)
    else:
        pv, fs, acc, stat, dof, expected = _classify(
            'svm_linear', class_labels[mask_train], pose_mat[mask_train],
            class_labels_eval[mask_eval], pose_mat_eval[mask_eval])
    acc_all_factors = acc
    result_table.add_row(['all_factors', '%.3e' % pv])

    # iterate single factors
    results = list()
    for i, name in tqdm(enumerate(pose_names),
                        total=len(pose_names),
                        desc='Testing single factors'):
        pv, fs, acc, stat, dof, expected = _classify(
            'svm_linear', class_labels[mask_train], pose_mat[mask_train,
                                                             i:(i + 1)],
            class_labels_eval[mask_eval], pose_mat_eval[mask_eval, i:(i + 1)])

        results.append((name, pv, fs, acc, stat, dof, expected,
                        class_labels[mask_train].shape[0],
                        class_labels_eval[mask_eval].shape[0]))

    all_scores = np.array([x[1] for x in results])
    mean_score = np.mean(all_scores)
    result_table.add_row(['single_factor_mean_score', '%.3f' % mean_score])
    result_table.add_row(['---', '---'])

    sorted_inds = np.argsort(all_scores)[::-1]

    for i in sorted_inds:
        name = pose_names[i]
        v = all_scores[i]
        if v < 0.1:
            result_table.add_row([name, fmt % v])
    num_sig = np.sum(all_scores < 0.1)

    print('Attribution summary:')
    print('Number of significant factors %d' % num_sig)
    print('Train set: %d samples' % pose_mat.shape[0])
    print('Train set: %d samples valid' % np.sum(mask_train))
    print('Train set: %d factors' % pose_mat.shape[1])
    print('Eval set: %d samples' % pose_mat_eval.shape[0])
    print('Eval set: %d samples valid' % np.sum(mask_eval))
    print('Eval set: %d factors' % pose_mat_eval.shape[1])
    print(result_table)

    # with open(output_file, 'w') as fo:
    #     fo.write('Attribution summary: Attribution by classification\n')
    #     fo.write('Number of significant factors %d\n' % num_sig)
    #     fo.write('Train set: %d samples\n' % pose_mat.shape[0])
    #     fo.write('Train set: %d samples valid\n' % np.sum(mask_train))
    #     fo.write('Train set: %d factors\n' % pose_mat.shape[1])
    #     fo.write('Eval set: %d samples\n' % pose_mat_eval.shape[0])
    #     fo.write('Eval set: %d samples valid\n' % np.sum(mask_eval))
    #     fo.write('Eval set: %d factors\n' % pose_mat_eval.shape[1])
    #     fo.write(str(result_table))

    summary = {
        'num_train_samples': pose_mat.shape[0],
        'num_train_samples_valid': np.sum(mask_train),
        'num_eval_samples': pose_mat_eval.shape[0],
        'num_eval_samples_valid': np.sum(mask_eval),
        'num_factors': pose_mat.shape[1],
        'num_factors_sig': num_sig,
        'acc_all_factors': acc_all_factors,
        'results': results,
    }
    json_dump(output_file, summary)
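
The attribution function above depends on a _classify helper that returns a p-value, f-score, accuracy and chi-square statistics plus, optionally, the predictions; a hedged sketch of the 'svm_linear' case, where stat/dof/expected are assumed to come from a chi-square test on the true-vs-predicted contingency table:

import numpy as np
from scipy.stats import chi2_contingency
from sklearn.metrics import accuracy_score, f1_score
from sklearn.svm import LinearSVC


def _classify(cls_type, y_train, X_train, y_eval, X_eval, return_pred=False):
    assert cls_type == 'svm_linear', 'Only the linear SVM case is sketched.'
    clf = LinearSVC().fit(X_train, y_train)
    pred = clf.predict(X_eval)

    acc = accuracy_score(y_eval, pred)
    fs = f1_score(y_eval, pred, average='macro')

    # contingency table of true vs. predicted labels; +1 avoids empty cells
    classes = np.unique(np.concatenate([y_eval, pred]))
    table = np.array([[np.sum((y_eval == t) & (pred == p)) for p in classes]
                      for t in classes]) + 1
    stat, pv, dof, expected = chi2_contingency(table)

    if return_pred:
        return pv, fs, acc, stat, dof, expected, pred
    return pv, fs, acc, stat, dof, expected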
Example #10
def preproc_data(model):
    """ Preprocess labeled data so we can train networks with it. """
    print('Running preprocessing for:', model)
    print('Saving to output folder:', model.preprocessing['data_storage'])

    # Init output structures
    calib_all = list()
    for i, db in enumerate(model.datasets):
        dataset_index = defaultdict(list)
        ident = get_ident(db)
        print('Preprocessing dataset entry %d: %s' % (i, ident))

        # where we want to save the processed frames
        output_path = os.path.join(model.preprocessing['data_storage'], ident)

        # check if we previously dealt with this record
        if os.path.exists(output_path):
            print(' > This record was already preprocessed.')
            continue

        # check base path existence
        if not os.path.exists(db['path']):
            print(' > Base path not found: %s' % db['path'])
            continue

        # check calib file
        calib_file_path = os.path.join(db['path'], db['calib'])
        if not os.path.exists(calib_file_path):
            print(' > Calib file not found: %s' % calib_file_path)
            continue
        calib_all.append(
            load_calib_data(calib_file_path, return_cam2world=False))

        # check annotation file
        anno_file = os.path.join(db['path'], db['frame_dir'], db['anno'])
        if os.path.exists(anno_file):
            print(' > Loading annotations from %s' % anno_file)
            anno = json_load(anno_file)
            print(' > Got %d annotations' % len(anno))

        else:
            print(" > Can't find annotation file: %s" % anno_file)
            print(' > Assuming dataset is not labeled.')
            continue

        if check_if_labeled(anno):
            print(' > Found labeled sequence: %s' %
                  os.path.join(db['path'], db['frame_dir']))
            cnt = sum([len(x) for x in dataset_index.values()])
            this_index = process_labeled(model, cnt, output_path, db, anno,
                                         calib_all)
            print(' > Adding %d samples to labeled set %s' %
                  (len(this_index), db['db_set']))
            dataset_index[db['db_set']].extend(this_index)
        else:
            print(
                ' > Sequence appears to be unlabeled (e.g. the annotation file is empty).'
            )

        if len(dataset_index[db['db_set']]) > 0:
            file_out_rec = os.path.join(
                output_path,
                model.preprocessing['index_file_name'] % db['db_set'])
            json_dump(file_out_rec, dataset_index[db['db_set']])
            print(' > Saved %d samples to %s' %
                  (len(dataset_index[db['db_set']]), file_out_rec))

            # save Calib file
            json_dump(
                os.path.join(output_path, model.preprocessing['calib_file']),
                calib_all)

    merge_all_index_files(model)
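
preproc_data decides per recording whether annotations are usable via check_if_labeled; a hedged sketch, assuming the annotation file maps frames to (possibly empty) label entries:

def check_if_labeled(anno):
    # a sequence counts as labeled if at least one frame has a non-empty entry
    if len(anno) == 0:
        return False
    return any(len(entry) > 0 for entry in anno.values())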
Example #11
    def btn_write(self):
        self.save_label_state()  # save current annotation

        num_kp = len(self.config['keypoints'])
        empty = {
            'kp_xyz': np.zeros((num_kp, 3)),
            'vis3d': np.zeros((num_kp, ))
        }

        # assemble all info we want to write to disk
        output_data = dict()
        for k in self.file_list_sel_full_keys:
            fid = int(k)
            if k in self.label_tasks.keys():
                output_data[fid] = self.label_tasks[k]

                # project into views
                for i, cid in enumerate(self.cam_range):
                    # project into frame
                    xyz = self.label_tasks[k]['kp_xyz']
                    kp_uv = cl.project(cl.trafo_coords(xyz, self.M_list[i]),
                                       self.K_list[i], self.dist_list[i])
                    output_data[fid]['cam%d' % cid] = {
                        'kp_uv': kp_uv,
                        'vis': self.label_tasks[k]['vis3d']
                    }

            else:
                output_data[fid] = empty

        self.pb_start(len(output_data))

        # figure out base path
        i = 0
        while True:
            base_path = os.path.join(os.path.dirname(self.video_list[0]),
                                     self.output_task_dir % i)
            if not os.path.exists(base_path):
                break
            i += 1

        # dump frames
        for fid, _ in output_data.items():
            img_list, K_list, M_list, dist_list = self.precacher.get_data(fid)

            # write image frames
            for cid, img in zip(self.cam_range, img_list):
                output_path = os.path.join(base_path, 'cam%d' % cid,
                                           '%08d.png' % fid)
                my_mkdir(output_path, is_file=True)
                cv2.imwrite(output_path, img)
                # print('Dumped: ', output_path)
            self.pb_update()

        self.pb_finish()

        # dump anno
        anno_out_path = os.path.join(base_path, 'anno.json')
        my_mkdir(anno_out_path, is_file=True)
        json_dump(anno_out_path,
                  {'%08d.png' % k: v
                   for k, v in output_data.items()},
                  verbose=True)
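
The dumped task folder pairs per-camera frames with anno.json entries keyed by the same file names; a hypothetical read-back (the task path and camera index are placeholders, and unlabeled frames only carry the zero-filled default without per-camera entries):

import os

import cv2
import numpy as np

task_dir = '/path/to/video_dir/task_0'  # placeholder for one dumped task folder
anno = json_load(os.path.join(task_dir, 'anno.json'))

for frame_name, label in anno.items():
    img = cv2.imread(os.path.join(task_dir, 'cam0', frame_name))
    kp_xyz = np.array(label['kp_xyz'])
    if 'cam0' in label:
        kp_uv = np.array(label['cam0']['kp_uv'])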