Code example #1
File: predict.py  Project: amsword/ssd.pytorch
def predict(**kwargs):
    train_data = 'voc20'
    train_dataset = TSVDataset(train_data)
    labelmap = train_dataset.load_labelmap()

    # load net
    num_classes = len(labelmap) + 1  # +1 for background
    net = build_ssd('test', 300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(kwargs['trained_model']))
    net.eval()

    test_data = 'voc20'
    test_dataset = TSVDataset(test_data)
    from data.voc0712 import TSVDetection
    dataset_mean = (104, 117, 123)
    dataset = TSVDetection(test_dataset.get_data('test'),
                           train_dataset.get_labelmap_file(),
                           BaseTransform(300, dataset_mean))
    if kwargs['cuda']:
        net = net.cuda()
        cudnn.benchmark = True
    pred_file = kwargs['pred_file']
    # evaluation
    test_net(pred_file,
             net,
             kwargs['cuda'],
             dataset,
             labelmap,
             thresh=kwargs['confidence_threshold'])
    gt_file = test_dataset.get_data('test', 'label')
    from deteval import deteval_iter
    deteval_iter(tsv_reader(gt_file),
                 pred_file,
                 report_file=op.splitext(pred_file)[0] + '.report')
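A minimal invocation sketch, not taken from the project; the kwargs keys match the ones read above, and the paths are placeholders:

predict(trained_model='weights/ssd300_voc20.pth',
        cuda=True,
        pred_file='output/voc20_test.predict.tsv',
        confidence_threshold=0.01)
# deteval_iter then writes 'output/voc20_test.predict.report' next to pred_file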
Code example #2
File: process_dataset.py  Project: vardhman1996/qdv
def construct_low_shot(source_dataset, labels, num_train_images, shuffle_file):
    assert len(labels) == 1
    target_images = []
    rows = tsv_reader(source_dataset.get_train_tsv())
    shuffle = []
    for i, row in enumerate(rows):
        curr_labels = json.loads(row[1])
        if any([l['class'] == labels[0] for l in curr_labels]):
            target_images.append((i, row[0]))
        else:
            shuffle.append(i)
    assert len(target_images) >= num_train_images
    random.seed(777)
    random.shuffle(target_images)
    selected = target_images[:num_train_images]
    # ceiling division; use // so num_duplicate stays an int under Python 3
    num_duplicate = (len(target_images) + num_train_images -
                     1) // num_train_images
    assert num_duplicate >= 1
    for d in range(num_duplicate):
        shuffle.extend([s[0] for s in selected])

    random.shuffle(shuffle)
    write_to_file('\n'.join(map(str, selected)), shuffle_file + '.selected')
    write_to_file('\n'.join(map(str, shuffle)), shuffle_file)
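A hypothetical call that builds a 5-shot split for one class; the dataset and class names are placeholders, and TSVDataset is reused from the first example:

construct_low_shot(TSVDataset('voc20'),
                   labels=['dog'],
                   num_train_images=5,
                   shuffle_file='data/voc20_dog_5shot.shuffle')
# writes the shuffle file plus a '.selected' sidecar listing the chosen images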
Code example #3
File: process_dataset.py  Project: vardhman1996/qdv
def ensure_dataset_sample(source_dataset, sample_label, sample_image,
                          out_data):
    if op.exists(out_data):
        logging.info('skip generating the sample data since it exists')
        return
    logging.info('start to generate the sample data')
    assert source_dataset.name == 'imagenet2012', 'only cls dataset is supported'
    assert sample_label > 0 and sample_label <= 1
    assert sample_image > 0 and sample_image <= 1
    random.seed(777)
    labels = source_dataset.load_labelmap()
    num_labels = len(labels)
    num_sampled_labels = int(num_labels * sample_label)
    assert num_sampled_labels > 0
    # list() so random.shuffle can shuffle in place under Python 3
    label_idx = list(range(num_labels))
    random.shuffle(label_idx)
    sampled_labels_idx = label_idx[:num_sampled_labels]
    train_rows = tsv_reader(source_dataset.get_train_tsv())
    train_images = [
        row for row in train_rows if int(row[1]) in sampled_labels_idx
    ]
    for row in train_images:
        row[1] = str(sampled_labels_idx.index(int(row[1])))
    random.shuffle(train_images)
    tsv_writer(train_images[:int(len(train_images) * sample_image)],
               op.join(out_data, 'train.tsv'))
    # process the test set
    test_rows = tsv_reader(source_dataset.get_test_tsv_file())
    test_images = [
        row for row in test_rows if int(row[1]) in sampled_labels_idx
    ]
    for row in test_images:
        row[1] = str(sampled_labels_idx.index(int(row[1])))
    tsv_writer(test_images, op.join(out_data, 'test.tsv'))
    # save the label map
    sampled_labels = [labels[i] for i in sampled_labels_idx]
    write_to_file('\n'.join(sampled_labels), op.join(out_data, 'labelmap.txt'))
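A hypothetical call, keeping 10% of the imagenet2012 classes and 50% of their training images; the output path is a placeholder and TSVDataset is reused from the first example:

ensure_dataset_sample(TSVDataset('imagenet2012'),
                      sample_label=0.1,
                      sample_image=0.5,
                      out_data='data/imagenet2012_sample')
# out_data receives train.tsv, test.tsv and labelmap.txt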
Code example #4
File: demo_detection.py  Project: vardhman1996/qdv
def __init__(self, source_image_tsv):
    self._cap = None
    self._rows = None
    self._im = None
    self._all_file = None
    self._all_file_idx = 0
    # keep the path so next() can restart the TSV reader once it is exhausted
    self._source_image_tsv = source_image_tsv
    if source_image_tsv is None:
        # no source given: capture frames from the default webcam
        self._cap = cv2.VideoCapture(0)
    else:
        if source_image_tsv.endswith('tsv'):
            self._rows = tsv_reader(source_image_tsv)
        elif op.isfile(source_image_tsv):
            self._im = cv2.imread(source_image_tsv, cv2.IMREAD_COLOR)
        elif op.isdir(source_image_tsv):
            self._all_file = glob.glob(op.join(source_image_tsv, '*'))
Code example #5
def convert_to_cocoformat(predict_tsv, predict_json, label_to_id):
    # read all the data and convert them into coco's json format
    rows = tsv_reader(predict_tsv)
    annotations = []
    for row in rows:
        image_id = int(row[0])
        rects = json.loads(row[1])
        for rect in rects:
            r = rect['rect']
            # convert [x1, y1, x2, y2] to COCO's [x, y, width, height]
            ann = [r[0], r[1], r[2] - r[0], r[3] - r[1]]
            category_id = label_to_id[rect['class']]
            annotations.append({
                'image_id': image_id,
                'category_id': category_id,
                'bbox': ann,
                'score': rect['conf']
            })
    write_to_file(json.dumps(annotations), predict_json)
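One plausible consumer of the JSON written above is pycocotools; a sketch assuming a COCO-format ground-truth file already exists (both file names are placeholders):

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO('gt_coco.json')             # hypothetical ground truth
coco_dt = coco_gt.loadRes('predict.json')  # the file written by the function
ev = COCOeval(coco_gt, coco_dt, 'bbox')
ev.evaluate()
ev.accumulate()
ev.summarize()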
Code example #6
File: demo_detection.py  Project: vardhman1996/qdv
def next(self):
    if self._cap:
        r, im = self._cap.read()
        im = cv2.flip(im, 1)
        return im
    elif self._rows:
        # next(..., None) avoids StopIteration when the TSV is exhausted
        x = next(self._rows, None)
        if x is not None:
            return img_from_base64(x[-1])
        else:
            # restart from the path stored in __init__ and wrap around
            self._rows = tsv_reader(self._source_image_tsv)
            return img_from_base64(next(self._rows)[-1])
    elif self._im is not None:
        return self._im
    elif self._all_file is not None:
        while True:
            f = self._all_file[self._all_file_idx]
            im = cv2.imread(f, cv2.IMREAD_COLOR)
            self._all_file_idx = self._all_file_idx + 1
            # >= so the index wraps before it can run past the list
            if self._all_file_idx >= len(self._all_file):
                self._all_file_idx = 0
            if im is not None:
                return im
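A sketch of a display loop driving this frame source; ImageSource is an assumed name for the class whose __init__ and next appear above:

source = ImageSource('demo_images/')      # a directory of images
while True:
    im = source.next()
    cv2.imshow('demo', im)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cv2.destroyAllWindows()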
Code example #7
File: process_dataset.py  Project: vardhman1996/qdv
def map_label(source, labels, synset_tree, source_label):
    assert synset_tree

    if is_noffset_list(labels):
        noffsets = labels
    else:
        mapper = LabelToSynset()
        noffsets = [synset_to_noffset(mapper.convert(l)) for l in labels]
        for noffset in noffsets:
            assert len(synset_tree.root.search_nodes(name=noffset)) == 1

    label_noffset = {
        label: noffset
        for label, noffset in zip(labels, noffsets)
    }
    rows = tsv_reader(source)

    def convert_label(rows, labels, label_noffset):
        # a purely numeric second column marks a classification dataset;
        # otherwise the column already holds a JSON detection list
        is_cls_set = False
        for i, row in enumerate(rows):
            if row[1].isdigit():
                infos = [{
                    'class': label_noffset[labels[int(row[1])]],
                    'rect': [0, 0, 0, 0]
                }]
                is_cls_set = True
            else:
                assert not is_cls_set
                infos = json.loads(row[1])
                for info in infos:
                    info['class'] = label_noffset[info['class']]
            if (i % 1000) == 0:
                logging.info(i)
            yield row[0], json.dumps(infos)

    tsv_writer(convert_label(rows, labels, label_noffset), source_label)
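A before/after sketch of one converted row; the WordNet offset shown is illustrative:

# classification-style input row (second column is an index into labels):
#   img_0001 <TAB> 5
# row written to source_label:
#   img_0001 <TAB> [{"class": "n02084071", "rect": [0, 0, 0, 0]}]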
Code example #8
File: process_dataset.py  Project: vardhman1996/qdv
def gen_rows():
    # tsv_file and old_new come from the enclosing function's scope
    rows = tsv_reader(tsv_file)
    for row in rows:
        # remap the numeric label through the old-index -> new-index map
        row[1] = str(old_new[int(float(row[1]))])
        yield row
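gen_rows closes over tsv_file and old_new from its enclosing function; a sketch of that surrounding scope, with tsv_writer reused from the examples above and both values assumed:

old_new = {0: 2, 1: 0, 2: 1}  # assumed old-index -> new-index map
tsv_file = 'data/train.tsv'   # assumed input path
tsv_writer(gen_rows(), 'data/train.remapped.tsv')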