Example #1
def test_soft_nms_device_and_dtypes_cpu():
    """
    CommandLine:
        xdoctest -m tests/test_soft_nms.py test_soft_nms_device_and_dtypes_cpu
    """
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
                          [49.3, 32.9, 51.0, 35.3, 0.9],
                          [35.3, 11.5, 39.9, 14.5, 0.4],
                          [35.2, 11.7, 39.7, 15.7, 0.3]])

    # CPU can handle float32 and float64
    dets = base_dets.astype(np.float32)
    new_dets, inds = soft_nms(dets, iou_thr)
    assert dets.dtype == new_dets.dtype
    assert len(inds) == len(new_dets) == 4

    dets = torch.FloatTensor(base_dets)
    new_dets, inds = soft_nms(dets, iou_thr)
    assert dets.dtype == new_dets.dtype
    assert len(inds) == len(new_dets) == 4

    dets = base_dets.astype(np.float64)
    new_dets, inds = soft_nms(dets, iou_thr)
    assert dets.dtype == new_dets.dtype
    assert len(inds) == len(new_dets) == 4

    dets = torch.DoubleTensor(base_dets)
    new_dets, inds = soft_nms(dets, iou_thr)
    assert dets.dtype == new_dets.dtype
    assert len(inds) == len(new_dets) == 4
    print("cur test passed!")
Example #2
def get_det_score(score, json_dets, dataset, model_configs, config_idx,
                  **kwargs):
    results = []
    for file_index, file_dets in enumerate(json_dets):
        all_dets = []
        config_dets = file_dets[config_idx]
        config = model_configs[config_idx]
        dets = []
        for index, det in enumerate(config_dets):
            if len(det) == 0:
                det = np.empty((0, 5))
            else:
                det = np.array(det)

            det = det[det[:, 4] >= score]
            det[:, 4] *= config["{}_weight".format(CLASSES[index])]
            dets.append(det)

        for index, det in enumerate(dets):
            if len(dets[index]) == 0:
                continue
            dets[index] = soft_nms(dets[index], 0.5)[0]
        results.append(dets)

    _, stats, aps = dataset.evaluate(results, 'bbox', **kwargs)
    return stats[0], aps
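As Example #1 shows, soft_nms returns a (new_dets, inds) pair, so the [0] indexing above keeps only the re-scored boxes and discards the kept indices. The per-class call is equivalent to:

new_dets, kept_inds = soft_nms(dets[index], 0.5)  # 0.5 is the IoU threshold
dets[index] = new_dets                            # i.e. soft_nms(dets[index], 0.5)[0]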
Example #3
def debugging_test():
    from mmdet.ops.nms.nms_wrapper import soft_nms
    import numpy as np
    a = soft_nms(np.array([[0.5, 0.5, 0.75, 0.75, 0.6], [0.5, 0.5, 1, 1, 0.4]],
                          dtype=np.float32),
                 iou_thr=0.5,
                 min_score=0.05)
    print(a)
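Here min_score appears to act as a confidence floor applied after soft-NMS decays the scores of overlapping boxes; raising it prunes more low-confidence detections. A sketch of the same call with a stricter floor (values are illustrative):

import numpy as np
from mmdet.ops.nms.nms_wrapper import soft_nms

dets = np.array([[0.5, 0.5, 0.75, 0.75, 0.6],
                 [0.5, 0.5, 1.0, 1.0, 0.4]], dtype=np.float32)
kept, inds = soft_nms(dets, iou_thr=0.5, min_score=0.3)  # stricter floor than 0.05 above
print(kept, inds)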
Example #4
def main():
	args = parse_args()
	os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
	config_file = open(args.config)
	configs = json.load(config_file)
	config_file.close()

	infer_results = []
	dataset = MyDataset(args.data_dir)
	file_count = len(dataset)
	dataloader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)

	models = []
	model_configs = []
	for config in configs:
		if not config['use']:
			continue
		print('config = {}, model = {}'.format(config['config'], config['model']))
		model = init_detector(config['config'], config['model'], device=torch.device('cuda', 0))
		models.append(model)
		model_configs.append(config)

	start = time.time()
	for image_index, data in enumerate(dataloader):
		im, filename = data[0][0].numpy(), data[1][0]
		image_id = filename.split('.')[0]
		all_dets = []
		for config, model in zip(model_configs, models):
			dets = inference_detector(model, im)
			for index, det in enumerate(dets):
				dets[index] = det[det[:,4]>=config[CLASSES[index]]]
				dets[index][:,4] *= config["{}_weight".format(CLASSES[index])]

			if len(all_dets) == 0:
				all_dets = dets
			else:
				for det_index, (all_det, det) in enumerate(zip(all_dets, dets)):
					all_dets[det_index] = np.concatenate((all_det, det), axis=0)

		for index, all_det in enumerate(all_dets):
			all_dets[index] = soft_nms(all_dets[index], 0.5, min_score=0.001)[0][0:100]

		for index, bboxes in enumerate(all_dets):
			category = CLASSES[index]
			for bbox in bboxes:
				infer_results.append((category, image_id+'.xml', str(bbox[4]), str(bbox[0]), str(bbox[1]), str(bbox[2]), str(bbox[3])))

		end = time.time()
		print('\r', '{}/{}, use time:{}'.format(image_index, file_count, end -start), end='', flush=True)
		start = end

	csvfile = open('../shengna_results.csv', 'w')
	writer = csv.writer(csvfile)
	writer.writerow(['name', 'image_id', 'confidence', 'xmin', 'ymin', 'xmax', 'ymax'])
	writer.writerows(infer_results)
	csvfile.close()
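Examples #4, #5 and #8 index each JSON config entry by class name for a per-class score threshold and by "<class>_weight" for a per-class score multiplier, alongside 'config', 'model' and 'use'. A hypothetical entry consistent with those lookups (paths and class names are placeholders, not taken from the source):

config_entry = {
    "config": "path/to/detector_config.py",  # mmdet config file (placeholder)
    "model": "path/to/checkpoint.pth",       # trained weights (placeholder)
    "use": True,                             # whether this model joins the ensemble
    "echinus": 0.001,                        # per-class score threshold (class name illustrative)
    "echinus_weight": 1.0                    # per-class score multiplier
}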
Example #5
def main():
    args = parse_args()
    config_file = open(args.config)
    configs = json.load(config_file)
    config_file.close()

    model_configs = []
    for config in configs:
        if config['use']:
            print('config = {}, model = {}'.format(config['config'],
                                                   config['model']))
        model_configs.append(config)
        g_config = config['config']

    cfg = mmcv.Config.fromfile(g_config)
    coco_dataset = build_dataset(cfg.data.test)

    with open('shengna_config/assemble_results.json') as fp:
        results = []
        json_dets = json.load(fp)
        for file_index, file_dets in enumerate(json_dets):
            all_dets = []
            for config_index, config_dets in enumerate(file_dets):
                config = model_configs[config_index]
                if not config['use']:
                    continue

                dets = []
                for index, det in enumerate(config_dets):
                    if len(det) == 0:
                        det = np.empty((0, 5))
                    else:
                        det = np.array(det)

                    det = det[det[:, 4] >= config[CLASSES[index]]]
                    det[:, 4] *= config["{}_weight".format(CLASSES[index])]
                    dets.append(det)

                if len(all_dets) == 0:
                    all_dets = dets
                else:
                    for det_index, (all_det,
                                    det) in enumerate(zip(all_dets, dets)):
                        all_dets[det_index] = np.concatenate((all_det, det),
                                                             axis=0)

            for index, all_det in enumerate(all_dets):
                #all_dets[index] = soft_nms(all_dets[index], 0.5, min_score=0.001)[0]
                all_dets[index] = soft_nms(all_dets[index],
                                           0.5,
                                           min_score=0.001)[0][0:100]
            results.append(all_dets)

        kwargs = {} if args.options is None else args.options
        coco_dataset.evaluate(results, 'bbox', **kwargs)
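The JSON consumed here is indexed as json_dets[image][model][class] -> list of [x1, y1, x2, y2, score] rows; Example #8 below dumps per-model raw detections in exactly this shape, so the two scripts appear to form an inference / offline re-tuning pair. For example (indices illustrative):

first_model_dets = json_dets[0][0]       # per-class detection lists for one model on one image
class0 = np.array(first_model_dets[0])   # (n, 5) array of [x1, y1, x2, y2, score]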
Example #6
def get_results(config_weights, json_dets, model_configs):
    print("\n\n\n\n current weights = {}".format(config_weights))
    results = []
    for file_index, file_dets in enumerate(json_dets):
        all_dets = []
        for config_index, config_dets in enumerate(file_dets):
            config = model_configs[config_index]
            if not config['use']:
                continue

            dets = []
            for index, det in enumerate(config_dets):
                if len(det) == 0:
                    det = np.empty((0, 5))
                else:
                    det = np.array(det)

                weight = config_weights[config_index]
                if weight == 0:
                    det = np.empty((0, 5))
                else:
                    det = det[det[:, 4] >= config[CLASSES[index]]]

                det[:, 4] *= weight
                dets.append(det)

            if len(all_dets) == 0:
                all_dets = dets
            else:
                for det_index, (all_det, det) in enumerate(zip(all_dets,
                                                               dets)):
                    all_dets[det_index] = np.concatenate((all_det, det),
                                                         axis=0)

        for index, all_det in enumerate(all_dets):
            if len(all_det) == 0:
                continue
            all_dets[index] = soft_nms(all_dets[index], 0.5)[0][0:100]

        results.append(all_dets)

    return results
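Like Example #2, this version guards against empty per-class arrays before calling soft_nms (Examples #4, #5 and #8 do not). The same guard could be factored into a small helper (hypothetical name):

def safe_soft_nms(det, iou_thr=0.5, top_k=100):
    # skip empty per-class arrays, otherwise keep the top-k re-scored boxes
    if len(det) == 0:
        return det
    return soft_nms(det, iou_thr)[0][:top_k]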
Example #7
for filename in tqdm.tqdm(deal_nms['image_id'].unique()):
    img_id = name2id[filename]
    base_dets = deal_nms[deal_nms['image_id'] == filename]
    #lis[jl] = filename
    for defect_label, value in underwater_classes.items():  # look up each class label and fuse its detections
        base_dets_1 = base_dets[base_dets['name'] == defect_label]
        dets = torch.FloatTensor(
            np.array(
                base_dets_1[['xmin', 'ymin', 'xmax', 'ymax',
                             'confidence']])).cuda()

        iou_thr = 0.5

        # suppressed, inds = nms(dets, iou_thr)
        suppressed, inds = soft_nms(dets, iou_thr)

        for press in suppressed:
            x1, y1, x2, y2, score = press.cpu().numpy()[:]
            x1, y1, x2, y2 = round(float(x1), 2), round(float(y1), 2), round(
                float(x2), 2), round(float(y2), 2)  # keep two decimal places
            result.append({
                'image_id': img_id,
                'bbox': [x1, y1, x2 - x1, y2 - y1],
                'category_id': int(value),
                'score': float(score)
            })

#for k in lis.keys():
#    lis_img = {}
#    lis_img['image_id'] = lis[k]
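This snippet assumes deal_nms is a pandas DataFrame of merged detections with image_id/name/box/confidence columns, name2id maps file names to numeric image ids, underwater_classes maps class names to category ids, and result collects COCO-style records. A hypothetical setup consistent with those names (paths and the class mapping are assumptions, not from the source):

import pandas as pd
deal_nms = pd.read_csv('merged_detections.csv')     # placeholder path
underwater_classes = {'holothurian': 1, 'echinus': 2,
                      'scallop': 3, 'starfish': 4}   # assumed class/id mapping
name2id = {name: i for i, name in enumerate(deal_nms['image_id'].unique())}  # illustrative
result = []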
Example #8
def main():
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    config_file = open(args.config)
    configs = json.load(config_file)
    config_file.close()

    results = []
    json_results = []
    image_dir = os.path.join(args.data_dir, 'JPEGImages')
    anno_dir = os.path.join(args.data_dir, 'Annotations')
    dataset = MyDataset(args.file_path, image_dir)
    file_count = len(dataset)
    dataloader = DataLoader(dataset=dataset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=1)

    models = []
    model_configs = []
    for config in configs:
        if not config['use']:
            continue
        print('config = {}, model = {}'.format(config['config'],
                                               config['model']))
        model = init_detector(config['config'],
                              config['model'],
                              device=torch.device('cuda', 0))
        models.append(model)
        model_configs.append(config)
        g_config = config['config']

    print(g_config)
    cfg = mmcv.Config.fromfile(g_config)
    coco_dataset = build_dataset(cfg.data.test)

    category_counts = {}
    start = time.time()
    for image_index, data in enumerate(dataloader):
        im, filename = data[0][0].numpy(), data[1][0]
        all_dets = []
        all_config_dets = []
        for config, model in zip(model_configs, models):
            dets = inference_detector(model, im)
            json_dets = []
            for det in dets:
                json_dets.append(det.tolist())
            all_config_dets.append(json_dets)

            for index, det in enumerate(dets):
                dets[index] = det[det[:, 4] >= config[CLASSES[index]]]
                dets[index][:, 4] *= config["{}_weight".format(CLASSES[index])]

            if len(all_dets) == 0:
                all_dets = dets
            else:
                for det_index, (all_det, det) in enumerate(zip(all_dets,
                                                               dets)):
                    all_dets[det_index] = np.concatenate((all_det, det),
                                                         axis=0)

        json_results.append(all_config_dets)

        for index, all_det in enumerate(all_dets):
            all_dets[index] = soft_nms(all_dets[index], 0.5,
                                       min_score=0.001)[0][0:100]

        results.append(all_dets)
        end = time.time()
        print('\r',
              '{}/{}, use time:{}'.format(image_index, file_count,
                                          end - start),
              end='',
              flush=True)
        start = end

    #mmcv.dump(results, 'eval/assemble_result.pkl')
    kwargs = {} if args.options is None else args.options
    coco_dataset.evaluate(results, 'bbox', **kwargs)
    with open(args.result, 'w') as fp:
        json.dump(json_results, fp, indent=4, separators=(',', ': '))
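For the coco_dataset.evaluate(results, 'bbox') call to work, results must hold one entry per test image, each entry a per-class list of (n, 5) float arrays of [x1, y1, x2, y2, score]; the soft-NMS loop above leaves all_dets in exactly that layout before it is appended. The json_results dump, by contrast, stores the raw per-model outputs before thresholding and weighting, which is what makes the offline re-tuning in Example #5 possible.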