Example No. 1
    def to_Dets2(boxes, probs, img_ids, score_threshold=0.1):
        """for each box, there may be more than one class labels"""
        boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids])
        Dets = []
        for i in range(0, cfg.batch_size):
            inds = np.where(img_ids == i)[0]
            probs_ = probs[inds]
            boxes_ = boxes[inds]
            if probs_.shape[1] == 2:
                cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32)
                cls_probs = probs_[:, 1]
                dets = np.concatenate((boxes_.reshape(-1, 4),
                                       cls_probs[:, np.newaxis],
                                       cls_ids[:, np.newaxis]), axis=1)
            else:
                d0_inds, d1_inds = np.where(probs_[:, 1:] > score_threshold)
                if d0_inds.size > 0:
                    cls_ids = d1_inds + 1
                    cls_probs = probs_[d0_inds, cls_ids]
                    boxes_ = boxes_[d0_inds, :]
                    dets = np.concatenate(
                        (boxes_.reshape(-1, 4), cls_probs[:, np.newaxis],
                         cls_ids[:, np.newaxis]),
                        axis=1)
                else:
                    cls_ids = probs_[:, 1:].argmax(axis=1) + 1
                    cls_probs = probs_[np.arange(probs_.shape[0]), cls_ids]
                    dets = np.concatenate(
                        (boxes_.reshape(-1, 4), cls_probs[:, np.newaxis],
                         cls_ids[:, np.newaxis]),
                        axis=1)

            Dets.append(dets)
        return Dets
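
For reference, a minimal self-contained sketch of the thresholding branch used above (the arrays, box count, class count, and threshold are dummy values, not from the original code):

import numpy as np

# Toy inputs: 3 boxes, 4 prob columns = [background, cls1, cls2, cls3] (softmax-style).
boxes = np.array([[0, 0, 10, 10], [5, 5, 20, 20], [2, 2, 8, 8]], dtype=np.float32)
probs = np.array([[0.7, 0.2, 0.05, 0.05],
                  [0.1, 0.6, 0.5, 0.1],
                  [0.9, 0.04, 0.03, 0.03]], dtype=np.float32)
score_threshold = 0.1

# Row/column indices of every (box, foreground-class) pair above the threshold.
box_inds, cls_inds = np.where(probs[:, 1:] > score_threshold)
cls_ids = cls_inds + 1                      # shift past the background column
cls_probs = probs[box_inds, cls_ids]
dets = np.concatenate((boxes[box_inds].reshape(-1, 4),
                       cls_probs[:, np.newaxis],
                       cls_ids[:, np.newaxis].astype(np.float32)), axis=1)
print(dets)  # each row: [x1, y1, x2, y2, score, class_id]; one box can appear in several rows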
Example No. 2
    def to_Dets2_sigmoid(boxes, probs, img_ids, score_threshold=0.1):
        """Sigmoid variant of to_Dets2: probs has no background column,
        so each box may receive several class labels above the threshold."""
        boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids])
        Dets = []
        for i in range(0, cfg.batch_size):
            inds = np.where(img_ids == i)[0]
            probs_ = probs[inds]
            boxes_ = boxes[inds]
            if probs_.ndim == 1 or probs_.shape[1] == 1:
                cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32)
                cls_probs = probs_.reshape(-1)  # probs_ is a numpy array here, so reshape, not torch-style .view
                dets = np.concatenate((boxes_.reshape(-1, 4),
                                       cls_probs[:, np.newaxis],
                                       cls_ids[:, np.newaxis]), axis=1)
            else:
                d0_inds, d1_inds = np.where(probs_ > score_threshold)
                if d0_inds.size > 0:
                    cls_ids = d1_inds + 1
                    cls_probs = probs_[d0_inds, d1_inds]
                    boxes_ = boxes_[d0_inds, :]
                    dets = np.concatenate(
                        (boxes_.reshape(-1, 4), cls_probs[:, np.newaxis],
                         cls_ids[:, np.newaxis]),
                        axis=1)
                else:
                    cls_ids = probs_.argmax(axis=1) + 1
                    cls_probs = probs_[np.arange(probs_.shape[0]), cls_ids - 1]
                    dets = np.concatenate(
                        (boxes_.reshape(-1, 4), cls_probs[:, np.newaxis],
                         cls_ids[:, np.newaxis]),
                        axis=1)

            Dets.append(dets)
        return Dets
Example No. 3
    def to_Dets_sigmoid(boxes, probs, img_ids):
        """
        For each bbox, assign the class with the max prob.
        NOTE: there is no background class,
        so the implementation is slightly different.
        """
        boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids])
        Dets = list()

        for i in range(0, cfg.batch_size):
            inds = np.where(img_ids == i)[0]
            probs_ = probs[inds]
            boxes_ = boxes[inds]
            # !!! Difference is here. !!!
            if probs_.ndim == 1 or probs_.shape[1] == 1:
                cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32)
                cls_probs = probs_.reshape(-1)  # numpy array, so reshape rather than torch-style .view(-1)
            else:
                cls_ids = probs_.argmax(axis=1) + 1
                cls_probs = probs_.max(axis=1)

            dets = np.concatenate((boxes_.reshape(-1, 4),
                                   cls_probs[:, np.newaxis],
                                   cls_ids[:, np.newaxis]), axis=1)

            Dets.append(dets)
        # end_for
        return Dets
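
A standalone sketch of the argmax assignment used here: with sigmoid outputs there is no background column, so column j already corresponds to class j + 1 (the scores below are dummy values):

import numpy as np

# Dummy per-class sigmoid scores for 3 boxes over 3 foreground classes.
probs = np.array([[0.10, 0.80, 0.30],
                  [0.55, 0.20, 0.15],
                  [0.05, 0.07, 0.90]], dtype=np.float32)

cls_ids = probs.argmax(axis=1) + 1   # column 0 is already class 1
cls_probs = probs.max(axis=1)
print(cls_ids)    # [2 1 3]
print(cls_probs)  # [0.8  0.55 0.9 ]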
Example No. 4
    def to_Dets(boxes, probs, img_ids):
        """
        For each bbox, assign the class with the max prob.
        """
        boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids])
        Dets = list()

        for i in range(0, cfg.batch_size):
            inds = np.where(img_ids == i)[0]
            probs_ = probs[inds]
            boxes_ = boxes[inds]
            if probs_.shape[1] == 2:
                cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32)
                cls_probs = probs_[:, 1]
            else:
                cls_ids = probs_[:, 1:].argmax(axis=1) + 1
                cls_probs = probs_[np.arange(probs_.shape[0]), cls_ids]

            dets = np.concatenate((boxes_.reshape(-1, 4),
                                   cls_probs[:, np.newaxis],
                                   cls_ids[:, np.newaxis]), axis=1)

            Dets.append(dets)
        # end_for
        return Dets
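
To contrast this with Example No. 1: to_Dets keeps exactly one label per box, while the to_Dets2 variants can emit several rows for the same box. A toy comparison with made-up softmax scores:

import numpy as np

probs = np.array([[0.1, 0.5, 0.4],    # columns: background, cls1, cls2 (dummy scores)
                  [0.2, 0.4, 0.4]], dtype=np.float32)

# to_Dets-style: exactly one label per box.
one_label = probs[:, 1:].argmax(axis=1) + 1          # -> [1 1]

# to_Dets2-style: every foreground class above a threshold.
box_inds, cls_inds = np.where(probs[:, 1:] > 0.3)
multi_label = list(zip(box_inds, cls_inds + 1))      # -> [(0, 1), (0, 2), (1, 1), (1, 2)]
print(one_label, multi_label)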
Example No. 5
## data loader
train_data = get_loader(cfg.data_dir, cfg.split, data_layer,
                        is_training=True, batch_size=cfg.batch_size, num_workers=cfg.data_workers)
ANCHORS = np.vstack([anc.reshape([-1, 4]) for anc in train_data.dataset.ANCHORS])
class_names = train_data.dataset.classes
print('dataset len: {}'.format(len(train_data.dataset)))

pixels = np.zeros((cfg.num_classes, ), dtype=np.int64)
instances = np.zeros((cfg.num_classes, ), dtype=np.int64)

timer = Timer()
timer.tic()
for step, batch in enumerate(train_data):
    _, _, inst_masks, _, _, gt_boxes, _ = batch
    inst_masks = everything2numpy(inst_masks)

    for j, gt_box in enumerate(gt_boxes):
        if gt_box.size > 0:
            cls = gt_box[:, -1].astype(np.int32)
            for i, c in enumerate(cls):
                instances[c] += 1
                m = inst_masks[j][i]
                pixels[c] += m.sum()
    t = timer.toc(False)

    if step % 500 == 0:
        print('step: %d, instances: %d, pixels: %d, time: %.2fs' % (step, instances.sum(), pixels.sum(), t))

        with open("statistics", "wb") as f:
            pickle.dump({
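
A self-contained sketch of the per-class instance and pixel counting performed in the loop above (dummy masks and labels stand in for what the data loader provides):

import numpy as np

num_classes = 3
pixels = np.zeros((num_classes,), dtype=np.int64)
instances = np.zeros((num_classes,), dtype=np.int64)

# One image with two instances, class 1 and class 2, with dummy binary masks.
gt_classes = np.array([1, 2], dtype=np.int32)
inst_masks = np.zeros((2, 4, 4), dtype=np.uint8)
inst_masks[0, :2, :2] = 1   # 4 pixels of class 1
inst_masks[1, 2:, :] = 1    # 8 pixels of class 2

for c, m in zip(gt_classes, inst_masks):
    instances[c] += 1
    pixels[c] += m.sum()

print(instances)  # [0 1 1]
print(pixels)     # [0 4 8]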
Example No. 6
                writer.add_summary(s, float(global_step))

        # Save Model
        if step % 5000 == 0 and global_step != 0:
            if not cfg.save_prefix:
                save_path = os.path.join()
            else:
                save_path = os.path.join()
            save_net()
            print('')

        # Draw Detection Results (Stage-1, Stage-2)
        if global_step % cfg.log_image == 0:

            summary_out = []
            input_np = everything2numpy(input)

            # Get Detection Results
            dets_dict = model_ori.get_final_results()
            for key, dets in dets_dict.items():
                Is = single_shot.draw_detection()
                Is = Is.astype(np.uint8)
                summary_out += log_images()

            # Draw Ground-Truth
            Is = single_shot.draw_gtboxes()
            Is = Is.astype(np.uint8)
            summary_out += log_images()

            summary = model_ori.get_summaries()
            for s in summary:
Example No. 7
def main():
    # config model and lr
    num_anchors = len(cfg.anchor_ratios) * len(cfg.anchor_scales[0]) * len(cfg.anchor_shift) \
        if isinstance(cfg.anchor_scales[0], list) else \
        len(cfg.anchor_ratios) * len(cfg.anchor_scales)

    resnet = resnet50 if cfg.backbone == 'resnet50' else resnet101
    detection_model = MaskRCNN if cfg.model_type.lower() == 'maskrcnn' \
        else RetinaNet

    model = detection_model(resnet(pretrained=True, maxpool5=cfg.maxpool5),
                            num_classes=cfg.num_classes,
                            num_anchors=num_anchors,
                            strides=cfg.strides,
                            in_channels=cfg.in_channels,
                            f_keys=cfg.f_keys,
                            num_channels=256,
                            is_training=False,
                            activation=cfg.class_activation)

    lr = cfg.lr
    start_epoch = 0
    if cfg.restore is not None:
        meta = load_net(cfg.restore, model)
        print(meta)
        if meta[0] >= 0:
            start_epoch = meta[0] + 1
            lr = meta[1]
        print('Restored from %s, starting from %d epoch, lr:%.6f' %
              (cfg.restore, start_epoch, lr))
    else:
        raise ValueError('restore is not set')

    model.cuda()
    model.eval()

    class_names = test_data.dataset.classes
    print('dataset len: {}'.format(len(test_data.dataset)))

    tb_dir = os.path.join(cfg.train_dir, cfg.backbone + '_' + cfg.datasetname,
                          'test', time.strftime("%h%d_%H"))
    writer = tbx.FileWriter(tb_dir)

    # main loop
    timer_all = Timer()
    timer_post = Timer()
    all_results1 = []
    all_results2 = []
    all_results_gt = []
    for step, batch in enumerate(test_data):

        timer_all.tic()

        # NOTE: targets are in NHWC order!
        # input, anchors_np, im_scale_list, image_ids, gt_boxes_list = batch
        # input = everything2cuda(input)
        input_t, anchors_np, im_scale_list, image_ids, gt_boxes_list = batch
        input = everything2cuda(input_t, volatile=True)

        outs = model(input, gt_boxes_list=None, anchors_np=anchors_np)

        if cfg.model_type == 'maskrcnn':
            rpn_logit, rpn_box, rpn_prob, rpn_labels, rpn_bbtargets, rpn_bbwghts, anchors, \
            rois, roi_img_ids, rcnn_logit, rcnn_box, rcnn_prob, rcnn_labels, rcnn_bbtargets, rcnn_bbwghts = outs
            outputs = [
                rois, roi_img_ids, rpn_logit, rpn_box, rpn_prob, rcnn_logit,
                rcnn_box, rcnn_prob, anchors
            ]
            targets = []
        elif cfg.model_type == 'retinanet':
            rpn_logit, rpn_box, rpn_prob, _, _, _ = outs
            outputs = [rpn_logit, rpn_box, rpn_prob]
        else:
            raise ValueError('Unknown model type: %s' % cfg.model_type)

        timer_post.tic()

        dets_dict = model.get_final_results(
            outputs,
            everything2cuda(anchors_np),
            score_threshold=0.01,
            max_dets=cfg.max_det_num * cfg.batch_size,
            overlap_threshold=cfg.overlap_threshold)
        if 'stage1' in dets_dict:
            Dets = dets_dict['stage1']
        else:
            raise ValueError('No stage1 results:', dets_dict.keys())
        Dets2 = dets_dict['stage2'] if 'stage2' in dets_dict else Dets

        t3 = timer_post.toc()
        t = timer_all.toc()

        formal_res1 = dataset.to_detection_format(copy.deepcopy(Dets),
                                                  image_ids, im_scale_list)
        formal_res2 = dataset.to_detection_format(copy.deepcopy(Dets2),
                                                  image_ids, im_scale_list)
        all_results1 += formal_res1
        all_results2 += formal_res2

        Dets_gt = []
        for gb in gt_boxes_list:
            cpy_mask = gb[:, 4] >= 1
            gb = gb[cpy_mask]
            n = cpy_mask.astype(np.int32).sum()
            res_gt = np.zeros((n, 6))
            res_gt[:, :4] = gb[:, :4]
            res_gt[:, 4] = 1.
            res_gt[:, 5] = gb[:, 4]
            Dets_gt.append(res_gt)
        formal_res_gt = dataset.to_detection_format(Dets_gt, image_ids,
                                                    im_scale_list)
        all_results_gt += formal_res_gt

        if step % cfg.log_image == 0:
            input_np = everything2numpy(input)
            summary_out = []
            Is = single_shot.draw_detection(input_np,
                                            Dets,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is, image_ids, step, prefix='Detection/')

            Is = single_shot.draw_detection(input_np,
                                            Dets2,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is,
                                      image_ids,
                                      step,
                                      prefix='Detection2/')

            Imgs = single_shot.draw_gtboxes(input_np,
                                            gt_boxes_list,
                                            class_names=class_names)
            Imgs = Imgs.astype(np.uint8)
            summary_out += log_images(Imgs,
                                      image_ids,
                                      float(step),
                                      prefix='GT')

            for s in summary_out:
                writer.add_summary(s, float(step))

        if step % cfg.display == 0:
            print(time.strftime("%H:%M:%S ") +
                  'Epoch %d iter %d: speed %.3fs (%.3fs)' % (0, step, t, t3) +
                  ' ImageIds: ' + ', '.join(str(s) for s in image_ids),
                  end='\r')

    res_dict = {
        'stage1': all_results1,
        'stage2': all_results2,
        'gt': all_results_gt
    }
    return res_dict
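
The ground-truth boxes above are re-packed into the same 6-column detection layout ([x1, y1, x2, y2, score, class_id]) with a fixed score of 1.0. A small standalone sketch with a made-up gt_boxes array:

import numpy as np

# Dummy ground truth: [x1, y1, x2, y2, class_id]; rows with class 0 are dropped.
gt_boxes = np.array([[ 0,  0, 10, 10, 1],
                     [ 5,  5, 20, 20, 3],
                     [ 1,  1,  2,  2, 0]], dtype=np.float32)

keep = gt_boxes[:, 4] >= 1
gb = gt_boxes[keep]
dets_gt = np.zeros((gb.shape[0], 6), dtype=np.float32)
dets_gt[:, :4] = gb[:, :4]   # box coordinates
dets_gt[:, 4] = 1.0          # "score" is always 1 for ground truth
dets_gt[:, 5] = gb[:, 4]     # class id
print(dets_gt)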
Example No. 8
def main():
    # config model and lr
    num_anchors = len(cfg.anchor_ratios) * len(cfg.anchor_scales[0]) \
        if isinstance(cfg.anchor_scales[0], list) else \
        len(cfg.anchor_ratios) * len(cfg.anchor_scales)

    resnet = resnet50 if cfg.backbone == 'resnet50' else resnet101
    detection_model = MaskRCNN if cfg.model_type.lower() == 'maskrcnn' \
        else RetinaNet

    model = detection_model(resnet(pretrained=True),
                            num_classes=cfg.num_classes,
                            num_anchors=num_anchors,
                            strides=cfg.strides,
                            in_channels=cfg.in_channels,
                            f_keys=cfg.f_keys,
                            num_channels=256,
                            is_training=False,
                            activation=cfg.class_activation)

    lr = cfg.lr
    start_epoch = 0
    if cfg.restore is not None:
        meta = load_net(cfg.restore, model)
        print(meta)
        if meta[0] >= 0:
            start_epoch = meta[0] + 1
            lr = meta[1]
        print('Restored from %s, starting from %d epoch, lr:%.6f' %
              (cfg.restore, start_epoch, lr))
    else:
        raise ValueError('restore is not set')

    model.cuda()
    model.eval()

    ANCHORS = np.vstack(
        [anc.reshape([-1, 4]) for anc in test_data.dataset.ANCHORS])
    model.anchors = everything2cuda(ANCHORS.astype(np.float32))

    class_names = test_data.dataset.classes
    print('dataset len: {}'.format(len(test_data.dataset)))

    tb_dir = os.path.join(cfg.train_dir, cfg.backbone + '_' + cfg.datasetname,
                          'test', time.strftime("%h%d_%H"))
    writer = tbx.FileWriter(tb_dir)
    summary_out = []

    # main loop
    timer_all = Timer()
    timer_post = Timer()
    all_results1 = []
    all_results2 = []
    all_results_gt = []
    for step, batch in enumerate(test_data):

        timer_all.tic()

        # NOTE: targets are in NHWC order!
        input, image_ids, gt_boxes_list, image_ori = batch
        input = everything2cuda(input)

        outs = model(input)

        timer_post.tic()

        dets_dict = model.get_final_results(
            score_threshold=0.05,
            max_dets=cfg.max_det_num * cfg.batch_size,
            overlap_threshold=cfg.overlap_threshold)
        if 'stage1' in dets_dict:
            Dets = dets_dict['stage1']
        else:
            raise ValueError('No stage1 results:', dets_dict.keys())
        Dets2 = dets_dict['stage2'] if 'stage2' in dets_dict else Dets

        t3 = timer_post.toc()
        t = timer_all.toc()

        formal_res1 = dataset.to_detection_format(
            copy.deepcopy(Dets),
            image_ids,
            ori_sizes=[im.shape for im in image_ori])
        formal_res2 = dataset.to_detection_format(
            copy.deepcopy(Dets2),
            image_ids,
            ori_sizes=[im.shape for im in image_ori])
        all_results1 += formal_res1
        all_results2 += formal_res2

        if step % cfg.log_image == 0:
            input_np = everything2numpy(input)
            summary_out = []
            Is = single_shot.draw_detection(input_np,
                                            Dets,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is, image_ids, step, prefix='Detection/')

            Is = single_shot.draw_detection(input_np,
                                            Dets2,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is,
                                      image_ids,
                                      step,
                                      prefix='Detection2/')

            Imgs = single_shot.draw_gtboxes(input_np,
                                            gt_boxes_list,
                                            class_names=class_names)
            Imgs = Imgs.astype(np.uint8)
            summary_out += log_images(Imgs,
                                      image_ids,
                                      float(step),
                                      prefix='GT')

            for s in summary_out:
                writer.add_summary(s, float(step))

        if step % cfg.display == 0:
            print(time.strftime("%H:%M:%S ") +
                  'Epoch %d iter %d: speed %.3fs (%.3fs)' % (0, step, t, t3) +
                  ' ImageIds: ' + ', '.join(str(s) for s in image_ids),
                  end='\r')

    res_dict = {
        'stage1': all_results1,
        'stage2': all_results2,
        'gt': all_results_gt
    }
    return res_dict
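
Both main() variants derive the per-location anchor count from cfg.anchor_ratios and cfg.anchor_scales, with Example No. 7 also multiplying by len(cfg.anchor_shift) when scales are given per level. A quick standalone check with made-up values:

# Made-up config values, only to illustrate the two branches.
anchor_ratios = [0.5, 1.0, 2.0]

flat_scales = [32, 64, 128]                           # one list shared by all levels
per_level_scales = [[32, 40], [64, 80], [128, 160]]   # list of lists, one per level

def count_anchors(anchor_scales, anchor_shift=(1,)):
    if isinstance(anchor_scales[0], list):
        # per-location anchors = ratios x scales-per-level (x shifts, Example No. 7 only)
        return len(anchor_ratios) * len(anchor_scales[0]) * len(anchor_shift)
    return len(anchor_ratios) * len(anchor_scales)

print(count_anchors(flat_scales))        # 9
print(count_anchors(per_level_scales))   # 6 (with a single default shift)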