Example 1
        # Draw Detection Results (Stage-1, Stage-2)
        if global_step % cfg.log_image == 0:

            summary_out = []
            input_np = everything2numpy(input)

            # Get Detection Results
            dets_dict = model_ori.get_final_results(
                score_threshold=cfg.score_threshold,
                max_dets=cfg.max_det_num,
                overlap_threshold=cfg.overlap_threshold)
            for key, dets in dets_dict.items():
                Is = single_shot.draw_detection(input_np, dets,
                                                class_names=class_names)
                Is = Is.astype(np.uint8)
                summary_out += log_images(Is, image_ids, global_step,
                                          prefix='Detection_' + key)

            # Draw Ground-Truth
            Is = single_shot.draw_gtboxes(input_np, gt_boxes_list,
                                          class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is, image_ids, global_step, prefix='GT')

            summary = model_ori.get_summaries(is_training=True)
            for s in summary:
                writer.add_summary(s, float(global_step))
            for s in summary_out:
                writer.add_summary(s, float(global_step))
            summary_out = []
        # end_if
        global_step += 1
    # end_for

    if not cfg.save_prefix:
        save_path = os.path.join()
Example 2
                score_threshold=cfg.score_threshold,
                max_dets=cfg.max_det_num,
                overlap_threshold=cfg.overlap_threshold)
            for key, dets in dets_dict.items():
                Is = single_shot.draw_detection(input_np,
                                                dets,
                                                class_names=class_names)
                Is = Is.astype(np.uint8)
                summary_out += log_images(Is,
                                          image_ids,
                                          global_step,
                                          prefix='Detection_' + key)

            # draw gt
            Is = single_shot.draw_gtboxes(input_np,
                                          gt_boxes_list,
                                          class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is, image_ids, global_step, prefix='GT')

            # # draw positive anchors on images
            # if True:
            #     Imgs, cnt = single_shot.draw_anchors(everything2numpy(input), everything2numpy(rpn_targets),
            #                                          anchors_np, class_names=class_names)
            #     Imgs = Imgs.astype(np.uint8)
            #     summary_out += log_images(Imgs, image_ids, global_step, prefix='GT_anchor')
            #
            #     print (time.strftime("%H:%M:%S ") + '{} positive anchors'.format(cnt))

            summary = model_ori.get_summaries(is_training=True)
            for s in summary:
Example 3
def main():
    # config model and lr
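    # Anchors per location: ratios x scales, additionally times anchor shifts
    # when cfg.anchor_scales is given as a nested list.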
    num_anchors = len(cfg.anchor_ratios) * len(cfg.anchor_scales[0]) * len(cfg.anchor_shift) \
        if isinstance(cfg.anchor_scales[0], list) else \
        len(cfg.anchor_ratios) * len(cfg.anchor_scales)

    resnet = resnet50 if cfg.backbone == 'resnet50' else resnet101
    detection_model = MaskRCNN if cfg.model_type.lower() == 'maskrcnn' \
        else RetinaNet

    model = detection_model(resnet(pretrained=True, maxpool5=cfg.maxpool5),
                            num_classes=cfg.num_classes,
                            num_anchors=num_anchors,
                            strides=cfg.strides,
                            in_channels=cfg.in_channels,
                            f_keys=cfg.f_keys,
                            num_channels=256,
                            is_training=False,
                            activation=cfg.class_activation)

    lr = cfg.lr
    start_epoch = 0
    if cfg.restore is not None:
        meta = load_net(cfg.restore, model)
        print(meta)
        if meta[0] >= 0:
            start_epoch = meta[0] + 1
            lr = meta[1]
        print('Restored from %s, starting from %d epoch, lr:%.6f' %
              (cfg.restore, start_epoch, lr))
    else:
        raise ValueError('restore is not set')

    model.cuda()
    model.eval()

    class_names = test_data.dataset.classes
    print('dataset len: {}'.format(len(test_data.dataset)))

    tb_dir = os.path.join(cfg.train_dir, cfg.backbone + '_' + cfg.datasetname,
                          'test', time.strftime("%h%d_%H"))
    writer = tbx.FileWriter(tb_dir)

    # main loop
    timer_all = Timer()
    timer_post = Timer()
    all_results1 = []
    all_results2 = []
    all_results_gt = []
    for step, batch in enumerate(test_data):

        timer_all.tic()

        # NOTE: Targets are in NHWC order!
        # input, anchors_np, im_scale_list, image_ids, gt_boxes_list = batch
        # input = everything2cuda(input)
        input_t, anchors_np, im_scale_list, image_ids, gt_boxes_list = batch
        input = everything2cuda(input_t, volatile=True)

        outs = model(input, gt_boxes_list=None, anchors_np=anchors_np)

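        # Unpack the raw head outputs: MaskRCNN returns both RPN and RCNN
        # branches (plus RoIs and anchors), RetinaNet only the single-stage
        # classification/box/probability heads.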
        if cfg.model_type == 'maskrcnn':
            rpn_logit, rpn_box, rpn_prob, rpn_labels, rpn_bbtargets, rpn_bbwghts, anchors, \
            rois, roi_img_ids, rcnn_logit, rcnn_box, rcnn_prob, rcnn_labels, rcnn_bbtargets, rcnn_bbwghts = outs
            outputs = [
                rois, roi_img_ids, rpn_logit, rpn_box, rpn_prob, rcnn_logit,
                rcnn_box, rcnn_prob, anchors
            ]
            targets = []
        elif cfg.model_type == 'retinanet':
            rpn_logit, rpn_box, rpn_prob, _, _, _ = outs
            outputs = [rpn_logit, rpn_box, rpn_prob]
        else:
            raise ValueError('Unknown model type: %s' % cfg.model_type)

        timer_post.tic()

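        # Post-process raw outputs into per-stage detection lists
        # (score thresholding plus overlap-based suppression).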
        dets_dict = model.get_final_results(
            outputs,
            everything2cuda(anchors_np),
            score_threshold=0.01,
            max_dets=cfg.max_det_num * cfg.batch_size,
            overlap_threshold=cfg.overlap_threshold)
        if 'stage1' in dets_dict:
            Dets = dets_dict['stage1']
        else:
            raise ValueError('No stage1 results:', dets_dict.keys())
        Dets2 = dets_dict['stage2'] if 'stage2' in dets_dict else Dets

        t3 = timer_post.toc()
        t = timer_all.toc()

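        # Convert detections to the dataset's evaluation format, mapping boxes
        # back to original image coordinates via the per-image scales.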
        formal_res1 = dataset.to_detection_format(copy.deepcopy(Dets),
                                                  image_ids, im_scale_list)
        formal_res2 = dataset.to_detection_format(copy.deepcopy(Dets2),
                                                  image_ids, im_scale_list)
        all_results1 += formal_res1
        all_results2 += formal_res2

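        # Re-pack ground-truth boxes into the same 6-column detection layout
        # (box, score, label), keeping labels >= 1 and using a dummy score of 1.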
        Dets_gt = []
        for gb in gt_boxes_list:
            cpy_mask = gb[:, 4] >= 1
            gb = gb[cpy_mask]
            n = cpy_mask.astype(np.int32).sum()
            res_gt = np.zeros((n, 6))
            res_gt[:, :4] = gb[:, :4]
            res_gt[:, 4] = 1.
            res_gt[:, 5] = gb[:, 4]
            Dets_gt.append(res_gt)
        formal_res_gt = dataset.to_detection_format(Dets_gt, image_ids,
                                                    im_scale_list)
        all_results_gt += formal_res_gt

        if step % cfg.log_image == 0:
            input_np = everything2numpy(input)
            summary_out = []
            Is = single_shot.draw_detection(input_np,
                                            Dets,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is, image_ids, step, prefix='Detection/')

            Is = single_shot.draw_detection(input_np,
                                            Dets2,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is,
                                      image_ids,
                                      step,
                                      prefix='Detection2/')

            Imgs = single_shot.draw_gtboxes(input_np,
                                            gt_boxes_list,
                                            class_names=class_names)
            Imgs = Imgs.astype(np.uint8)
            summary_out += log_images(Imgs,
                                      image_ids,
                                      float(step),
                                      prefix='GT')

            for s in summary_out:
                writer.add_summary(s, float(step))

        if step % cfg.display == 0:
            print(time.strftime("%H:%M:%S ") +
                  'Epoch %d iter %d: speed %.3fs (%.3fs)' % (0, step, t, t3) +
                  ' ImageIds: ' + ', '.join(str(s) for s in image_ids),
                  end='\r')

    res_dict = {
        'stage1': all_results1,
        'stage2': all_results2,
        'gt': all_results_gt
    }
    return res_dict
Example 4
def main():
    # config model and lr
    num_anchors = len(cfg.anchor_ratios) * len(cfg.anchor_scales[0]) \
        if isinstance(cfg.anchor_scales[0], list) else \
        len(cfg.anchor_ratios) * len(cfg.anchor_scales)

    resnet = resnet50 if cfg.backbone == 'resnet50' else resnet101
    detection_model = MaskRCNN if cfg.model_type.lower() == 'maskrcnn' \
        else RetinaNet

    model = detection_model(resnet(pretrained=True),
                            num_classes=cfg.num_classes,
                            num_anchors=num_anchors,
                            strides=cfg.strides,
                            in_channels=cfg.in_channels,
                            f_keys=cfg.f_keys,
                            num_channels=256,
                            is_training=False,
                            activation=cfg.class_activation)

    lr = cfg.lr
    start_epoch = 0
    if cfg.restore is not None:
        meta = load_net(cfg.restore, model)
        print(meta)
        if meta[0] >= 0:
            start_epoch = meta[0] + 1
            lr = meta[1]
        print('Restored from %s, starting from %d epoch, lr:%.6f' %
              (cfg.restore, start_epoch, lr))
    else:
        raise ValueError('restore is not set')

    model.cuda()
    model.eval()

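    # Stack the dataset's precomputed anchors into a single (N, 4) array and
    # attach it to the model; this variant does not receive anchors per batch.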
    ANCHORS = np.vstack(
        [anc.reshape([-1, 4]) for anc in test_data.dataset.ANCHORS])
    model.anchors = everything2cuda(ANCHORS.astype(np.float32))

    class_names = test_data.dataset.classes
    print('dataset len: {}'.format(len(test_data.dataset)))

    tb_dir = os.path.join(cfg.train_dir, cfg.backbone + '_' + cfg.datasetname,
                          'test', time.strftime("%h%d_%H"))
    writer = tbx.FileWriter(tb_dir)
    summary_out = []

    # main loop
    timer_all = Timer()
    timer_post = Timer()
    all_results1 = []
    all_results2 = []
    all_results_gt = []
    for step, batch in enumerate(test_data):

        timer_all.tic()

        # NOTE: Targets are in NHWC order!
        input, image_ids, gt_boxes_list, image_ori = batch
        input = everything2cuda(input)

        outs = model(input)

        timer_post.tic()

        dets_dict = model.get_final_results(
            score_threshold=0.05,
            max_dets=cfg.max_det_num * cfg.batch_size,
            overlap_threshold=cfg.overlap_threshold)
        if 'stage1' in dets_dict:
            Dets = dets_dict['stage1']
        else:
            raise ValueError('No stage1 results:', dets_dict.keys())
        Dets2 = dets_dict['stage2'] if 'stage2' in dets_dict else Dets

        t3 = timer_post.toc()
        t = timer_all.toc()

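        # Here the original image shapes (rather than scale factors) are used
        # to map boxes back for the evaluation format.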
        formal_res1 = dataset.to_detection_format(
            copy.deepcopy(Dets),
            image_ids,
            ori_sizes=[im.shape for im in image_ori])
        formal_res2 = dataset.to_detection_format(
            copy.deepcopy(Dets2),
            image_ids,
            ori_sizes=[im.shape for im in image_ori])
        all_results1 += formal_res1
        all_results2 += formal_res2

        if step % cfg.log_image == 0:
            input_np = everything2numpy(input)
            summary_out = []
            Is = single_shot.draw_detection(input_np,
                                            Dets,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is, image_ids, step, prefix='Detection/')

            Is = single_shot.draw_detection(input_np,
                                            Dets2,
                                            class_names=class_names)
            Is = Is.astype(np.uint8)
            summary_out += log_images(Is,
                                      image_ids,
                                      step,
                                      prefix='Detection2/')

            Imgs = single_shot.draw_gtboxes(input_np,
                                            gt_boxes_list,
                                            class_names=class_names)
            Imgs = Imgs.astype(np.uint8)
            summary_out += log_images(Imgs,
                                      image_ids,
                                      float(step),
                                      prefix='GT')

            for s in summary_out:
                writer.add_summary(s, float(step))

        if step % cfg.display == 0:
            print(time.strftime("%H:%M:%S ") +
                  'Epoch %d iter %d: speed %.3fs (%.3fs)' % (0, step, t, t3) +
                  ' ImageIds: ' + ', '.join(str(s) for s in image_ids),
                  end='\r')

    res_dict = {
        'stage1': all_results1,
        'stage2': all_results2,
        'gt': all_results_gt
    }
    return res_dict