Example #1
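# Evaluates a trained Darknet19 checkpoint on the VOC 2007 test set and
# returns its mAP. VOCDataset, Darknet19, yolo_utils, net_utils, and
# eval_net come from the surrounding project and are assumed to be
# imported at module level.
import os
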
def voc_ap(model, cfg):
    imdb_name = 'voc_2007_test'
    output_dir = 'models/testing/' + cfg['exp_name']
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    imdb = VOCDataset(imdb_name, '../data', cfg['batch_size'],
                      yolo_utils.preprocess_test, processes=4, shuffle=False, dst_size=cfg['inp_size'])

    net = Darknet19(cfg)
    net_utils.load_net(model, net)

    net.cuda()
    net.eval()

    mAP = eval_net(net, cfg, imdb, max_per_image=300, thresh=0.001, output_dir=output_dir, vis=False)

    imdb.close()
    return mAP
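
# A minimal invocation sketch (the checkpoint path and config values are
# hypothetical, not taken from the source project):
# cfg = {'exp_name': 'exp1', 'batch_size': 1, 'inp_size': (416, 416)}
# voc_ap('models/training/darknet19_exp1.h5', cfg)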
Example #2
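# Same evaluation flow as Example #1, but configured through module-level
# names: imdb_name, cfg, max_per_image, thresh, and vis are globals in the
# source file, and test_net is its evaluation routine.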
def test_voc_ap(model):
    print(model)
    imdb = VOCDataset(imdb_name,
                      cfg.DATA_DIR,
                      cfg.batch_size,
                      yolo_utils.preprocess_test,
                      processes=4,
                      shuffle=False,
                      dst_size=cfg.inp_size)

    net = Darknet19()
    net_utils.load_net(model, net)

    net.cuda()
    net.eval()

    mAP = test_net(net, imdb, max_per_image, thresh, vis)

    imdb.close()
    return mAP
Example #3
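# Training setup for Darknet19 on VOC: build the data loader, construct
# the network, and restore weights from a checkpoint. VOCDataset and
# Darknet19 are classes from the surrounding project, assumed to be
# imported above this excerpt.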
import os

import utils.yolo as yolo_utils
import utils.network as net_utils
from utils.timer import Timer
import cfgs.config as cfg
from random import randint

try:
    from tensorboardX import SummaryWriter
except ImportError:
    SummaryWriter = None

# data loader
imdb = VOCDataset(cfg.imdb_train,
                  cfg.DATA_DIR,
                  cfg.train_batch_size,
                  yolo_utils.preprocess_train,
                  processes=2,
                  shuffle=True,
                  dst_size=cfg.multi_scale_inp_size)
# dst_size=cfg.inp_size)
print('load data succ...')

net = Darknet19()
# net_utils.load_net(cfg.trained_model, net)
pretrained_model = os.path.join(cfg.train_output_dir,
                                'darknet19_voc07trainval_exp3_17.h5')
#pretrained_model = cfg.trained_model
net_utils.load_net(pretrained_model, net)
#net.load_from_npz(cfg.pretrained_model, num_conv=18)
net.cuda()
net.train()
Example #4
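# This snippet begins partway through test_net's per-image loop: the
# truncated call above presumably draws the detections onto im2show (in
# the source project, a yolo_utils drawing helper), displayed when vis
# is enabled.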
                                                cfg,
                                                thr=0.1)
            if im2show.shape[0] > 1100:
                im2show = cv2.resize(im2show,
                                     (int(1000. * float(im2show.shape[1]) / im2show.shape[0]), 1000))  # noqa
            cv2.imshow('test', im2show)
            cv2.waitKey(0)

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)


if __name__ == '__main__':
    # data loader
    imdb = VOCDataset(imdb_name, cfg.DATA_DIR, cfg.batch_size,
                      yolo_utils.preprocess_test,
                      processes=1, shuffle=False, dst_size=cfg.multi_scale_inp_size)

    net = Darknet19()
    net_utils.load_net(trained_model, net)

    net.cuda()
    net.eval()

    test_net(net, imdb, max_per_image, thresh, vis=False)  # originally False

    imdb.close()
Example #5
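# Tail of a Fast R-CNN style loss function: smooth L1 regression loss over
# the sampled positive proposals, normalized by the total number of sampled
# labels. N, labels, labels_pos, sampled_pos_inds_subset, and
# regression_targets are defined earlier in the (truncated) function.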
    box_regression = box_regression.reshape(N, -1, 4)

    box_loss = det_utils.smooth_l1_loss(
        box_regression[sampled_pos_inds_subset, labels_pos],
        regression_targets[sampled_pos_inds_subset],
        beta=1 / 9,
        size_average=False,
    )
    box_loss = box_loss / labels.numel()

    return classification_loss, box_loss


# dataset_train = VOCDataset(root='/home/neuroplex/data/VOCdevkit/VOC2007')
dataset_train = VOCDataset(
    root='/Users/pranoyr/code/Pytorch/faster-rcnn.pytorch/data/VOCdevkit2007/VOC2007')
dataloader = DataLoader(dataset_train,
                        num_workers=0,
                        collate_fn=collater,
                        batch_size=1)

# dataset_train = VRDDataset('/Users/pranoyr/code/Pytorch/faster-rcnn.pytorch/data/VRD', 'train')
# dataloader = DataLoader(
# 	dataset_train, num_workers=0, collate_fn=collater, batch_size=1)


class RoIHeads(torch.nn.Module):
    __annotations__ = {
        'box_coder': det_utils.BoxCoder,
        'proposal_matcher': det_utils.Matcher,
Example #6
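# As in Example #4, this snippet begins mid-way through test_net's
# per-image visualization loop; only the final test_net call differs
# (vis is passed positionally here).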
                                                cfg,
                                                thr=0.1)
            if im2show.shape[0] > 1100:
                im2show = cv2.resize(im2show,
                                     (int(1000. * float(im2show.shape[1]) / im2show.shape[0]), 1000))  # noqa
            cv2.imshow('test', im2show)
            cv2.waitKey(0)

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)


if __name__ == '__main__':
    # data loader
    imdb = VOCDataset(imdb_name, cfg.DATA_DIR, cfg.batch_size,
                      yolo_utils.preprocess_test,
                      processes=1, shuffle=False, dst_size=cfg.multi_scale_inp_size)

    net = Darknet19()
    net_utils.load_net(trained_model, net)

    net.cuda()
    net.eval()

    test_net(net, imdb, max_per_image, thresh, vis)

    imdb.close()