Example #1
def voc_ap(model, cfg):
    """Evaluate a trained checkpoint on the VOC 2007 test set and return its mAP."""
    imdb_name = 'voc_2007_test'
    output_dir = 'models/testing/' + cfg['exp_name']
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    imdb = VOCDataset(imdb_name, '../data', cfg['batch_size'],
                      yolo_utils.preprocess_test, processes=4, shuffle=False, dst_size=cfg['inp_size'])

    net = Darknet19(cfg)
    net_utils.load_net(model, net)

    net.cuda()
    net.eval()

    mAP = eval_net(net, cfg, imdb, max_per_image=300, thresh=0.001, output_dir=output_dir, vis=False)

    imdb.close()
    return mAP
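
For reference, a minimal sketch of how voc_ap might be called; the checkpoint path and cfg values below are illustrative assumptions rather than values taken from the snippets above.

# hypothetical invocation of voc_ap; the path and cfg values are assumptions
cfg = {
    'exp_name': 'yolo_voc07',    # also names the output directory under models/testing/
    'batch_size': 1,
    'inp_size': (416, 416),      # assumed test-time input resolution
}
mAP = voc_ap('models/training/yolo_voc07_160.h5', cfg)
print('VOC 2007 test mAP: {:.4f}'.format(mAP))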
Example #2
def test_voc_ap(model):
    print(model)
    imdb = VOCDataset(imdb_name,
                      cfg.DATA_DIR,
                      cfg.batch_size,
                      yolo_utils.preprocess_test,
                      processes=4,
                      shuffle=False,
                      dst_size=cfg.inp_size)

    net = Darknet19()
    net_utils.load_net(model, net)

    net.cuda()
    net.eval()

    mAP = test_net(net, imdb, max_per_image, thresh, vis)

    imdb.close()
    return mAP
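
Note that imdb_name, max_per_image, thresh, and vis are not defined inside this excerpt; the surrounding script presumably sets them at module level before test_voc_ap is called.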
Example #3
                image.shape,
                cfg,
                thresh=0.3,
                size_index=size_index)
            im2show = yolo_utils.draw_detection(image, bboxes, scores,
                                                cls_inds, cfg)
            summary_writer.add_image('predict', im2show, step)

        train_loss = 0
        bbox_loss, iou_loss, cls_loss = 0., 0., 0.
        cnt = 0
        t.clear()
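        # re-sample the input resolution for multi-scale training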
        size_index = randint(0, len(cfg.multi_scale_inp_size) - 1)
        print("image_size {}".format(cfg.multi_scale_inp_size[size_index]))

    # at the end of each epoch: decay the learning rate on scheduled epochs and snapshot the weights
    if step > 0 and (step % imdb.batch_per_epoch == 0):
        if imdb.epoch in cfg.lr_decay_epochs:
            lr *= cfg.lr_decay
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=lr,
                                        momentum=cfg.momentum,
                                        weight_decay=cfg.weight_decay)

        save_name = os.path.join(cfg.train_output_dir,
                                 '{}_{}.h5'.format(cfg.exp_name, imdb.epoch))
        net_utils.save_net(save_name, net)
        print('save model: {}'.format(save_name))
        step_cnt = 0

imdb.close()
Example #4
                                                cfg,
                                                thr=0.1)
            if im2show.shape[0] > 1100:
                im2show = cv2.resize(im2show,
                                     (int(1000. * float(im2show.shape[1]) / im2show.shape[0]), 1000))  # noqa
            cv2.imshow('test', im2show)
            cv2.waitKey(0)

    # dump the raw detections to disk, then run the VOC evaluation on them
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)


if __name__ == '__main__':
    # data loader
    imdb = VOCDataset(imdb_name, cfg.DATA_DIR, cfg.batch_size,
                      yolo_utils.preprocess_test,
                      processes=1, shuffle=False, dst_size=cfg.multi_scale_inp_size)

    net = Darknet19()
    net_utils.load_net(trained_model, net)

    net.cuda()
    net.eval()

    test_net(net, imdb, max_per_image, thresh, vis)

    imdb.close()
Example #5
                                   cfg.DATA_DIR,
                                   cfg.batch_size,
                                   yolo_utils.preprocess_test,
                                   processes=1,
                                   shuffle=False,
                                   dst_size=cfg.multi_scale_inp_size)

        print('start_test............................')
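        # record which checkpoint is being evaluated before running a quick mAP pass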
        save_txt_f = open('test_ap_newmodel.txt', 'a+')
        save_txt_f.writelines(['test_new_model', '\n'])
        save_txt_f.writelines(
            ['{}_{}.h5'.format(cfg.exp_name, imdb.epoch), '\n'])
        save_txt_f.close()

        net.eval()
        test_net(net, imdb_test_map, max_per_image=10, thresh=0.5, vis=False)
        net.train()
        imdb_test_map.close()
        # =============================================================================

        save_name = os.path.join(cfg.train_output_dir,
                                 '{}_{}.h5'.format(cfg.exp_name, imdb.epoch))
        net_utils.save_net(save_name, net)
        print('save model: {}'.format(save_name))
        step_cnt = 0

imdb.close()
# =============================================================================
#imdb_test_map.close()
# =============================================================================
Example #6
        origin_time = max_ctime
        print('start_write_and_test')
        print('newfile_time', max_ctime)
        print('file', file_dict[max_ctime])

        test_f = open('test_ap_newmodel.txt', mode='a+')
        test_f.writelines(['MODELS', '\n', file_dict[max_ctime], '\n'])
        test_f.close()

        new_models_path = os.path.join(file_dir, file_dict[max_ctime])
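        # rebuild the test set and evaluate the newly saved checkpoint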
        imdb_map_newf = VOCDataset(imdb_name,
                                   cfg.DATA_DIR,
                                   cfg.batch_size,
                                   yolo_utils.preprocess_test,
                                   processes=1,
                                   shuffle=False,
                                   dst_size=cfg.multi_scale_inp_size)
        net = Darknet19()
        net_utils.load_net(new_models_path, net)
        net.cuda()
        net.eval()

        test_net(net, imdb_map_newf, max_per_image, thresh, vis=False)
        print('test_this_models_done.....')

        imdb_map_newf.close()
        torch.cuda.empty_cache()
        gc.collect()
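    # wait 20 minutes (1200 s) before polling for the next checkpoint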
    time.sleep(1200)
#    print(file_dict[max_ctime])  # print the name of the newest file
#    print(max_ctime)