Example #1
def evaluation_models():

    datacfg = 'data/kaist.data'
    data_options = read_data_cfg(datacfg)
    testlist = data_options['valid']
    class_names = data_options['names']

    outfile = 'det_test_'
    res_prefix = 'results/' + outfile

    # cfgfile = 'cfg/yolov3_kaist_tc_det.cfg'
    # modelfile = 'weights/yolov3_kaist_tc_det_thermal.model'
    cfgfile = 'cfg/yolov3_kaist.cfg'
    modelfile = 'weights/yolov3_kaist_mix_80_20.weights'

    #    valid(datacfg, cfgfile, modelfile, outfile)
    #    cur_mAP = _do_python_eval(res_prefix, testlist, class_names, output_dir='output')
    convert_predict_to_JSON()
    #    print('mAP: %.4f \n' %(cur_mAP))
    all_ap, day_ap, night_ap, all_mr, day_mr, night_mr = meanAP_LogAverageMissRate()
    print(
        'ap: %.4f ap_d: %.4f ap_n: %.4f lamr: %.4f mr_d: %.4f mr_n: %.4f \n' %
        (all_ap / 100.0, day_ap / 100.0, night_ap / 100.0, all_mr / 100.0,
         day_mr / 100.0, night_mr / 100.0))
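All of the examples on this page call the same `read_data_cfg` helper to turn a Darknet-style `.data` file into a plain dict of strings (`valid`, `names`, `gpus`, `num_workers`, ...). The helper itself never appears in the snippets; a minimal sketch of what it plausibly looks like, assuming simple `key = value` lines and `#` comments:

def read_data_cfg(datacfg):
    """Parse a Darknet-style .data file into a dict of strings.

    A typical file looks like:
        classes = 1
        train   = data/train.txt
        valid   = data/test.txt
        names   = data/kaist.names
        backup  = backup/
        gpus    = 0,1
        num_workers = 4
    """
    options = {}
    with open(datacfg) as f:
        for line in f:
            line = line.strip()
            # skip blank lines and comments
            if not line or line.startswith('#'):
                continue
            key, value = line.split('=', 1)
            options[key.strip()] = value.strip()
    return options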
Example #2
def main():
    # Validation parameters
    conf_thresh = FLAGS.conf_threshold
    nms_thresh = FLAGS.nms_threshold
    iou_thresh = FLAGS.iou_threshold

    # output file
    out_path = FLAGS.out_path

    # Training settings
    datacfg = FLAGS.data
    cfgfile = FLAGS.config

    data_options = read_data_cfg(datacfg)
    file_list = data_options['valid']
    gpus = data_options['gpus']  # e.g. 0,1,2,3
    ngpus = len(gpus.split(','))

    num_workers = int(data_options['num_workers'])
    # batch size for evaluation (taken from the command line; typically 1)
    batch_size = FLAGS.batch_size

    global use_cuda
    use_cuda = torch.cuda.is_available() and use_cuda

    ###############
    torch.manual_seed(seed)
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed)

    global model
    model = Darknet(cfgfile)
    # model.print_network()

    init_width = model.width
    init_height = model.height

    kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}

    val_loader = torch.utils.data.DataLoader(
        dataset.listDataset(file_list, shape=(init_width, init_height),
                            shuffle=False, jitter=False,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                            ]), validate=True),
        batch_size=batch_size, shuffle=False, **kwargs)

    if use_cuda:
        if ngpus > 1:
            # note: taking .module right away discards the DataParallel
            # wrapper, so multi-GPU is not actually used here and the
            # state_dict keys stay unprefixed for weight loading below
            model = torch.nn.DataParallel(model)
            model = model.module
    model = model.to(torch.device("cuda" if use_cuda else "cpu"))
    for w in FLAGS.weights:
        # model.load_weights(w)
        checkpoint = torch.load(w)
        model.load_state_dict(checkpoint['model_state_dict'])
        logging('evaluating ... %s' % (w))
        test(val_loader, conf_thresh, nms_thresh, iou_thresh, out_path, batch_size)
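Unlike most of the other examples, which call `model.load_weights(w)` on raw Darknet binary weight files, this loop restores a PyTorch checkpoint dict and reads its 'model_state_dict' key. A minimal sketch of how such a checkpoint might be written on the training side (the epoch and optimizer entries are assumptions; only 'model_state_dict' is required by the loop above):

import torch

def save_checkpoint(model, optimizer, epoch, path):
    # 'model_state_dict' is the only key the evaluation loop reads;
    # epoch and optimizer state are stored for resuming training.
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, path)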
Example #3
def main():
    # Training settings
    datacfg = FLAGS.data
    cfgfile = FLAGS.config

    data_options = read_data_cfg(datacfg)
    testlist = data_options['valid']
    gpus = data_options['gpus']  # e.g. 0,1,2,3
    ngpus = len(gpus.split(','))

    num_workers = int(data_options['num_workers'])
    # for testing, batch_size is set to 1 (one)
    batch_size = 1  # int(net_options['batch'])

    global use_cuda
    use_cuda = torch.cuda.is_available() and (True if use_cuda is None else
                                              use_cuda)

    ###############
    torch.manual_seed(seed)
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed)

    global model
    model = Darknet(cfgfile)
    #model.print_network()

    init_width = model.width
    init_height = model.height

    kwargs = {
        'num_workers': num_workers,
        'pin_memory': True
    } if use_cuda else {}

    global test_loader
    test_loader = torch.utils.data.DataLoader(
        dataset.listDataset(testlist,
                            shape=(init_width, init_height),
                            shuffle=False,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                            ]),
                            train=False),
        batch_size=batch_size, shuffle=False, **kwargs)

    if use_cuda:
        if ngpus > 1:
            # note: taking .module right away discards the DataParallel
            # wrapper, so multi-GPU is not actually used here
            model = torch.nn.DataParallel(model)
            model = model.module
    model = model.to(torch.device("cuda" if use_cuda else "cpu"))
    for w in FLAGS.weights:
        model.load_weights(w)
        logging('evaluating ... %s' % (w))
        test()
Example #4
FLAGS = None

parser = argparse.ArgumentParser()
parser.add_argument('--data', '-d', type=str,
                    default='cfg/sketch.data', help='data definition file')
parser.add_argument('--config', '-c', type=str,
                    default='cfg/sketch.cfg', help='network configuration file')
parser.add_argument('--weights', '-w', type=str, nargs='+',
                    default=['weights/yolov3.weights'], help='initial weights file')
FLAGS, _ = parser.parse_known_args()

# Training settings
datacfg       = FLAGS.data
cfgfile       = FLAGS.config

data_options  = read_data_cfg(datacfg)
testlist      = data_options['valid']
gpus          = data_options['gpus']  # e.g. 0,1,2,3
ngpus         = len(gpus.split(','))

num_workers   = int(data_options['num_workers'])
# batch size for testing
batch_size    = 16  # int(net_options['batch'])

model = Darknet(cfgfile)

data = []
target = []
org_w = []
org_h = []
Example #5
def valid(datacfg, cfgfile, weightfile, outfile):
    options = read_data_cfg(datacfg)
    valid_images = options['valid']
    name_list = options['names']
    prefix = 'results'
    names = load_class_names(name_list)

    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    m.cuda()
    m.eval()

    valid_dataset = dataset.listDataset(valid_images,
                                        shape=(m.width, m.height),
                                        shuffle=False,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                        ]))
    valid_batchsize = 2
    assert valid_batchsize > 1

    kwargs = {'num_workers': 4, 'pin_memory': True}
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=valid_batchsize,
                                               shuffle=False,
                                               **kwargs)

    fps = [0] * m.num_classes
    if not os.path.exists('results'):
        os.mkdir('results')
    for i in range(m.num_classes):
        buf = '%s/%s%s.txt' % (prefix, outfile, names[i])
        fps[i] = open(buf, 'w')

    lineId = -1

    conf_thresh = 0.005
    nms_thresh = 0.45
    if m.net_name() == 'region':  # region_layer
        shape = (0, 0)
    else:
        shape = (m.width, m.height)
    for _, (data, target, org_w, org_h) in enumerate(valid_loader):
        data = data.cuda()
        output = m(data)
        batch_boxes = get_all_boxes(output,
                                    shape,
                                    conf_thresh,
                                    m.num_classes,
                                    only_objectness=0,
                                    validation=True)

        for i in range(len(batch_boxes)):
            lineId += 1
            fileId = os.path.basename(valid_files[lineId]).split('.')[0]
            #width, height = get_image_size(valid_files[lineId])
            width, height = float(org_w[i]), float(org_h[i])
            print(valid_files[lineId])
            boxes = batch_boxes[i]
            correct_yolo_boxes(boxes, width, height, m.width, m.height)
            boxes = nms(boxes, nms_thresh)
            for box in boxes:
                x1 = (box[0] - box[2] / 2.0) * width
                y1 = (box[1] - box[3] / 2.0) * height
                x2 = (box[0] + box[2] / 2.0) * width
                y2 = (box[1] + box[3] / 2.0) * height

                det_conf = box[4]
                for j in range((len(box) - 5) // 2):
                    cls_conf = box[5 + 2 * j]
                    cls_id = int(box[6 + 2 * j])
                    prob = det_conf * cls_conf
                    fps[cls_id].write('%s %f %f %f %f %f\n' %
                                      (fileId, prob, x1, y1, x2, y2))

    for i in range(m.num_classes):
        fps[i].close()
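The per-class files written by `valid()` contain one detection per line in the format `fileId confidence x1 y1 x2 y2` (the `det_test_` prefix from Example #1 produces exactly these files). A small consumer-side sketch for loading them back, e.g. as input to a VOC-style AP computation; the function name is illustrative:

def read_detections(det_file):
    """Load 'fileId conf x1 y1 x2 y2' lines into parallel lists."""
    image_ids, confidences, boxes = [], [], []
    with open(det_file) as f:
        for line in f:
            parts = line.split()
            image_ids.append(parts[0])
            confidences.append(float(parts[1]))
            boxes.append([float(v) for v in parts[2:6]])
    return image_ids, confidences, boxes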
Example #6
args = parser.parse_args()

args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    print('Using cuda.')
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

transform = transforms.Compose([
    transforms.ToTensor(),
])

cfg_params = utils.read_data_cfg(args.cfgfile)

train_txt = cfg_params['train']
test_txt = cfg_params['test']
backup_dir = cfg_params['backup']

if args.load_model is not None:
    print('Loading model from %s.' % args.load_model)
    model = models.model.UNet(args.im_size, args.kernel_size)
    model.load_state_dict(torch.load(args.load_model))
elif args.test:
    print('Missing model file for evaluating test set.')
    exit()
else:
    model = models.model.UNet(args.im_size, args.kernel_size)
Example #7
    argv_len = len(sys.argv)
    if argv_len < 4:
        # assumed guard (the snippet is truncated just above): fall back
        # to default configuration files when paths are not given on argv
        datacfg = 'datacfg.data'
        netcfg = 'netcfg.data'
        weightcfg = 'weightcfg.data'
    if argv_len == 4:
        datacfg = sys.argv[1]
        netcfg = sys.argv[2]
        weightcfg = sys.argv[3]

    # Parse configuration files
    print(torch.cuda.is_available())
    print(torch.__version__)
    print(torch.version.cuda)
    print(torch.backends.cudnn.version())
    print(torch.cuda.get_device_name(0))
    print(torch.cuda.device_count())
    datacfg_options = utils.read_data_cfg(datacfg)
    datacfg_options['gpus'] = '0'
    datacfg_options['num_workers'] = '10'

    netcfg_options = utils.read_net_cfg(netcfg)[0]

    trainlist = datacfg_options['train']
    nsamples = utils.read_train_list(trainlist)
    gpus = datacfg_options['gpus']
    num_workers = int(datacfg_options['num_workers'])
    backupdir = datacfg_options['backup']
    if not os.path.exists(backupdir):
        os.makedirs(backupdir)
    batch_size = int(netcfg_options['batch'])
    max_batches = int(netcfg_options['max_batches'])
    learning_rate = float(netcfg_options['learning_rate'])
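`utils.read_train_list` is not shown in any snippet; from its use here (`nsamples = utils.read_train_list(trainlist)`) it presumably just counts the entries in the training list. A hedged sketch under that assumption, one image path per line:

def read_train_list(trainlist):
    # Assumed behavior: the train list is a text file with one image
    # path per line; return the number of non-empty entries.
    with open(trainlist) as f:
        return sum(1 for line in f if line.strip())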
Example #8
args = parser.parse_args()

args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    print('Using cuda.')
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

transform = transforms.Compose([
    transforms.ToTensor(),
])

cfg_params = utils.read_data_cfg(args.cfgfile)

train_txt = cfg_params['train']
test_txt = cfg_params['test']
backup_dir = cfg_params['backup']

if args.load_model is not None:
    print('Loading model from %s.' % args.load_model)
    model = torch.load(args.load_model)
elif args.test:
    print('Missing model file for evaluating test set.')
    exit()
else:
    model = models.model.UNet(args.im_size, args.kernel_size)

# Datasets and dataloaders.
Example #9
def run():

    logger = logging.getLogger()

    # Parse command window input
    parser = argparse.ArgumentParser(description='SingleShotPose')
    parser.add_argument('--datacfg', type=str,
                        default='cfg/ape.data')  # data config
    parser.add_argument('--modelcfg', type=str,
                        default='cfg/yolo-pose.cfg')  # network config
    parser.add_argument(
        '--initweightfile', type=str,
        default='backup/init.weights')  # initialization weights
    parser.add_argument('--pretrain_num_epochs', type=int,
                        default=0)  # how many epochs to pretrain
    args = parser.parse_args()
    datacfg = args.datacfg
    modelcfg = args.modelcfg
    initweightfile = args.initweightfile
    pretrain_num_epochs = args.pretrain_num_epochs

    print("ARGS: ", args)

    # Parse data configuration file
    data_options = read_data_cfg(datacfg)
    trainlist = data_options['valid']
    gpus = data_options['gpus']
    num_workers = int(data_options['num_workers'])
    backupdir = data_options['backup']
    im_width = int(data_options['width'])
    im_height = int(data_options['height'])
    fx = float(data_options['fx'])
    fy = float(data_options['fy'])
    u0 = float(data_options['u0'])
    v0 = float(data_options['v0'])

    print("DATA OPTIONS: ", data_options)

    # Parse network and training configuration parameters
    net_options = parse_cfg(modelcfg)[0]
    loss_options = parse_cfg(modelcfg)[-1]
    batch_size = int(net_options['batch'])
    max_batches = int(net_options['max_batches'])
    max_epochs = int(net_options['max_epochs'])
    learning_rate = float(net_options['learning_rate'])
    momentum = float(net_options['momentum'])
    decay = float(net_options['decay'])
    conf_thresh = float(net_options['conf_thresh'])
    num_keypoints = int(net_options['num_keypoints'])
    num_classes = int(loss_options['classes'])
    num_anchors = int(loss_options['num'])
    steps = [float(step) for step in net_options['steps'].split(',')]
    scales = [float(scale) for scale in net_options['scales'].split(',')]
    # anchors       = [float(anchor) for anchor in loss_options['anchors'].split(',')]

    print("NET OPTIONS: ", net_options)
    print("LOSS OPTIONS: ", loss_options)

    # Specify the model and the loss
    model = Darknet(modelcfg)

    # Model settings
    model.load_weights(initweightfile)
    model.print_network()
    # model.seen        = 0
    # processed_batches = model.seen/batch_size
    init_width = 416  # model.width
    init_height = 416  # model.height
    batch_size = 1
    num_workers = 0

    # print("Size: ", init_width, init_height)

    bg_file_names = get_all_files('../VOCdevkit/VOC2012/JPEGImages')
    # Specify the number of workers
    use_cuda = True
    kwargs = {
        'num_workers': num_workers,
        'pin_memory': True
    } if use_cuda else {}

    logger.info("Loading data")

    # valid_dataset = dataset_multi.listDataset("../LINEMOD/duck/test_occlusion.txt", shape=(init_width, init_height),
    #                                             shuffle=False,
    #                                             objclass="duck",
    #                                             transform=transforms.Compose([
    #                                                 transforms.ToTensor(),
    #                                             ]))

    # Get the dataloader for training dataset

    dataloader = torch.utils.data.DataLoader(
        dataset.listDataset(trainlist,
                            shape=(init_width, init_height),
                            shuffle=False,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                            ]),
                            train=False,
                            seen=0,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            bg_file_names=bg_file_names),
        batch_size=batch_size, shuffle=False, **kwargs)

    model.cuda()
    model.eval()

    delay = {True: 0, False: 1}
    paused = True

    # print("Classes in dataset ", num_classes)
    print("Batches in dataloader: ", len(dataloader))
    tbar = tqdm(dataloader, ascii=True, dynamic_ncols=True)
    for ii, s in enumerate(tbar):
        images, targets = s
        # print(ii, "IMAGES:" , images.shape)
        # print(ii, "TARGET\n", targets.shape)
        bs = images.shape[0]
        t = targets.cpu().numpy().reshape(bs, 50, -1)
        # print("TARGET [0, 0:1] \n", t[0, :1])
        # print("CLASSES ", t[0, :, 0])

        images_gpu = images.cuda()

        model_out = model(images_gpu).detach()
        all_boxes = np.array(
            get_region_boxes(model_out,
                             num_classes,
                             num_keypoints,
                             anchor_dim=num_anchors)).reshape(
                                 batch_size, 1, -1)

        # print("Model OUT", all_boxes.shape)

        pred = np.zeros_like(all_boxes)
        pred[:, 0, 0] = all_boxes[:, 0, -1]
        pred[:, 0, 1:-2] = all_boxes[:, 0, :-3]

        viz = visualize_results(images, t, pred, img_size=416, show_3d=True)

        cv2.imshow("Res ", viz)

        k = cv2.waitKey(delay[paused])
        if k & 0xFF == ord('q'):
            break
        if k & 0xFF == ord('p'):
            paused = not paused
Example #10
def valid(datacfg, cfgfile, weightfile, save_path, use_cuda=False, size=416):
    options = read_data_cfg(datacfg)
    valid_images = options['valid']
    name_list = options['names']
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    prefix = save_path
    names = load_class_names(name_list)

    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]
    
    m = Darknet(cfgfile)

    m.load_weights(weightfile)
    num_classes = len(names)

    if use_cuda:
        m.cuda()
    m.eval()

    valid_dataset = MyDataset(valid_images, shape=(size, size),
                              is_train=False,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
                              ]))
    valid_batchsize = 10
    assert valid_batchsize > 1
    
    if use_cuda:
        kwargs = {'num_workers': 4, 'pin_memory': True}
    else:
        kwargs = {}
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=valid_batchsize, shuffle=False, **kwargs) 

    fps = [0] * num_classes
    # per-class detection files go under save_path (already created above)
    for i in range(num_classes):
        buf = '%s/%s.txt' % (prefix, names[i])
        fps[i] = open(buf, 'w')
   
    lineId = -1
    
    conf_thresh = 0.01
    nms_thresh = 0.5
    for batch_id, (data, target) in enumerate(valid_loader):
        if use_cuda:
            data = data.cuda()
        print('start processing batch{}'.format(batch_id))
        start1 = time.time()
        output = m(data)
        batch_boxes = get_all_boxes(output, conf_thresh, num_classes,
                                    only_objectness=0, validation=True,
                                    use_cuda=use_cuda)
        for i in range(data.size(0)):
            lineId = lineId + 1
            fileId = os.path.basename(valid_files[lineId]).split('.')[0]
            width, height = get_image_size(valid_files[lineId])
            boxes = batch_boxes[i]
            if boxes.numel() == 0:
                continue
            for cls_id in range(num_classes):
                cls_ind = (boxes[:, 6] == cls_id)
                cls_boxes = nms(boxes[cls_ind], nms_thresh)
                if cls_boxes.numel() == 0:
                    continue
                for box in cls_boxes:
                    x1 = (box[0] - box[2]/2.0) * width
                    y1 = (box[1] - box[3]/2.0) * height
                    x2 = (box[0] + box[2]/2.0) * width
                    y2 = (box[1] + box[3]/2.0) * height 
                    fps[cls_id].write('%s %f %f %f %f %f\n' %(fileId, box[4] * box[5], x1, y1, x2, y2))
        end1 = time.time()
        print('average time {}s'.format((end1 - start1) / len(data)))
        del data,target
    for i in range(num_classes):
        fps[i].close()
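Both this example and Example #5 inline the same conversion from YOLO's normalized center format to pixel corner coordinates. A small helper capturing that arithmetic, with the `[cx, cy, w, h, ...]` box layout used in the snippets above:

def yolo_box_to_corners(box, img_width, img_height):
    """Convert a normalized (cx, cy, w, h) box to pixel corner coordinates."""
    cx, cy, w, h = box[0], box[1], box[2], box[3]
    x1 = (cx - w / 2.0) * img_width
    y1 = (cy - h / 2.0) * img_height
    x2 = (cx + w / 2.0) * img_width
    y2 = (cy + h / 2.0) * img_height
    return x1, y1, x2, y2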