# Пример #1 (Example 1)
# 0
def validate(val_list, model, criterion):
    """Evaluate *model* on the validation list and report the MAE.

    Args:
        val_list: validation sample list passed to dataset.ListDataset.
        model: network producing a density map; its sum is the count estimate.
        criterion: unused here; kept so the signature parallels train().

    Returns:
        Mean absolute error averaged over the validation batches.
    """
    print('begin test')
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    test_loader = torch.utils.data.DataLoader(
        dataset.ListDataset(val_list,
                            shuffle=False,
                            transform=val_transform,
                            train=False),
        batch_size=args.batch_size)

    model.eval()

    mae = 0
    for img, target in test_loader:
        img = Variable(img.cuda())
        output = model(img)

        # Count estimate = sum over the predicted density map.
        mae += abs(output.data.sum() -
                   target.sum().type(torch.FloatTensor).cuda())

    mae = mae / len(test_loader)
    print(' * MAE {mae:.3f} '.format(mae=mae))

    return mae
# Пример #2 (Example 2)
# 0
def train(train_list, model, criterion, optimizer, epoch):
    """Train *model* for one epoch over *train_list*.

    Args:
        train_list: training sample list passed to dataset.ListDataset.
        model: network under training; its .seen counter seeds the dataset.
        criterion: loss between predicted and target density maps.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index (logging only).
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    train_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    train_loader = torch.utils.data.DataLoader(
        dataset.ListDataset(train_list,
                            shuffle=True,
                            transform=train_transform,
                            train=True,
                            seen=model.seen,
                            batch_size=args.batch_size,
                            num_workers=args.workers),
        batch_size=args.batch_size)
    print('epoch %d, processed %d samples, lr %.10f' %
          (epoch, epoch * len(train_loader.dataset), args.lr))

    model.train()
    end = time.time()

    for i, (img, target) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update(time.time() - end)

        img = Variable(img.cuda())
        output = model(img)

        # NOTE(review): unsqueeze(0) assumes a loader batch size of 1 —
        # confirm against args.batch_size.
        target = Variable(target.type(torch.FloatTensor).unsqueeze(0).cuda())

        loss = criterion(output, target)
        losses.update(loss.item(), img.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Wall time for the full iteration (load + forward + backward).
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
# Пример #3 (Example 3)
# 0
def evaluate(model, opt, iou_thres, conf_thres, nms_thres, img_size,
             batch_size):
    """Evaluate a detector over the dataset described by *opt*.

    Runs each image through *model* one at a time, applies NMS, and folds
    per-image statistics into precision/recall/AP metrics.

    Args:
        model: detection network returning (outputs, _) per forward pass.
        opt: options namespace used to build the dataset (supplies batchSize).
        iou_thres: IoU threshold for matching predictions to targets.
        conf_thres: confidence threshold passed to non_max_suppression.
        nms_thres: NMS IoU threshold.
        img_size: side length used to rescale normalized target boxes.
        batch_size: unused; the loader reads opt.batchSize instead.

    Returns:
        Tuple (precision, recall, AP, f1, ap_class) from ap_per_class.
    """
    model.eval()

    # Get dataloader
    testDataSet = dataset.ListDataset(opt)
    dataloader = DataLoader(testDataSet,
                            batch_size=opt.batchSize,
                            shuffle=False,
                            num_workers=0,
                            pin_memory=True,
                            collate_fn=testDataSet.collate_fn)

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available(
    ) else torch.FloatTensor

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (imgs, targets) in enumerate(
            tqdm.tqdm(dataloader, desc="Detecting objects")):
        # BUGFIX: the original rebound the outer-loop names `imgs` and
        # `targets` inside this inner loop (it only worked because the zip
        # iterator was created first); use distinct names instead.
        for img, img_targets in zip(imgs, targets):
            # Extract class labels before boxes are rescaled.
            labels += img_targets[:, 1].tolist()
            # Rescale targets from normalized xywh to absolute xyxy.
            img_targets[:, 2:] = xywh2xyxy(img_targets[:, 2:])
            img_targets[:, 2:] *= img_size
            # Add a leading batch dimension for the single-image forward pass.
            img_batch = Variable(img.type(Tensor),
                                 requires_grad=False).unsqueeze(0)
            with torch.no_grad():
                outputs, _ = model(img_batch)
                outputs = torch.cat(outputs, dim=1).cpu()
                outputs = utils.non_max_suppression(outputs,
                                                    conf_thres=conf_thres,
                                                    nms_thres=nms_thres)
            sample_metrics += get_batch_statistics(outputs,
                                                   img_targets,
                                                   iou_threshold=iou_thres)

    # Concatenate sample statistics across the whole dataset.
    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    print('precision, recall, AP, f1, ap_class : ', precision, recall, AP, f1,
          ap_class)
    return precision, recall, AP, f1, ap_class
# Пример #4 (Example 4)
# 0
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    # NOTE(review): the lines above are the tail of evaluate(); the enclosing
    # def and the opening of this list comprehension lie outside this excerpt.
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    print('precision, recall, AP, f1, ap_class : ', precision, recall, AP, f1,
          ap_class)
    return precision, recall, AP, f1, ap_class


if __name__ == "__main__":
    # Parse command-line options and echo them for reproducibility.
    opt = parse_args()
    print(opt)

    trainDataSet = dataset.ListDataset(opt)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    class_names = load_classes(opt.names)

    # net = model.objDetNet(opt)
    # net.to(device)
    # net.loadPretrainedParams()

    # Initiate model
    # NOTE(review): rebinding `model` shadows the name it was just read from
    # (apparently a module); any later model.objDetNet call would fail —
    # consider renaming the instance.
    model = model.objDetNet(opt).to(device)
    model.loadPretrainedParams()
    # if opt.weights_path.endswith(".weights"):
    #     # Load darknet weights
    #     model.load_pretrained_params(opt.weights_path)
    # else:
    #     # Load checkpoint weights
# Пример #5 (Example 5)
# 0
def train(epoch):
    """Run one epoch of detector training over the global `trainlist`.

    Relies on module-level globals: model, trainlist, optimizer, region_loss,
    batch_size, num_workers, init_width/init_height, use_cuda, kwargs,
    save_interval, backupdir, and the running `processed_batches` counter
    (mutated here).

    Args:
        epoch: current epoch index; used for logging and checkpoint naming.
    """
    global processed_batches
    t0 = time.time()
    #     if ngpus > 1:
    #         cur_model = model.module
    #     else:
    #         cur_model = model
    cur_model = model
    # NOTE(review): `seen` presumably lets the dataset vary behavior with
    # training progress — confirm in dataset.ListDataset.
    train_loader = torch.utils.data.DataLoader(dataset.ListDataset(
        trainlist,
        shape=(init_width, init_height),
        shuffle=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
        ]),
        train=True,
        seen=cur_model.seen,
        batch_size=batch_size,
        num_workers=num_workers),
                                               batch_size=batch_size,
                                               shuffle=False,
                                               **kwargs)

    lr = adjust_learning_rate(optimizer, processed_batches)
    logging('epoch %d, processed %d samples, lr %f' %
            (epoch, epoch * len(train_loader.dataset), lr))
    model.train()
    t1 = time.time()
    # Accumulators for the (disabled) per-stage timing report below.
    avg_time = torch.zeros(9)
    for batch_idx, (data, target) in enumerate(train_loader):
        t2 = time.time()
        # LR schedule is driven by total batches processed, not by epoch.
        adjust_learning_rate(optimizer, processed_batches)
        processed_batches = processed_batches + 1
        #if (batch_idx+1) % dot_interval == 0:
        #    sys.stdout.write('.')

        if use_cuda:
            data = data.cuda()
            #target= target.cuda()
        t3 = time.time()
        data, target = Variable(data), Variable(target)
        t4 = time.time()
        optimizer.zero_grad()
        t5 = time.time()
        output = model(data)
        t6 = time.time()
        # Keep the loss module's own sample counter in step with data seen.
        region_loss.seen = region_loss.seen + data.data.size(0)
        loss = region_loss(output, target)
        t7 = time.time()
        loss.backward()
        t8 = time.time()
        optimizer.step()
        t9 = time.time()
        # Disabled per-stage profiling; flip `False` to True to enable.
        if False and batch_idx > 1:
            avg_time[0] = avg_time[0] + (t2 - t1)
            avg_time[1] = avg_time[1] + (t3 - t2)
            avg_time[2] = avg_time[2] + (t4 - t3)
            avg_time[3] = avg_time[3] + (t5 - t4)
            avg_time[4] = avg_time[4] + (t6 - t5)
            avg_time[5] = avg_time[5] + (t7 - t6)
            avg_time[6] = avg_time[6] + (t8 - t7)
            avg_time[7] = avg_time[7] + (t9 - t8)
            avg_time[8] = avg_time[8] + (t9 - t1)
            print('-------------------------------')
            print('       load data : %f' % (avg_time[0] / (batch_idx)))
            print('     cpu to cuda : %f' % (avg_time[1] / (batch_idx)))
            print('cuda to variable : %f' % (avg_time[2] / (batch_idx)))
            print('       zero_grad : %f' % (avg_time[3] / (batch_idx)))
            print(' forward feature : %f' % (avg_time[4] / (batch_idx)))
            print('    forward loss : %f' % (avg_time[5] / (batch_idx)))
            print('        backward : %f' % (avg_time[6] / (batch_idx)))
            print('            step : %f' % (avg_time[7] / (batch_idx)))
            print('           total : %f' % (avg_time[8] / (batch_idx)))
        t1 = time.time()
    print('')
    t1 = time.time()
    logging('training with %f samples/s' % (len(train_loader.dataset) /
                                            (t1 - t0)))
    if (epoch + 1) % save_interval == 0:
        # Checkpoint: sync the seen counter to full epochs, then save.
        logging('save weights to %s/%06d.weights' % (backupdir, epoch + 1))
        cur_model.seen = (epoch + 1) * len(train_loader.dataset)
        cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch + 1))
# Пример #6 (Example 6)
# 0
# Restore checkpoint weights and print the architecture.
model.load_weights(weightfile)
model.print_network()

# Resume counters from the checkpoint's sample count.
# NOTE(review): `/` is true division in Python 3, so these become floats;
# integer counts (`//`) may be intended — confirm downstream usage.
region_loss.seen = model.seen
processed_batches = model.seen / batch_size

init_width = model.width
init_height = model.height
init_epoch = model.seen / nsamples

# Loader workers / pinned memory are only enabled when CUDA is in use.
kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(dataset.ListDataset(
    testlist,
    shape=(init_width, init_height),
    shuffle=False,
    transform=transforms.Compose([
        transforms.ToTensor(),
    ]),
    train=False),
                                          batch_size=batch_size,
                                          shuffle=False,
                                          **kwargs)

if use_cuda:
    if ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

# Collect named parameters for per-parameter optimizer configuration.
params_dict = dict(model.named_parameters())
params = []
# Пример #7 (Example 7)
# 0
from create_train_dataset import create_training_image_list, DATA_PATH
"""
This help me understand data loader

"""

if __name__ == "__main__":
    train_list = create_training_image_list(DATA_PATH)

    train_loader = torch.utils.data.DataLoader(
        dataset.ListDataset(train_list,
                            shuffle=True,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
                            ]),
                            train=True,
                            batch_size=1,
                            num_workers=1),
        batch_size=1
    )  # if you have different image size, then batch_size must be 1

    print(type(train_loader))

    for batch_ndx, sample in enumerate(train_loader):
        img = sample[0]
        density = sample[1]
        print("img shape ", img.shape)
        print("density shape", density.shape)