Example no. 1
import logging
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.backends import cudnn
from tqdm import tqdm

# Project-local symbols used below (their module paths are not shown in the
# original snippet): Dateloader, create_model, validate
def test(opt):
    """ dataset preparation """
    print("dataset preparation ...")

    dataset_val = Dateloader(opt.val_path, mode="test", dataset=opt.Datasets)
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers)

    print('| Building net...')
    model = create_model(opt.Backbone, opt.num_classes)
    model = torch.nn.DataParallel(model)
    cudnn.benchmark = True

    # Checkpoint keys must match the DataParallel wrapper ("module." prefix).
    model.load_state_dict(torch.load(opt.resume))

    # get_parameter(model)

    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(tqdm(data_loader_val)):
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            _, predicted = torch.max(outputs, 1)

            total += targets.size(0)
            correct += predicted.eq(targets).cpu().sum().item()
    acc = 100. * correct / total
    print("\n| Validation\t Net  Acc: %.2f%%" % acc)
def get_model(opt, name="face2mask_weighted_l1_mask_19k_vanilla"):
    # NOTE: here create_model takes the full opt and exposes setup()/eval(),
    # a different API from the create_model(Backbone, num_classes) used above.
    opt.name = name
    model = create_model(opt)
    model.setup(opt)
    if opt.eval:
        model.eval()
    return model
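
For reference, a hypothetical call site for get_model; opt is the option object the function expects, and netG is an illustrative variable name, not from the original code:

# Hypothetical usage: fetch the network for a given experiment name.
netG = get_model(opt, name="face2mask_weighted_l1_mask_19k_vanilla")
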
def train(opt):
    """Train the classifier and track the best validation accuracy."""
    logging.info("dataset preparation ...")
    dataset = Dateloader(opt.data_path, mode="train", dataset=opt.Datasets)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=opt.batch_size,
                                              shuffle=True,
                                              num_workers=opt.num_workers,
                                              drop_last=True,
                                              pin_memory=True)

    dataset_val = Dateloader(opt.data_path, mode="test", dataset=opt.Datasets)
    data_loader_val = torch.utils.data.DataLoader(dataset_val,
                                                  batch_size=opt.batch_size,
                                                  shuffle=False,
                                                  num_workers=opt.num_workers)

    logging.info('| Building net...')
    model = create_model(opt.Backbone, opt.num_classes)
    model = torch.nn.DataParallel(model)
    cudnn.benchmark = True

    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=0.9,
                          weight_decay=2e-5)
    # optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[80, 130, 170, 200, 230, 250], gamma=0.1)
    CEloss = nn.CrossEntropyLoss()

    best_acc = 0
    for epoch in range(opt.epoch_iter):
        model.train()
        epoch_loss = 0
        epoch_time = time.time()
        for i, (image, gt) in enumerate(data_loader):

            start_time = time.time()
            inputs, labels = image.cuda(), gt.cuda()
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = CEloss(outputs, labels)
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            logging.info(
                'Epoch [{}/{}], mini-batch [{}/{}], time {:.3f}s, batch_loss {:.6f}'.format(
                    epoch + 1, opt.epoch_iter, i + 1, len(data_loader),
                    time.time() - start_time, loss.item()))

        # Step the LR schedule once per epoch, after the optimizer updates
        # (stepping before optimizer.step() skips an update in PyTorch >= 1.1).
        lr_scheduler.step()

        if epoch > 1:
            validate(data_loader_val, model, CEloss)
            # NOTE: this test(...) signature differs from test(opt) above; the
            # original presumably defines a separate per-epoch test helper.
            best_acc = test(epoch, model, data_loader_val, best_acc)
            model.train()
        logging.info(
            "----------------------------------------------------------")
        logging.info("            best_acc: {:.3f}".format(best_acc))
        logging.info("              lr: {:.3f}".format(
            optimizer.param_groups[0]['lr']))
        logging.info(
            "----------------------------------------------------------")

        logging.info('epoch_loss is {:.8f}, epoch_time is {:.2f}s'.format(
            epoch_loss / len(data_loader),
            time.time() - epoch_time))
        logging.info(time.asctime(time.localtime(time.time())))
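
Finally, a minimal driver sketch. The option names mirror the opt.* attributes read by train() and test() above; the defaults and the entry-point layout are illustrative assumptions, not from the original project.

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # Option names mirror the attributes used above; defaults are
    # illustrative assumptions, not values from the original project.
    parser.add_argument("--data_path", type=str, required=True)
    parser.add_argument("--val_path", type=str, required=True)
    parser.add_argument("--Datasets", type=str, required=True)
    parser.add_argument("--Backbone", type=str, required=True)
    parser.add_argument("--num_classes", type=int, required=True)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--lr", type=float, default=0.01)
    parser.add_argument("--epoch_iter", type=int, default=260)
    parser.add_argument("--resume", type=str, default="",
                        help="checkpoint path; if set, run evaluation only")
    opt = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    if opt.resume:
        test(opt)
    else:
        train(opt)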