Example #1

def train():
    train_dataset = FurnitureDataset('train',
                                     transform=preprocess_with_augmentation(
                                         normalize_05, IMAGE_SIZE))
    val_dataset = FurnitureDataset('val',
                                   transform=preprocess(
                                       normalize_05, IMAGE_SIZE))
    training_data_loader = DataLoader(dataset=train_dataset,
                                      num_workers=8,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True)
    validation_data_loader = DataLoader(dataset=val_dataset,
                                        num_workers=1,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False)

    model = get_model()

    criterion = nn.CrossEntropyLoss().cuda()
    writer = SummaryWriter(log_dir='logs')
    nb_learnable_params = sum(p.numel() for p in model.fresh_params())
    print('nb learnable params: {}'.format(nb_learnable_params))

    lx, px = utils.predict(model, validation_data_loader)
    min_loss = criterion(Variable(px), Variable(lx)).item()

    lr = 0
    patience = 0
    for epoch in range(30):
        print('epoch {}'.format(epoch))
        if epoch == 1:
            lr = 0.00005
            print('set lr={}'.format(lr))
        if patience == 2:
            patience = 0
            model.load_state_dict(torch.load('inception4_lyc.pth'))
            lr = lr / 5
            print('set lr={}'.format(lr))
        if epoch == 0:
            lr = 0.001
            print('set lr={}'.format(lr))
            optimizer = torch.optim.Adam(model.fresh_params(), lr=lr)
        else:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=lr,
                                         weight_decay=0.0001)

        running_loss = RunningMean()
        running_score = RunningMean()
        step = 0
        # len(training_data_loader) is already the number of batches per epoch
        all_step = len(training_data_loader)
        model.train()
        pbar = tqdm(training_data_loader, total=len(training_data_loader))
        for inputs, labels in pbar:
            step += 1
            batch_size = inputs.size(0)

            inputs = Variable(inputs)
            labels = Variable(labels)
            if use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, dim=1)

            loss = criterion(outputs, labels)
            running_loss.update(loss.item(), 1)
            running_score.update(torch.sum(preds != labels.data).item(),
                                 batch_size)
            writer.add_scalar('train_loss', loss.item(),
                              epoch * all_step + step)
            loss.backward()
            optimizer.step()

            pbar.set_description(
                'running loss {:.5f}, running error {:.3f}'.format(
                    running_loss.value, running_score.value))
        print('epoch {}: running loss {:.5f}, running error {:.3f}'.format(
            epoch, running_loss.value, running_score.value))

        lx, px = utils.predict(model, validation_data_loader)
        log_loss = criterion(Variable(px), Variable(lx)).item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        print('val: log_loss {:.5f} accuracy {:.3f}'.format(log_loss, accuracy))

        if log_loss < min_loss:
            torch.save(model.state_dict(), 'inception4_lyc.pth')
            print('val score improved from {:.5f} to {:.5f}. Saved!'.format(
                min_loss, log_loss))
            min_loss = log_loss
            patience = 0
        else:
            patience += 1

        writer.add_scalar('val_loss', log_loss, epoch + 1)
        writer.add_scalar('val_acc', accuracy, epoch + 1)
    writer.close()
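
Every example below leans on a RunningMean helper that is never shown. A minimal sketch consistent with how examples #1 through #5 call it (update(value, count) adds a sum taken over count items; .value reads back the mean), though the original class may differ:

class RunningMean:
    """Weighted running average: update(value, count) adds a sum over count items."""

    def __init__(self):
        self.total = 0.0
        self.count = 0

    def update(self, value, count=1):
        self.total += float(value)
        self.count += count

    @property
    def value(self):
        return self.total / self.count if self.count else 0.0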
Example #2

def train():
    train_dataset = ImageFolder('./data/train/',
                                transform=preprocess_with_augmentation(
                                    normalize_torch, IMAGE_SIZE))
    valid_dataset = ImageFolder('./data/train/',
                                transform=preprocess(normalize_torch,
                                                     IMAGE_SIZE))
    training_data_loader, valid_data_loader = split_train_val_loader(
        train_dataset, valid_dataset, len(train_dataset),
        valid_size=VALID_SIZE, batch_size=BATCH_SIZE,
        train_enlarge_factor=TRAIN_ENLARGE_FACTOR,
        pin_memory=True, num_workers=1, random_seed=RANDOM_SEED)


    model = get_model(MODEL, NB_CLASSES)

    criterion = nn.CrossEntropyLoss().cuda()

    nb_learnable_params = sum(p.numel() for p in model.fresh_params())
    print(f'[+] nb learnable params {nb_learnable_params}')

    lx, px = utils.predict(model, valid_data_loader, prob=False)
    min_loss = criterion(Variable(px), Variable(lx)).item()
    _, preds = torch.max(px.data, dim=1)
    accuracy = torch.mean((preds == lx).float())
    print(f'[+] original loss: {min_loss:.5f}, accuracy: {accuracy:.3f}')

    lr = 0.001
    patience = 0
    earlystop = 0
    optimizer = torch.optim.Adam(model.fresh_params(), lr=lr)
    torch.save(model.state_dict(), MODEL_FILE_NAME)
    for epoch in range(EPOCH):
        if epoch == 1:
            lr = 0.0005
            print(f'[+] set lr={lr}')
        if patience == PATIENCE_LIMIT:
            patience = 0
            model.load_state_dict(torch.load(MODEL_FILE_NAME))
            lr = lr / 10
            print(f'[+] set lr={lr}')
        if earlystop > EARLY_STOP:
            model.load_state_dict(torch.load(MODEL_FILE_NAME))
            print('EARLY STOPPED')
            break  # stop training entirely once the early-stop budget is spent
        if epoch > 0:
            optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0.0001)

        running_loss = RunningMean()
        running_score = RunningMean()

        model.train()
        pbar = tqdm(training_data_loader, total=len(training_data_loader))
        for inputs, labels in pbar:
            batch_size = inputs.size(0)

            inputs = Variable(inputs)
            labels = Variable(labels)
            if use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, dim=1)

            loss = criterion(outputs, labels)
            running_loss.update(loss.item(), 1)
            running_score.update(torch.sum(preds == labels.data).item(), batch_size)

            loss.backward()
            optimizer.step()

            pbar.set_description(
                f'{epoch}: {running_loss.value:.5f} {running_score.value:.3f}')

        model.eval()
        lx, px = utils.predict(model, valid_data_loader)
        log_loss = criterion(Variable(px), Variable(lx))
        log_loss = log_loss.item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        print(f'[+] val loss: {log_loss:.5f} acc: {accuracy:.3f}')

        if log_loss < min_loss:
            torch.save(model.state_dict(), MODEL_FILE_NAME)
            print(f'[+] val loss improved from {min_loss:.5f} to {log_loss:.5f}, accuracy={accuracy}. Saved!')
            min_loss = log_loss
            patience = 0
        else:
            patience += 1
            earlystop += 1
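
Example #2 builds its loaders through split_train_val_loader, which is not shown. A plausible sketch using SubsetRandomSampler, assuming valid_size may be a fraction or an absolute count and that train_enlarge_factor repeats the training indices to lengthen an epoch:

import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler


def split_train_val_loader(train_dataset, valid_dataset, num_samples,
                           valid_size=0.1, batch_size=32,
                           train_enlarge_factor=1, pin_memory=True,
                           num_workers=1, random_seed=0):
    # Shuffle the indices once so the two loaders see a disjoint split.
    rng = np.random.RandomState(random_seed)
    indices = rng.permutation(num_samples)
    n_valid = int(valid_size * num_samples) if valid_size < 1 else int(valid_size)
    valid_idx = indices[:n_valid].tolist()
    # Repeating the training indices enlarges one epoch (assumed meaning
    # of train_enlarge_factor).
    train_idx = np.tile(indices[n_valid:], train_enlarge_factor).tolist()
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              sampler=SubsetRandomSampler(train_idx),
                              pin_memory=pin_memory, num_workers=num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=batch_size,
                              sampler=SubsetRandomSampler(valid_idx),
                              pin_memory=pin_memory, num_workers=num_workers)
    return train_loader, valid_loader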
Example #3

def train(model_name, outputDir):
    train_dataset = FurnitureDataset('train', transform=preprocess_with_augmentation)
    val_dataset = FurnitureDataset('val', transform=preprocess)
    training_data_loader = DataLoader(dataset=train_dataset, num_workers=12,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True)
    validation_data_loader = DataLoader(dataset=val_dataset, num_workers=1,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False)

    model = get_model(model_name)

    nb_learnable_params = sum(p.numel() for p in model.fresh_params())
    print('Number of learnable params: %s' % str(nb_learnable_params))

    # model.fresh_params() could be used here instead to train only the newly
    # initialized weights; this example fine-tunes all parameters from the start.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-5)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2)

    if model_name.endswith("_focal"):
        print("Using focal loss instead of standard cross-entropy")
        criterion = FocalLoss(NB_CLASSES).to(device)
    else:
        criterion = nn.CrossEntropyLoss().to(device)

    min_loss = float("inf")
    max_acc = 0.0
    patience = 0
    for epoch in range(NUM_EPOCHS):
        print('Epoch: %d' % epoch)
        
        running_loss = RunningMean()
        running_error = RunningMean()
        running_accuracy = RunningMean()

        model.train()
        pbar = tqdm(training_data_loader, total=len(training_data_loader))
        for inputs, labels in pbar:
            batch_size = inputs.size(0)

            inputs = Variable(inputs)
            labels = Variable(labels)
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            outputs = model(inputs)
            _, preds = torch.max(outputs.data, dim=1)

            loss = criterion(outputs, labels)
            running_loss.update(loss.item(), 1)
            running_error.update(torch.sum(preds != labels.data).item(),
                                 batch_size)
            running_accuracy.update(torch.sum(preds == labels.data).item(),
                                    batch_size)

            loss.backward()
            optimizer.step()

            pbar.set_description('%.5f %.3f %.3f' % (running_loss.value, running_accuracy.value, running_error.value))
        print('Epoch: %d | Running loss: %.5f | Running accuracy: %.3f | Running error: %.3f' % (epoch, running_loss.value, running_accuracy.value, running_error.value))

        lx, px = utils.predict(model, validation_data_loader, device)
        log_loss = criterion(Variable(px), Variable(lx)).item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        error = torch.mean((preds != lx).float())
        print('Validation loss: %.5f | Accuracy: %.3f | Error: %.3f' % (log_loss, accuracy, error))
        scheduler.step(log_loss)

        # Save model after each epoch
        torch.save(model.state_dict(), os.path.join(outputDir, 'weight_' + model_name + '.pth'))

        betterModelFound = False
        if log_loss < min_loss:
            torch.save(model.state_dict(), os.path.join(outputDir, 'best_val_loss_weight_' + model_name + '.pth'))
            print('Validation score improved from %.5f to %.5f. Model snapshot saved!' % (min_loss, log_loss))
            min_loss = log_loss
            patience = 0
            betterModelFound = True

        if accuracy > max_acc:
            torch.save(model.state_dict(), os.path.join(outputDir, 'best_val_acc_weight_' + model_name + '.pth'))
            print('Validation accuracy improved from %.5f to %.5f. Model snapshot saved!' % (max_acc, accuracy))
            max_acc = accuracy
            patience = 0
            betterModelFound = True

        if not betterModelFound:
            patience += 1
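
Example #3 switches to FocalLoss(NB_CLASSES) when the model name ends in "_focal", but the class itself is not included. A standard focal-loss sketch matching that constructor call; the default gamma and the exact weighting are assumptions:

import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    """Focal loss: down-weights examples the model already classifies well."""

    def __init__(self, num_classes, gamma=2.0):
        super().__init__()
        self.num_classes = num_classes
        self.gamma = gamma

    def forward(self, logits, target):
        log_p = F.log_softmax(logits, dim=1)
        # log-probability of the true class for each sample
        log_pt = log_p.gather(1, target.unsqueeze(1)).squeeze(1)
        pt = log_pt.exp()
        return (-(1 - pt) ** self.gamma * log_pt).mean()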
Example #4

def update_dest(acc_meter, key, content, target):
    if key not in acc_meter:
        acc_meter[key] = RunningMean()
    acc_tmp = accuracy(content, target, topk=(1, ))
    acc_meter[key].update(acc_tmp[0], len(target))
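
update_dest depends on an accuracy(output, target, topk=...) helper. The conventional torchvision-style implementation, returning one top-k percentage per requested k, is a reasonable reconstruction:

def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy in percent for each k in topk."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res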
Example #5

def train():
    train_dataset = FurnitureDataset('train',
                                     transform=preprocess_with_augmentation)
    val_dataset = FurnitureDataset('val', transform=preprocess)
    training_data_loader = DataLoader(dataset=train_dataset,
                                      num_workers=8,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True)
    validation_data_loader = DataLoader(dataset=val_dataset,
                                        num_workers=1,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False)

    model = get_model()

    criterion = nn.CrossEntropyLoss().cuda()

    nb_learnable_params = sum(p.numel() for p in model.fresh_params())
    print(f'[+] nb learnable params {nb_learnable_params}')

    min_loss = float("inf")
    lr = 0
    patience = 0
    for epoch in range(20):
        print(f'epoch {epoch}')
        if epoch == 1:
            lr = 0.00003
            print(f'[+] set lr={lr}')
        if patience == 2:
            patience = 0
            model.load_state_dict(torch.load('best_val_weight.pth'))
            lr = lr / 10
            print(f'[+] set lr={lr}')
        if epoch == 0:
            lr = 0.001
            print(f'[+] set lr={lr}')
            optimizer = torch.optim.Adam(model.fresh_params(), lr=lr)
        else:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=lr,
                                         weight_decay=0.0001)

        running_loss = RunningMean()
        running_score = RunningMean()

        model.train()
        pbar = tqdm(training_data_loader, total=len(training_data_loader))
        for inputs, labels in pbar:
            batch_size = inputs.size(0)

            inputs = Variable(inputs)
            labels = Variable(labels)
            if use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, dim=1)

            loss = criterion(outputs, labels)
            running_loss.update(loss.item(), 1)
            running_score.update(torch.sum(preds != labels.data).item(),
                                 batch_size)

            loss.backward()
            optimizer.step()

            pbar.set_description(
                f'{running_loss.value:.5f} {running_score.value:.3f}')
        print(
            f'[+] epoch {epoch} {running_loss.value:.5f} {running_score.value:.3f}'
        )

        lx, px = utils.predict(model, validation_data_loader)
        log_loss = criterion(Variable(px), Variable(lx)).item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        print(f'[+] val {log_loss:.5f} {accuracy:.3f}')

        if log_loss < min_loss:
            torch.save(model.state_dict(), 'best_val_weight.pth')
            print(
                f'[+] val score improved from {min_loss:.5f} to {log_loss:.5f}. Saved!'
            )
            min_loss = log_loss
            patience = 0
        else:
            patience += 1
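
All the training loops validate through utils.predict, which evidently stacks the labels (lx) and model outputs (px) over a loader; example #2 also passes prob=False and example #3 a device. A sketch reconciling those call sites, assuming raw logits are returned by default:

import torch


def predict(model, loader, device=None, prob=False):
    """Collect (labels, outputs) over a whole loader, returned on CPU."""
    model.eval()
    all_labels, all_outputs = [], []
    with torch.no_grad():
        for inputs, labels in loader:
            inputs = inputs.to(device) if device is not None else inputs.cuda()
            outputs = model(inputs)
            if prob:
                outputs = torch.softmax(outputs, dim=1)
            all_labels.append(labels)
            all_outputs.append(outputs.cpu())
    return torch.cat(all_labels), torch.cat(all_outputs)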
Example #6

def train(train_loader, model, criterion, optimizer, args):
    model = model.train()

    loss_keys = args.loss_keys
    acc_keys = args.acc_keys
    loss_meter = {p: RunningMean() for p in loss_keys}
    acc_meter = {p: RunningMean() for p in acc_keys}

    time_start = time.time()
    for inputs, target in train_loader:

        inputs = inputs.cuda()
        target = target.cuda()

        # compute output
        output_dict = model(inputs, target)
        logits = output_dict['logits']
        logits = output_dict['logits']

        # -----------------
        loss_values = [
            criterion['entropy'](logit, target) for logit in logits
        ]

        if len(loss_keys) > 1:
            kl_loss_1 = kl_loss(logits[5], logits[2].detach(), target)
            kl_loss_2 = kl_loss(logits[8], logits[5].detach(), target)

            loss_values.extend([kl_loss_1, kl_loss_2])
            loss_values.append(sum(loss_values))
        loss_content = {
            loss_keys[k]: loss_values[k]
            for k in range(len(loss_keys))
        }

        # update acc and loss
        acc_values = [
            accuracy(logit, target, topk=(1, ))[0] for logit in logits
        ]
        acc_content = {
            acc_keys[k]: acc_values[k]
            for k in range(len(acc_keys))
        }

        update_meter(loss_meter, loss_content, inputs.size(0))
        update_meter(acc_meter, acc_content, inputs.size(0))

        optimizer.zero_grad()
        loss_values[-1].backward()
        optimizer.step()

    time_elapsed = time.time() - time_start
    # Report the final meter values once, after the epoch finishes.
    tmp_str = ''
    for k, v in loss_meter.items():
        tmp_str += f"{k}:{v.value:.4f} "
    tmp_str += "\n"
    for k, v in acc_meter.items():
        tmp_str += f"{k}:{v.value:.1f} "
    print(tmp_str + f"t:{time_elapsed:.1f}s")
    return loss_meter[loss_keys[-1]].value
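
Example #6 calls kl_loss(student_logits, teacher_logits.detach(), target) between intermediate and deeper branches, which looks like self-distillation. A conventional temperature-scaled KL sketch; the temperature T, and leaving the target argument unused, are assumptions:

import torch.nn.functional as F


def kl_loss(student_logits, teacher_logits, target, T=2.0):
    """KL divergence between softened student and teacher distributions."""
    log_p_student = F.log_softmax(student_logits / T, dim=1)
    p_teacher = F.softmax(teacher_logits / T, dim=1)
    return F.kl_div(log_p_student, p_teacher, reduction='batchmean') * (T * T)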
Example #7

def train(attention=False):
    train_dataset = FurnitureDataset('train',
                                     transform=preprocess_with_augmentation)
    train_val_dataset = FurnitureDataset(
        'validation', transform=preprocess_with_augmentation)
    val_dataset = FurnitureDataset('validation', transform=preprocess)

    training_data_loader = DataLoader(dataset=train_dataset,
                                      num_workers=8,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True)
    train_val_data_loader = DataLoader(dataset=train_val_dataset,
                                       num_workers=8,
                                       batch_size=BATCH_SIZE,
                                       shuffle=True)
    validation_data_loader = DataLoader(dataset=val_dataset,
                                        num_workers=0,
                                        batch_size=BATCH_SIZE // 2,
                                        shuffle=False)

    if USE_FOCAL_LOSS:
        criterion = nn.CrossEntropyLoss(reduction='none').cuda()
    else:
        criterion = nn.CrossEntropyLoss().cuda()

    print("loading model...")
    if not attention:
        model = get_model()
        save_name = ""
    else:
        save_name = "att_"
        model = dense_attention201(num_classes=128)
        if use_gpu:
            model.cuda()
        fresh_params = [p['params'] for p in model.fresh_params()]
        nb_learnable_params = 0
        for pp in fresh_params:
            nb_learnable_params += sum(p.numel() for p in pp)
        print('[+] nb learnable params {}'.format(nb_learnable_params))
    print("done.")

    min_loss = float("inf")
    patience = 0

    for epoch in range(STARTER, STARTER + 10):
        print('epoch {}'.format(epoch))
        if epoch == 1:
            lr = 0.00002
            model.load_state_dict(torch.load('best_val_weight_0.pth'))
            print("[+] loading best_val_weight_0.pth")
        if patience == 2:
            patience = 0
            model.load_state_dict(torch.load('best_val_weight.pth'))
            lr = lr / 5
        elif (epoch + 1) % 3 == 0:
            ckpt = save_name + 'best_val_weight_%s.pth' % (epoch - 1)
            if not os.path.exists(ckpt):
                ckpt = save_name + 'best_val_weight.pth'
            print("loading {}".format(ckpt))
            model.load_state_dict(torch.load(ckpt))
            lr = lr / 2

        if epoch == 0:
            lr = 0.001
            optimizer = torch.optim.Adam(model.fresh_params(), lr=lr)
        else:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=lr,
                                         weight_decay=wd)

        print('[+] set lr={}'.format(lr))
        running_loss = RunningMean()
        running_score = RunningMean()

        model.train()
        ### FOR TRAINING VALIDATION SET
        # if (epoch - STARTER + 1) % 2 == 0 and epoch - STARTER > 4:
        #     loader = train_val_data_loader
        #     print("[+] training with validation set")
        # else:
        #     loader = training_data_loader
        ### FOR TRAINING VALIDATION SET
        loader = training_data_loader
        pbar = tqdm(loader, total=len(loader))
        for inputs, labels in pbar:
            batch_size = inputs.size(0)

            inputs = Variable(inputs)
            target = Variable(labels)
            if use_gpu:
                inputs = inputs.cuda()
                target = target.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, dim=1)
            loss = criterion(outputs, target)
            if USE_FOCAL_LOSS:
                # weight each sample by its softmax confidence in the true
                # class (labels is still the CPU tensor from the loader)
                y_index = torch.arange(labels.size(0), dtype=torch.long)
                l_weight = F.softmax(outputs, dim=1).cpu()[y_index, labels]
                l_weight = l_weight.detach()
                loss = torch.mean(4 * l_weight.cuda() * loss)
            running_loss.update(loss.item(), 1)
            running_score.update(
                torch.sum(preds != target.data, dtype=torch.float32).item(),
                batch_size)
            loss.backward()
            optimizer.step()

            pbar.set_description('{:.5f} {:.3f}'.format(
                running_loss.value, running_score.value))
        print('[+] epoch {} {:.5f} {:.3f}'.format(epoch, running_loss.value,
                                                  running_score.value))

        torch.save(model.state_dict(),
                   save_name + 'best_val_weight_%s.pth' % epoch)

        lx, px = utils.predict(model, validation_data_loader)
        # .mean() collapses the per-sample losses when reduction='none'
        log_loss = criterion(Variable(px), Variable(lx)).mean().item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        print('[+] val {:.5f} {:.3f}'.format(log_loss, accuracy))

        if log_loss < min_loss:
            torch.save(model.state_dict(), 'best_val_weight.pth')
            print(
                '[+] val score improved from {:.5f} to {:.5f}. Saved!'.format(
                    min_loss, log_loss))
            min_loss = log_loss
            patience = 0
        else:
            patience += 1
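
Nearly every example warms up at epoch 0 with model.fresh_params(), training only the newly initialized head before fine-tuning everything. The helper is not shown; one illustrative way get_model could expose it (the resnet50 backbone here is purely hypothetical, and several of the originals clearly use Inception or DenseNet variants):

import torch.nn as nn
import torchvision.models as models


def get_model(num_classes=128):
    model = models.resnet50(pretrained=True)
    # Replace the classifier head for the new task.
    model.fc = nn.Linear(model.fc.in_features, num_classes)

    def fresh_params():
        # Only the freshly initialized head, for the epoch-0 warm-up.
        return model.fc.parameters()

    model.fresh_params = fresh_params
    return model.cuda()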
Example #8

def train(args):

    train_dataset = FurnitureDataset('train',
                                     transform=preprocess_with_augmentation)
    val_dataset = FurnitureDataset('val', transform=preprocess)
    training_data_loader = DataLoader(dataset=train_dataset,
                                      num_workers=8,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True)
    validation_data_loader = DataLoader(dataset=val_dataset,
                                        num_workers=1,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False)

    model = get_model(args.name)

    class_weight = np.load('./class_weight.npy')

    #criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(class_weight)).cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    #criterion = FocalLoss(alpha=alpha, gamma=0).cuda()

    nb_learnable_params = sum(p.numel() for p in model.fresh_params())
    print(f'[+] nb learnable params {nb_learnable_params}')

    min_loss = float("inf")
    lr = 0
    patience = 0
    for epoch in range(30):
        print(f'epoch {epoch}')
        if epoch == 1:
            lr = 0.00003
            print(f'[+] set lr={lr}')
        if patience == 2:
            patience = 0
            model.load_state_dict(
                torch.load(
                    'models_trained/{}_{}_{}/best_val_weight_{}.pth'.format(
                        args.name, args.aug, args.alpha, args.name)))
            lr = lr / 10
            if lr < 3e-6:
                lr = 3e-6
            print(f'[+] set lr={lr}')
        if epoch == 0:
            lr = 0.001
            print(f'[+] set lr={lr}')
            optimizer = torch.optim.Adam(model.fresh_params(), lr=lr)
        else:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=lr,
                                         weight_decay=0.0001)

        running_loss = RunningMean()
        running_score = RunningMean()

        model.train()
        pbar = tqdm(training_data_loader, total=len(training_data_loader))
        for inputs, labels in pbar:
            batch_size = inputs.size(0)

            inputs = Variable(inputs)
            labels = Variable(labels)
            if use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()

            if args.aug:
                inputs, targets_a, targets_b, lam = mixup_data(
                    inputs, labels, args.alpha, use_gpu)

            outputs = model(inputs)

            if args.aug:
                loss_func = mixup_criterion(targets_a, targets_b, lam)
                loss = loss_func(criterion, outputs)
            else:
                loss = criterion(outputs, labels)

            _, preds = torch.max(outputs.data, dim=1)
            running_loss.update(loss.item(), 1)

            if args.aug:
                # mixup error: weighted disagreement against both label sets
                running_score.update(
                    batch_size
                    - lam * preds.eq(targets_a.data).cpu().sum().item()
                    - (1 - lam) * preds.eq(targets_b.data).cpu().sum().item(),
                    batch_size)
            else:
                running_score.update(torch.sum(preds != labels.data).item(),
                                     batch_size)

            loss.backward()
            optimizer.step()

            pbar.set_description(
                f'{running_loss.value:.5f} {running_score.value:.3f}')
        print(
            f'[+] epoch {epoch} {running_loss.value:.5f} {running_score.value:.3f}'
        )

        lx, px = utils.predict(model, validation_data_loader)
        log_loss = criterion(Variable(px), Variable(lx)).item()
        _, preds = torch.max(px, dim=1)
        accuracy = torch.mean((preds == lx).float())
        print(f'[+] val {log_loss:.5f} {accuracy:.3f}')

        if log_loss < min_loss:
            torch.save(
                model.state_dict(),
                'models_trained/{}_{}_{}/best_val_weight_{}.pth'.format(
                    args.name, args.aug, args.alpha, args.name))
            print(
                f'[+] val score improved from {min_loss:.5f} to {log_loss:.5f}. Saved!'
            )
            min_loss = log_loss
            patience = 0
        else:
            patience += 1
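
Example #8's mixup branch (args.aug) relies on mixup_data and mixup_criterion. These match the reference mixup recipe of Zhang et al. (2018), sketched here under that assumption:

import numpy as np
import torch


def mixup_data(x, y, alpha=1.0, use_cuda=True):
    """Convex-combine a batch with a shuffled copy of itself; keep both label sets."""
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0))
    if use_cuda:
        index = index.cuda()
    mixed_x = lam * x + (1 - lam) * x[index]
    return mixed_x, y, y[index], lam


def mixup_criterion(y_a, y_b, lam):
    # Returns a callable matching loss_func(criterion, outputs) above.
    return lambda criterion, pred: (lam * criterion(pred, y_a) +
                                    (1 - lam) * criterion(pred, y_b))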