Example No. 1
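All of the examples on this page come from the same project and assume a shared preamble. A minimal sketch of the imports they rely on, assuming utils (with AvgrageMeter and accuracy), progress_bar, AttackPGD, args, and device are project-local names defined elsewhere:

import os
import logging

import torch
import torch.nn as nn

import utils                    # assumed: provides AvgrageMeter and accuracy
from utils import progress_bar  # assumed location of the progress_bar helper
# AttackPGD, args (parsed CLI flags), and device are defined elsewhere.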
def infer(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    test_loss = 0
    correct = 0
    total = 0
    max_step = 0
    best_acc = 0

    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits = model(input)
            loss = criterion(logits, target)

            test_loss += loss.item()
            _, predicted = logits.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            max_step = step

            progress_bar(
                step, len(valid_queue), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (test_loss / (step + 1), 100. * correct / total, correct, total))

    # Save checkpoint. Note: best_acc is local and reset to 0 on every call,
    # so this branch always triggers; track the best accuracy outside infer()
    # to keep only the best checkpoint.
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': model.state_dict(),
            'acc': acc,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.pth')
        best_acc = acc


#             prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
#             n = input.size(0)
#             objs.update(loss.data, n)
#             top1.update(prec1.data, n)
#             top5.update(prec5.data, n)

#             if step % args.report_freq == 0:
#                 logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
#                 if 'debug' in args.save:
#                     break
#     return top1.avg, objs.avg

    return 100. * correct / total, test_loss / (max_step + 1)
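A minimal call-site sketch for infer(); the criterion, loader, and model names below are assumptions rather than part of the source:

criterion = nn.CrossEntropyLoss().cuda()
valid_acc, valid_loss = infer(valid_queue, model, criterion)
print('valid acc: %.3f%% | valid loss: %.4f' % (valid_acc, valid_loss))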
Example No. 2
def train4(train_queue, valid_queue, model, architect, criterion, optimizer,
           lr, perturb_alpha, epsilon_alpha, model2, epoch, delta):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()

    train_loss = 0
    correct = 0
    total = 0
    max_step = 0
    #     delta = torch.empty(5, 3, 32, 32)
    m = 64

    for step, (input, target) in enumerate(train_queue):
        model.train()
        model2.train()  # model2 is the network actually updated in this loop
        n = input.size(0)

        input = input.cuda()
        target = target.cuda(non_blocking=True)

        #         logits2 = resnet18(input*diff, updateType='weight')

        #         pert_inp = input * epsilon_alpha
        #         pert_inp = input * diff
        if delta.size() != input.size():
            print(list(delta.size()))
            print(list(input.size()))
            break
        else:
            pert_inp = torch.mul(input, delta)
        logits2 = model2(pert_inp)
        #         logits2 = model2(x)
        loss2 = criterion(logits2, target)

        optimizer.zero_grad()  # without this, gradients accumulate across steps
        loss2.backward()
        nn.utils.clip_grad_norm_(model2.parameters(), args.grad_clip)
        optimizer.step()  # assumes `optimizer` was built over model2's parameters
        #         model.restore_arch_parameters()

        train_loss += loss2.item()
        _, predicted = logits2.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
        max_step = step

        progress_bar(
            step, len(train_queue), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss / (step + 1), 100. * correct / total, correct, total))

    return 100. * correct / total, train_loss / (max_step + 1)
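A sketch of a per-epoch driver for train4(), assuming the optimizer is built over model2's parameters and delta matches the batch shape; every name here is hypothetical:

optimizer2 = torch.optim.SGD(model2.parameters(), lr=0.025, momentum=0.9)
delta = torch.ones(args.batch_size, 3, 32, 32).cuda()  # identity perturbation
for epoch in range(args.epochs):
    train_acc, train_loss = train4(train_queue, valid_queue, model, architect,
                                   criterion, optimizer2, lr, None, None,
                                   model2, epoch, delta)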
Example No. 3
def infer(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    test_loss = 0
    correct = 0
    total = 0
    max_step = 0
    best_acc = 0

    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits = model(input)
            loss = criterion(logits, target)

            test_loss += loss.item()
            _, predicted = logits.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            max_step = step

            progress_bar(
                step, len(valid_queue), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (test_loss / (step + 1), 100. * correct / total, correct, total))

    # Save checkpoint. As in Example No. 1, best_acc is local, so this always saves.
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': model.state_dict(),
            'acc': acc,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.pth')
        best_acc = acc

    return 100. * correct / total, test_loss / (max_step + 1)
Example No. 4
def trainres(train_queue, model2, criterion, optimizer):
    epoch = 50  # hard-coded placeholder; pass the real epoch in if you track it
    print('\nEpoch: %d' % epoch)
    model2.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(train_queue):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model2(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        progress_bar(
            batch_idx, len(train_queue), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
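trainres() is a plain supervised loop; a minimal driver sketch, with device and the optimizer settings as assumptions:

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model2 = model2.to(device)
optimizer2 = torch.optim.SGD(model2.parameters(), lr=0.1,
                             momentum=0.9, weight_decay=5e-4)
trainres(train_queue, model2, criterion, optimizer2)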
Example No. 5
def train2(train_queue, valid_queue, model, architect, criterion, optimizer,
           lr, perturb_alpha, epsilon_alpha, model2, epoch):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()

    train_loss = 0
    correct = 0
    total = 0
    max_step = 0
    delta = None  # will hold the latest PGD perturbation; None until the first step
    m = 64
    for step, (input, target) in enumerate(train_queue):
        model.train()
        n = input.size(0)

        input = input.cuda()
        target = target.cuda(non_blocking=True)

        # get a random minibatch from the search queue with replacement
        # (iter(valid_queue) is rebuilt every step: random when shuffle=True,
        #  but slow; a persistent iterator is cheaper)
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.cuda()
        target_search = target_search.cuda(non_blocking=True)
        #         if epoch>=15:
        #             architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)

        architect.step(input,
                       target,
                       input_search,
                       target_search,
                       lr,
                       optimizer,
                       unrolled=args.unrolled)
        #         optimizer.zero_grad()
        architect.optimizer.zero_grad()

        # print('before softmax', model.arch_parameters())
        #         model.softmax_arch_parameters()

        ############################################################################################################
        ############################################################################################################
        #         model_adv = AttackPGD(model)
        # # #         logits1, diff = model_adv(input, target)
        #         logits1, diff, x = model_adv(input, target)
        #         loss1 = criterion(logits1, target)

        #         optimizer.zero_grad()
        #         loss1.backward()
        #         optimizer.step()

        ############################################################################################################
        #         if perturb_alpha:
        #             diff = perturb_alpha(model, input, target, epsilon_alpha)
        #             optimizer.zero_grad()
        #             architect.optimizer.zero_grad()
        # print('after perturb', model.arch_parameters())
        ############################################################################################################
        ############################################################################################################

        logits = model(input, updateType='weight')
        loss = criterion(logits, target)

        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        #         model.restore_arch_parameters()

        ############################################################################################################
        ############################################################################################################
        model_adv = AttackPGD(model)
        #         logits1, diff = model_adv(input, target)
        logits1, diff, x = model_adv(input, target)
        loss1 = criterion(logits1, target)

        optimizer.zero_grad()
        loss1.backward()
        optimizer.step()

        ############################################################################################################
        #         if perturb_alpha:
        #                 diff = perturb_alpha(model, input, target, epsilon_alpha)
        #                 print(diff)
        #                 print(epsilon_alpha)
        #
        #                 optimizer.zero_grad()
        #                 architect.optimizer.zero_grad()

        ############################################################################################################
        ############################################################################################################
        if delta is not None and diff.size() != delta.size():
            print(list(diff.size()))
            print(list(delta.size()))
            break
        delta = diff

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.data, n)
        top1.update(prec1.data, n)
        top5.update(prec5.data, n)

        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg,
                         top5.avg)
            if 'debug' in args.save:
                break


#         if step > 5:
#             break

    for step, (input, target) in enumerate(train_queue):
        model.train()
        model2.train()  # model2 is the network updated in this second pass
        n = input.size(0)

        input = input.cuda()
        target = target.cuda(non_blocking=True)

        #         logits2 = resnet18(input*diff, updateType='weight')

        #         pert_inp = input * epsilon_alpha
        #         pert_inp = input * diff
        if delta is None or delta.size() != input.size():
            print(list(delta.size()))
            print(list(input.size()))
            break
        else:
            pert_inp = torch.mul(input, delta)
        logits2 = model2(pert_inp)
        #         logits2 = model2(x)
        loss2 = criterion(logits2, target)

        optimizer.zero_grad()  # without this, gradients accumulate across steps
        loss2.backward()
        nn.utils.clip_grad_norm_(model2.parameters(), args.grad_clip)
        # note: `optimizer` was stepped for `model` above; model2 needs its own
        # optimizer here for this step to update model2's weights
        optimizer.step()
        #         model.restore_arch_parameters()

        train_loss += loss2.item()
        _, predicted = logits2.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
        max_step = step

        progress_bar(
            step, len(train_queue), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss / (step + 1), 100. * correct / total, correct, total))

    return 100. * correct / total, train_loss / (max_step + 1)
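The AttackPGD wrapper used by train2() and train() is not shown on this page. A minimal sketch of a PGD attack module with the same (logits, diff, x) return signature; the epsilon, step size, and step count are assumptions:

class AttackPGD(nn.Module):
    def __init__(self, model, epsilon=8 / 255, step_size=2 / 255, num_steps=7):
        super().__init__()
        self.model = model
        self.epsilon = epsilon
        self.step_size = step_size
        self.num_steps = num_steps

    def forward(self, inputs, targets):
        x = inputs.detach()
        for _ in range(self.num_steps):
            x.requires_grad_()
            with torch.enable_grad():
                loss = nn.functional.cross_entropy(self.model(x), targets)
            grad = torch.autograd.grad(loss, [x])[0]
            # one signed-gradient ascent step, projected back into the eps-ball
            x = x.detach() + self.step_size * torch.sign(grad.detach())
            x = torch.min(torch.max(x, inputs - self.epsilon),
                          inputs + self.epsilon)
            x = torch.clamp(x, 0.0, 1.0)
        diff = x - inputs  # perturbation, reused as `delta` by the loops above
        return self.model(x), diff, x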
Example No. 6
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr,
          perturb_alpha, epsilon_alpha, model2, epoch):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()

    train_loss = 0
    correct = 0
    total = 0
    max_step = 0

    for step, (input, target) in enumerate(train_queue):
        model.train()
        model2.train()  # model2 is also updated inside this loop
        n = input.size(0)

        input = input.cuda()
        target = target.cuda(non_blocking=True)

        # get a random minibatch from the search queue with replacement
        # (iter(valid_queue) is rebuilt every step: random when shuffle=True,
        #  but slow; a persistent iterator is cheaper)
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.cuda()
        target_search = target_search.cuda(non_blocking=True)
        #         if epoch>=15:
        #             architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)

        architect.step(input,
                       target,
                       input_search,
                       target_search,
                       lr,
                       optimizer,
                       unrolled=args.unrolled)
        #         optimizer.zero_grad()
        architect.optimizer.zero_grad()

        # print('before softmax', model.arch_parameters())
        #         model.softmax_arch_parameters()

        ############################################################################################################
        ############################################################################################################
        #         model_adv = AttackPGD(model)
        # # #         logits1, diff = model_adv(input, target)
        #         logits1, diff, x = model_adv(input, target)
        #         loss1 = criterion(logits1, target)

        #         optimizer.zero_grad()
        #         loss1.backward()
        #         optimizer.step()

        ############################################################################################################
        #         if perturb_alpha:
        #             diff = perturb_alpha(model, input, target, epsilon_alpha)
        #             optimizer.zero_grad()
        #             architect.optimizer.zero_grad()
        # print('after perturb', model.arch_parameters())
        ############################################################################################################
        ############################################################################################################

        logits = model(input, updateType='weight')
        loss = criterion(logits, target)

        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        #         model.restore_arch_parameters()

        ############################################################################################################
        ############################################################################################################
        model_adv = AttackPGD(model)
        #         logits1, diff = model_adv(input, target)
        logits1, diff, x = model_adv(input, target)
        loss1 = criterion(logits1, target)

        optimizer.zero_grad()
        loss1.backward()
        optimizer.step()

        ############################################################################################################
        #         if perturb_alpha:
        #                 diff = perturb_alpha(model, input, target, epsilon_alpha)
        #                 print(diff)
        #                 print(epsilon_alpha)
        #
        #                 optimizer.zero_grad()
        #                 architect.optimizer.zero_grad()

        ############################################################################################################
        ############################################################################################################

        #         logits2 = resnet18(input*diff, updateType='weight')

        #         pert_inp = input * epsilon_alpha
        #         pert_inp = input * diff
        pert_inp = torch.mul(input, diff)
        logits2 = model2(pert_inp)
        #         logits2 = model2(x)
        loss2 = criterion(logits2, target)

        optimizer.zero_grad()  # without this, gradients accumulate across steps
        loss2.backward()
        nn.utils.clip_grad_norm_(model2.parameters(), args.grad_clip)
        # note: `optimizer` was stepped for `model` above; model2 needs its own
        # optimizer here for this step to update model2's weights
        optimizer.step()

        train_loss += loss2.item()
        _, predicted = logits2.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
        max_step = step

        progress_bar(
            step, len(train_queue), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss / (step + 1), 100. * correct / total, correct, total))

    return 100. * correct / total, train_loss / (max_step + 1)
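A sketch of the outer search loop around train() and infer(); the scheduler, epoch count, and logging call are assumptions:

for epoch in range(args.epochs):
    lr = scheduler.get_last_lr()[0]
    train_acc, train_obj = train(train_queue, valid_queue, model, architect,
                                 criterion, optimizer, lr, None, None,
                                 model2, epoch)
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    scheduler.step()
    logging.info('epoch %d train %.3f valid %.3f', epoch, train_acc, valid_acc)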