Example #1
def train(epoch, optimizer):
    print('\nEpoch: %d' % epoch)
    global dataframeStarted, dataframe
    net.train()
    train_loss2 = 0
    train_loss1 = 0
    train_loss = 0
    correct = 0.
    total = 0.

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), (targets - 1).cuda()
        optimizer.zero_grad()
        if args.adversary:
            targets = Variable(targets)
            inputs1, targets1 = Variable(inputs, requires_grad=True), targets
            relus, _ = net(inputs1, normalize=True)
            inputs2 = create_adversary(inputs1, targets1, net, criterion)
            _, outputs = net(inputs2, normalize=True)
            loss1 = criterion(outputs, targets)
        else:
            inputs, targets = Variable(inputs), Variable(targets)
            relus, outputs = net(inputs, normalize=True)
            loss1 = criterion(outputs, targets)
        if args.gamma > 0:
            if args.k > 0:
                loss2 = force_smooth_network(relus,
                                             targets,
                                             m=args.m,
                                             k=args.k)
            else:
                loss2 = force_smooth_network(relus, targets, m=args.m)
            value = 1 / args.gamma
            loss = loss1 + loss2 / (value**args.m)
            train_loss2 += loss2.data.item()
        else:
            loss2 = 0
            train_loss2 += loss2
            loss = loss1
        loss.backward()
        optimizer.step()
        if args.beta > 0:
            do_parseval(parseval_parameters)
        train_loss += loss.item()
        train_loss1 += loss1.item()
        _, predicted = torch.max(outputs.data, 1)
        total += float(targets.size(0))
        correct += float(predicted.eq(targets.data).cpu().sum())

        progress_bar(
            batch_idx, len(trainloader),
            'CC: %.3f | SM: %.3f | L: %.3f | A: %.3f%% (%d/%d)' %
            (train_loss1 / (batch_idx + 1), train_loss2 /
             (batch_idx + 1), train_loss /
             (batch_idx + 1), 100. * correct / total, correct, total))
    f = open(path + 'score_training.txt', 'a')
    f.write(str(1. * correct / total))
    f.write('\n')
    f.close()
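
The create_adversary helper used above is not defined in this example. A minimal sketch of what it could look like, assuming a single-step FGSM-style attack (the epsilon value, the use of torch.autograd.grad, and the omission of any clamping/projection are assumptions, not the example's actual implementation):

import torch

def create_adversary(inputs, targets, net, criterion, epsilon=8.0 / 255):
    # Gradient of the loss w.r.t. the (requires_grad) input pixels only, so the
    # parameter gradients of the surrounding training step stay untouched.
    _, outputs = net(inputs, normalize=True)
    loss = criterion(outputs, targets)
    grad, = torch.autograd.grad(loss, inputs)
    # One FGSM step in the direction that increases the loss; detach so the
    # training graph is rebuilt from the perturbed images.
    return (inputs + epsilon * grad.sign()).detach()
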
def train(epoch, optimizer):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss1 = 0
    train_loss2 = 0
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        relus, outputs = net(inputs)
#        relus = relus[4:]
        loss1 = criterion(outputs, targets)
        loss2 = force_smooth_network(relus, targets, classes=100, m=2)
        loss = loss1 + loss2 / (100**2)
        loss.backward()
        optimizer.step()
        do_parseval(parseval_parameters)

        train_loss += loss.item()
        train_loss1 += loss1.item()
        train_loss2 += loss2.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum().item()

        progress_bar(batch_idx, len(trainloader), 'Log Loss: %.3f | Smooth Loss: %.3f | Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (train_loss1/(batch_idx+1),train_loss2/(batch_idx+1),train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    f = open(path + 'score_training.txt','a')
    f.write(str(1.*correct/total))
    f.write('\n')
    f.close()
def test(epoch):
    global best_acc
    net.eval()
    test_loss = 0
    test_loss1 = 0
    test_loss2 = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(testloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs), Variable(targets)
        with torch.no_grad():  # volatile=True is deprecated; disable grad tracking instead
            relus, outputs = net(inputs)
#            relus = relus[4:]
            loss1 = criterion(outputs, targets)
            loss2 = force_smooth_network(relus, targets, classes=100, m=2)
            loss = loss1 + loss2 / (100**2)

        test_loss += loss.item()
        test_loss1 += loss1.item()
        test_loss2 += loss2.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum().item()

        progress_bar(batch_idx, len(testloader), 'Log Loss: %.3f | Smooth Loss: %.3f | Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (test_loss1/(batch_idx+1),test_loss2/(batch_idx+1),test_loss/(batch_idx+1), 100.*correct/total, correct, total))

    # Save checkpoint.
    acc = 100.*correct/total
    f = open(path + 'score.txt','a')
    f.write(str(1.*correct/total))
    f.write('\n')
    f.close()
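
For context, a minimal sketch of how the train and test functions above might be driven, assuming an SGD optimizer and a step learning-rate schedule (the learning rate, momentum, weight decay, epoch count, and decay milestones are placeholders, not values taken from the example):

import torch.optim as optim

optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
for epoch in range(200):
    train(epoch, optimizer)      # one pass over trainloader, logs to score_training.txt
    test(epoch)                  # evaluation pass, appends accuracy to score.txt
    if epoch in (100, 150):      # illustrative decay milestones
        for group in optimizer.param_groups:
            group['lr'] *= 0.1
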
    """
Example #4
def train(epoch, optimizer):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss2 = 0
    train_loss1 = 0
    train_loss = 0
    correct = 0.
    total = 0.

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        if uses_regularizer:
            relus, outputs = net(inputs, normalize=True)
        else:
            outputs = net(inputs, normalize=True)

        loss = criterion(outputs, targets)
        if uses_regularizer:
            _lambda = (args.gamma**args.m)
            loss2 = _lambda * gsp.force_smooth_network(
                relus, targets, m=args.m)
            loss = loss + loss2
            train_loss2 += loss2.data.item()
        loss.backward()
        optimizer.step()
        if uses_parseval:
            do_parseval(parseval_parameters)
        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += float(targets.size(0))
        correct += float(predicted.eq(targets.data).cpu().sum())

        progress_bar(
            batch_idx, len(trainloader),
            'L: %.3f | SM: %.3f | A: %.3f%% (%d/%d)' %
            (train_loss / (batch_idx + 1),
             train_loss2 / (batch_idx + 1),
             100. * correct / total, correct, total))

    f = open(path + 'score_training.txt', 'a')
    f.write(str(1. * correct / total))
    f.write('\n')
    f.close()
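
The do_parseval helper is likewise not shown in these examples. A minimal sketch, assuming the standard Parseval-networks retraction (Cissé et al., 2017), where each weight matrix is updated as W <- (1 + beta) * W - beta * W @ W.T @ W after the optimizer step; the beta value and the flattening of convolutional kernels into matrices are assumptions:

import torch

def do_parseval(parseval_parameters, beta=0.0003):
    # Pull each weight matrix toward an orthonormal (Parseval tight) frame.
    with torch.no_grad():
        for W in parseval_parameters:
            flat = W.view(W.size(0), -1)  # treat conv kernels as (out_channels, -1) matrices
            flat.copy_((1 + beta) * flat - beta * flat @ flat.t() @ flat)
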