Example #1
def validate(data_loader, net, loss):
    start_time = time.time()
    
    net.eval()

    metrics = []
    # Inference only: torch.no_grad() replaces the removed volatile=True flag,
    # and non_blocking=True replaces the old async=True keyword (a syntax error
    # on Python >= 3.7, where async is reserved).
    with torch.no_grad():
        for i, (data, target, coord) in enumerate(data_loader):
            data = data.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            coord = coord.cuda(non_blocking=True)

            output = net(data, coord)
            loss_output = loss(output, target, train=False)

            loss_output[0] = loss_output[0].item()  # .data[0] is deprecated
            metrics.append(loss_output)
    end_time = time.time()

    metrics = np.asarray(metrics, np.float32)
    print('Validation: tpr %3.2f, tnr %3.8f, total pos %d, total neg %d, time %3.2f' % (
        100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
        100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
        np.sum(metrics[:, 7]),
        np.sum(metrics[:, 9]),
        end_time - start_time))
    print('loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
        np.mean(metrics[:, 0]),
        np.mean(metrics[:, 1]),
        np.mean(metrics[:, 2]),
        np.mean(metrics[:, 3]),
        np.mean(metrics[:, 4]),
        np.mean(metrics[:, 5])))
    print()
    print()
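Many of the examples on this page were written for PyTorch <= 0.3, where Variable(x, volatile=True) disabled autograd during inference; modern PyTorch uses torch.no_grad() instead. A minimal self-contained sketch of the equivalence, assuming any nn.Module:

import torch
import torch.nn as nn

net = nn.Linear(8, 2)      # stand-in module; any nn.Module works
x = torch.randn(4, 8)

# Old (PyTorch <= 0.3): y = net(Variable(x, volatile=True))
# Modern equivalent: disable autograd for the whole inference block.
with torch.no_grad():
    y = net(x)
print(y.requires_grad)     # False: no autograd graph was built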
Example #2
def singletest(data, net, config, splitfun, combinefun, n_per_run, margin=64, isfeat=False):
    z, h, w = data.size(2), data.size(3), data.size(4)
    print(data.size())
    data = splitfun(data, config['max_stride'], margin)
    # non_blocking=True replaces async=True; torch.no_grad() below replaces volatile=True.
    data = data.cuda(non_blocking=True)
    splitlist = range(0, args.split + 1, n_per_run)
    outputlist = []
    featurelist = []
    with torch.no_grad():  # inference only
        for i in range(len(splitlist) - 1):
            if isfeat:
                output, feature = net(data[splitlist[i]:splitlist[i + 1]])
                featurelist.append(feature)
            else:
                output = net(data[splitlist[i]:splitlist[i + 1]])
            output = output.data.cpu().numpy()
            outputlist.append(output)

    output = np.concatenate(outputlist, 0)
    # Integer division: this code predates Python 3, where / on ints yields floats.
    output = combinefun(output, z // config['stride'], h // config['stride'], w // config['stride'])
    if isfeat:
        feature = np.concatenate(featurelist, 0).transpose([0, 2, 3, 4, 1])
        feature = combinefun(feature, z // config['stride'], h // config['stride'], w // config['stride'])
        return output, feature
    else:
        return output
Example #3
def train(data_loader, net, loss, epoch, optimizer, get_lr, save_freq, save_dir):
    start_time = time.time()
    
    net.train()
    lr = get_lr(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    metrics = []
    for i, (data, target, coord) in enumerate(data_loader):
        data = data.cuda(non_blocking=True)  # async=True is a syntax error on Python >= 3.7
        target = target.cuda(non_blocking=True)
        coord = coord.cuda(non_blocking=True)

        output = net(data, coord)
        loss_output = loss(output, target)
        optimizer.zero_grad()
        loss_output[0].backward()
        optimizer.step()

        loss_output[0] = loss_output[0].item()  # .data[0] is deprecated
        metrics.append(loss_output)

    if epoch % save_freq == 0:  # use the save_freq argument rather than the global args
        state_dict = net.module.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()
            
        torch.save({
            'epoch': epoch,
            'save_dir': save_dir,
            'state_dict': state_dict,
            'args': args},
            os.path.join(save_dir, '%03d.ckpt' % epoch))

    end_time = time.time()

    metrics = np.asarray(metrics, np.float32)
    print('Epoch %03d (lr %.5f)' % (epoch, lr))
    print('Train:      tpr %3.2f, tnr %3.2f, total pos %d, total neg %d, time %3.2f' % (
        100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
        100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
        np.sum(metrics[:, 7]),
        np.sum(metrics[:, 9]),
        end_time - start_time))
    print('loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
        np.mean(metrics[:, 0]),
        np.mean(metrics[:, 1]),
        np.mean(metrics[:, 2]),
        np.mean(metrics[:, 3]),
        np.mean(metrics[:, 4]),
        np.mean(metrics[:, 5])))
    print()
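The checkpoint written above can be restored with torch.load. A minimal sketch, assuming the same network architecture is rebuilt first; build_net and the path are hypothetical stand-ins, and net.module.state_dict() was used at save time because the model was wrapped in DataParallel:

import os
import torch

ckpt = torch.load(os.path.join('results', '010.ckpt'), map_location='cpu')
print(ckpt['epoch'], ckpt['save_dir'])

net = build_net()                        # hypothetical factory for the same architecture
net.load_state_dict(ckpt['state_dict'])  # keys were saved without the 'module.' prefix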
Example #4
def test():
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    bin_op.binarization()
    with torch.no_grad():  # inference only
        for data, target in testloader:
            data, target = data.cuda(), target.cuda()

            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
    bin_op.restore()
    # The loop iterates over testloader, so use the same loader here
    # (the original referenced an undefined test_loader).
    acc = 100. * correct / len(testloader.dataset)

    if acc > best_acc:
        best_acc = acc
        save_state(model, best_acc)

    test_loss /= len(testloader.dataset)
    writer.add_scalar('testing_loss', test_loss * args.batch_size, epoch)
    writer.add_scalar('testing_accuracy',
                      100. * correct / len(testloader.dataset), epoch)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * args.batch_size, correct, len(testloader.dataset),
        100. * correct / len(testloader.dataset)))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return
Example #5
def train(epoch, writer):
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):
        # process the weights including binarization
        bin_op.binarization()

        # forwarding
        data, target = data.cuda(), target.cuda()  # Variable wrappers are no longer needed
        optimizer.zero_grad()
        output = model(data)

        # backwarding
        loss = criterion(output, target)
        loss.backward()

        # restore weights
        bin_op.restore()
        bin_op.updateBinaryGradWeight()

        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.
                  format(epoch, batch_idx * len(data),
                         len(trainloader.dataset),
                         100. * batch_idx / len(trainloader), loss.item(),
                         optimizer.param_groups[0]['lr']))

    # TODO: fix it
    bin_op.binarization()
    #writer.add_histogram('weights',model.state_dict()['bin_conv2.conv.weight'], epoch)
    bin_op.restore()

    return
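bin_op is used throughout these binarized-network examples but defined elsewhere. It follows the XNOR-Net-style recipe: binarize the convolution weights for the forward pass, then restore the full-precision copies so the optimizer updates them. A minimal sketch of what such a helper might look like; the real BinOp class in these repositories is more involved (per-channel scaling factors, weight clamping, and straight-through gradient scaling):

import torch
import torch.nn as nn

class BinOpSketch:
    """Binarize Conv2d weights in place and restore them afterwards."""

    def __init__(self, model):
        self.weights = [m.weight for m in model.modules()
                        if isinstance(m, nn.Conv2d)]
        self.saved = [None] * len(self.weights)

    def binarization(self):
        for i, w in enumerate(self.weights):
            self.saved[i] = w.data.clone()    # keep the full-precision copy
            alpha = w.data.abs().mean()       # layer-wise scaling factor
            w.data = w.data.sign() * alpha    # binarized weights for the forward pass

    def restore(self):
        for i, w in enumerate(self.weights):
            w.data.copy_(self.saved[i])       # the update then applies to fp weights

    def updateBinaryGradWeight(self):
        # The real helper rescales gradients here (straight-through estimator);
        # left as a no-op in this sketch.
        pass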
Example #6
def test():
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    testing = True

    bin_params.binarize()
    with torch.no_grad():  # inference only
        for data, target in testloader:
            if not args.cpu:
                data, target = data.cuda(), target.cuda()

            output = model(data, testing)
            test_loss += criterion(output, target).item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    bin_params.restore()
    acc = 100. * correct.item() / float(len(testloader.dataset))

    # Save the model params if the accuracy is the highest yet
    if acc > best_acc:
        best_acc = acc
        save_state(model, best_acc)
    
    test_loss /= float(len(testloader.dataset))
    print('\nTest Accuracy: {}/{} ({:.2f}%)'.format(
        correct.item(), len(testloader.dataset), acc))
    return
Example #7
def batchify(data, bsz):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    # Move the data to the GPU when one is available.
    if args.cuda:
        data = data.cuda()
    return data
Example #8
def batchify(data, bsz):
    nbatch = data.size(0) // bsz
    data = data.narrow(0, 0, nbatch * bsz)
    data = data.view(bsz, -1).t().contiguous()
    if args.cuda:
        data = data.cuda()
    return data
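A quick worked example of what batchify produces (args here is a stand-in namespace with cuda=False): 26 tokens with bsz=4 keep 24 tokens and come out as a 6x4 matrix whose columns are independent token streams.

import argparse
import torch

args = argparse.Namespace(cuda=False)  # stand-in for the script's CLI arguments

tokens = torch.arange(26)       # toy corpus of token ids 0..25
batched = batchify(tokens, 4)   # nbatch = 26 // 4 = 6, trims to 24 tokens
print(batched.size())           # torch.Size([6, 4])
print(batched[:, 0])            # tensor([0, 1, 2, 3, 4, 5]) - the first stream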
Example #9
def test2(evaluate=False):
    global best_acc

    model.eval()
    test_loss = 0
    correct = 0

    bin_op.binarizationTest()
    with torch.no_grad():  # replaces the removed volatile=True flag
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += criterion(output, target).item()  # .data[0] is deprecated
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()

    bin_op.restore()

    acc = 100. * correct / len(test_loader.dataset)

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * 128., correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return
Example #10
def test(save_name, best_acc, sample_weights=torch.Tensor(np.ones((50000,1))/50000.0)):
	#global best_acc
	model.eval()
	test_loss = 0
	correct = 0
	bin_op.binarization()

	sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weights[:, 0].double(), 50000)
	testloader_in = torch.utils.data.DataLoader(
		trainset, batch_size=1000, shuffle=False, num_workers=4, sampler=sampler)

	with torch.no_grad():  # inference only
		for data, target in testloader_in:
			data, target = data.cuda(), target.cuda()

			output = model(data)
			test_loss += criterion(output, target).item()  # .data[0] is deprecated
			pred = output.data.max(1, keepdim=True)[1]
			correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
	bin_op.restore()
	acc = 100. * correct / len(testloader_in.dataset)

	if acc > best_acc:
		best_acc = acc
		save_state(model, best_acc, save_name)

	test_loss = test_loss / len(testloader_in.dataset) * 1000
	print('\nTrain set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
		test_loss , correct, len(testloader_in.dataset),
		100. * correct / len(testloader_in.dataset)))
	print('Best Train Accuracy: {:.2f}%\n'.format(best_acc))
	return best_acc
Example #11
def test():
    net.eval()
    # two_nets = TwoNets(model, net)
    loss_avg = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:

            data, target = data.cuda(), target.cuda()

            # adv_data = adversary_test(two_nets, data, target)
            #
            # # forward
            # output = two_nets(adv_data)
            generated = model(data, mode='edge_forward').detach()
            output = net(generated)
            loss = F.cross_entropy(output, target)

            # accuracy
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).sum().item()

            # test loss average
            loss_avg += loss.item()

    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)
Example #12
def get_error_output(data, target, batch_sample_weights):

	data, target = data.cuda(), target.cuda()
	output = model(data)
	loss = (criterion_seperated(output, target) * batch_sample_weights.cuda().float()).mean()

	return output
Example #13
def validate(data_loader, net, loss):
    start_time = time.time()

    net.eval()

    metrics = []
    with torch.no_grad():  # replaces the removed volatile=True flag
        for i, (data, target, coord) in enumerate(data_loader):
            data = data.cuda()
            target = target.cuda()
            coord = coord.cuda()

            output = net(data, coord)
            loss_output = loss(output, target, train=False)

            loss_output[0] = loss_output[0].item()
            metrics.append(loss_output)
    end_time = time.time()

    metrics = np.asarray(metrics, np.float32)
    print('Validation: tpr %3.2f, tnr %3.8f, total pos %d, total neg %d, time %3.2f' % (
        100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
        100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
        np.sum(metrics[:, 7]),
        np.sum(metrics[:, 9]),
        end_time - start_time))
    print('loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
        np.mean(metrics[:, 0]),
        np.mean(metrics[:, 1]),
        np.mean(metrics[:, 2]),
        np.mean(metrics[:, 3]),
        np.mean(metrics[:, 4]),
        np.mean(metrics[:, 5])))
    print()
    print()
Example #14
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):
        # process the weights including binarization
        bin_op.binarization()
        
        # forwarding
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        
        # backwarding
        loss = criterion(output, target)
        loss.backward()
        
        # restore weights
        bin_op.restore()
        bin_op.updateBinaryGradWeight()
        
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
                epoch, batch_idx * len(data), len(trainloader.dataset),
                100. * batch_idx / len(trainloader), loss.item(),
                optimizer.param_groups[0]['lr']))
    return
Example #15
def singletest(data,
               net,
               config,
               splitfun,
               combinefun,
               n_per_run,
               margin=64,
               isfeat=False):
    z, h, w = data.size(2), data.size(3), data.size(4)
    print(data.size())
    data = splitfun(data, config['max_stride'], margin)
    data = data.cuda()  # Variable wrapper is no longer needed
    splitlist = range(0, args.split + 1, n_per_run)
    outputlist = []
    featurelist = []
    for i in range(len(splitlist) - 1):
        if isfeat:
            output, feature = net(data[splitlist[i]:splitlist[i + 1]])
            featurelist.append(feature)
        else:
            output = net(data[splitlist[i]:splitlist[i + 1]])
        output = output.data.cpu().numpy()
        outputlist.append(output)

    output = np.concatenate(outputlist, 0)
    # Integer division: this code predates Python 3, where / on ints yields floats.
    output = combinefun(output, z // config['stride'], h // config['stride'],
                        w // config['stride'])
    if isfeat:
        feature = np.concatenate(featurelist, 0).transpose([0, 2, 3, 4, 1])
        feature = combinefun(feature, z // config['stride'],
                             h // config['stride'], w // config['stride'])
        return output, feature
    else:
        return output
Example #16
def validate(data_loader, net, loss):
    start_time = time.time()

    net.eval()

    metrics = []
    with torch.no_grad():  # inference only
        for i, (data, target, coord) in enumerate(data_loader):
            data = data.cuda()
            target = target.cuda()
            coord = coord.cuda()

            output = net(data, coord)
            loss_output = loss(output, target, train=False)
            loss_output[0] = loss_output[0].item()  # .data[0] is deprecated
            metrics.append(loss_output)
    end_time = time.time()

    metrics = np.asarray(metrics, np.float32)
    print('Validation: tpr %3.2f, tnr %3.8f, total pos %d, total neg %d, time %3.2f' % (
        100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
        100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
        np.sum(metrics[:, 7]),
        np.sum(metrics[:, 9]),
        end_time - start_time))
    print('loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
        np.mean(metrics[:, 0]),
        np.mean(metrics[:, 1]),
        np.mean(metrics[:, 2]),
        np.mean(metrics[:, 3]),
        np.mean(metrics[:, 4]),
        np.mean(metrics[:, 5])))
    print()
    print()
Example #17
def test():
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    bin_op.binarization()
    with torch.no_grad():  # inference only
        for data, target in testloader:
            data, target = data.cuda(), target.cuda()

            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    bin_op.restore()
    acc = 100. * float(correct) / len(testloader.dataset)

    if acc > best_acc:
        best_acc = acc
        save_state(model, best_acc)
    
    test_loss /= len(testloader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * 128., correct, len(testloader.dataset),
        100. * float(correct) / len(testloader.dataset)))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return
Example #18
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):

        # forwarding
        if torch.cuda.is_available():
            data = data.cuda()
            target = target.cuda()

        optimizer.zero_grad()
        output = model(data)

        # backwarding
        loss = criterion(output, target)
        loss.backward()

        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.
                  format(epoch, batch_idx * len(data),
                         len(trainloader.dataset),
                         100. * batch_idx / len(trainloader), loss.item(),
                         optimizer.param_groups[0]['lr']))
    return
Example #19
def train(data_loader, net, loss, epoch, optimizer, get_lr, save_freq,
          save_dir):
    start_time = time.time()

    net.train()
    lr = get_lr(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    metrics = []

    for i, (data, target, coord) in enumerate(data_loader):
        data = data.cuda().requires_grad_()   # replaces Variable(..., requires_grad=True)
        target = target.cuda()
        coord = coord.cuda().requires_grad_()

        output = net(data, coord)
        # shapes: data [1, 1, 96, 96, 96], coord [1, 3, 24, 24, 24],
        # target and output [1, 24, 24, 24, 3, 5]
        loss_output = loss(output, target)
        optimizer.zero_grad()
        loss_output[0].backward()
        optimizer.step()

        loss_output[0] = loss_output[0].item()  # .data[0] is deprecated
        metrics.append(loss_output)

    if epoch % save_freq == 0:  # use the save_freq argument rather than the global args
        state_dict = net.module.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()

        torch.save(
            {
                'epoch': epoch,
                'save_dir': save_dir,
                'state_dict': state_dict,
                'args': args
            }, os.path.join(save_dir, '%03d.ckpt' % epoch))

    end_time = time.time()

    metrics = np.asarray(metrics, dtype=np.float32)
    print('Epoch %03d (lr %.5f)' % (epoch, lr))
    print('Train:      tpr %3.2f, tnr %3.2f, total pos %d, total neg %d, time %3.2f' % (
        100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
        100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
        np.sum(metrics[:, 7]),
        np.sum(metrics[:, 9]),
        end_time - start_time))
    print('loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
        np.mean(metrics[:, 0]),
        np.mean(metrics[:, 1]),
        np.mean(metrics[:, 2]),
        np.mean(metrics[:, 3]),
        np.mean(metrics[:, 4]),
        np.mean(metrics[:, 5])))
    print()
Example #20
def test():
    global best_acc
    model.eval()  # put the module in evaluation mode
    test_loss = 0
    correct = 0
    bin_op.binarization()
    with torch.no_grad():  # inference only
        for data, target in testloader:
            data, target = data.cuda(), target.cuda()

            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    bin_op.restore()
    acc = 100. * float(correct) / len(testloader.dataset)

    if acc > best_acc:
        best_acc = acc
        save_state(model, best_acc)

    test_loss /= len(testloader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * 128., correct, len(testloader.dataset),
        100. * float(correct) / len(testloader.dataset)))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return
Example #21
def test(model, best_acc, studflag=True):
    test_loss = 0.0
    correct = 0

    model.eval()
    bin_op.binarization()
    with torch.no_grad():  # inference only
        for data, target in testloader:
            data, target = data.cuda(), target.cuda()
            output, h1_student, h2_student = model(data)
            test_loss += criterion(output, target).item()  # .data[0] is deprecated
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()

    bin_op.restore()
    acc = 100. * correct / len(testloader.dataset)

    if not studflag:
        print("Teacher showing student")
    else:
        print(acc, best_acc)
        if acc > best_acc:
            best_acc = acc
            save_state(model, best_acc)

    test_loss /= len(testloader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * 128., correct, len(testloader.dataset),
        100. * correct / len(testloader.dataset)))

    print('Best Accuracy: {:.2f}%\n'.format(best_acc))

    return best_acc
Example #22
def train(epoch, sample_weights=torch.Tensor(np.ones((50000, 1)) / 50000.0)):
    adjust_learning_rate(optimizer, epoch)
    model.train()

    sampler = torch.utils.data.sampler.WeightedRandomSampler(
        sample_weights[:, 0].double(), 50000)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                              shuffle=False, num_workers=4,
                                              sampler=sampler)
    for batch_idx, (data, target) in enumerate(trainloader):
        bin_op.binarization()
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        # backwarding
        loss = criterion(output, target)
        loss.backward()
        # restore weights
        bin_op.restore()
        bin_op.updateBinaryGradWeight()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.
                  format(epoch, batch_idx * len(data),
                         len(trainloader.dataset),
                         100. * batch_idx / len(trainloader), loss.item(),
                         optimizer.param_groups[0]['lr']))
    return trainloader
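The reweighting scheme above depends on WeightedRandomSampler drawing dataset indices in proportion to sample_weights. A small self-contained illustration with toy weights:

import torch
from torch.utils.data import WeightedRandomSampler

# Three samples; the last is 8x more likely to be drawn than either of the others.
weights = torch.tensor([1.0, 1.0, 8.0], dtype=torch.double)
sampler = WeightedRandomSampler(weights, num_samples=10, replacement=True)
print(list(sampler))  # e.g. [2, 2, 0, 2, 2, 1, 2, 2, 2, 0] - mostly index 2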
Example #23
def get_batch(source, i, evaluation=False):
    # volatile was removed from PyTorch; wrap calls in torch.no_grad() for
    # evaluation instead. The flag is kept so existing call sites still work.
    seq_len = min(args.bptt, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].view(-1)
    if args.cuda:
        data = data.cuda()
    return data, target
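How get_batch pairs inputs with next-token targets, continuing the toy batchify example from Example #8 and assuming args.bptt = 2: the target is simply the input window shifted down one row, flattened for the loss.

import argparse
import torch

args = argparse.Namespace(bptt=2, cuda=False)  # stand-in CLI arguments

batched = batchify(torch.arange(26), 4)  # the 6x4 matrix from the earlier sketch
data, target = get_batch(batched, 0)
print(data.size())    # torch.Size([2, 4]) - rows 0..1
print(target.size())  # torch.Size([8])   - rows 1..2, flattened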
Example #24
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):
        # process the weights including binarization
        bin_op.binarization()
        
        # forwarding
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        
        # backwarding
        loss = criterion(output, target)
        loss.backward()
        
        # restore weights
        bin_op.restore()
        bin_op.updateBinaryGradWeight()
        
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
                epoch, batch_idx * len(data), len(trainloader.dataset),
                100. * batch_idx / len(trainloader), loss.item(),
                optimizer.param_groups[0]['lr']))
    return
Example #25
def train(epoch, trainloader, model, criterion, optimizer):
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):

        # process the weights including binarization
        # quantize the conv weights, and store the full-precision weights
        if args.quantization != 'none':
            bin_op.binarization()

        # forwarding
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)

        # backwarding
        loss = criterion(output, target)
        loss.backward()

        # restore the full precision weights
        if args.quantization != 'none':
            bin_op.restore()
            bin_op.updateBinaryGradWeight()

        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.
                  format(epoch, batch_idx * len(data),
                         len(trainloader.dataset),
                         100. * batch_idx / len(trainloader), loss.item(),
                         optimizer.param_groups[0]['lr']))
    return
Example #26
def test(evaluate=False):
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0

    with torch.no_grad():  # replaces the removed volatile=True flag
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += criterion(output, target).item()  # .data[0] is deprecated
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()

    acc = 100. * correct / len(test_loader.dataset)
    if ((args.prune == 'node') and (not args.retrain)) or (acc > best_acc):
        best_acc = acc
        if not evaluate:
            save_state(model, best_acc)

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * args.batch_size, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return
Example #27
def test():
    net.eval()
    # two_nets = TwoNets(model, net)
    loss_avg = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:

            data, target = data.cuda(), target.cuda()

            interp_z = torch.zeros_like(data)[:, 0:1, :, :].uniform_(0, 1)  # already on data's device

            generated1 = model(data, mode='just_cannyedge1').detach().cuda()

            generated2 = model(data, mode='just_catedge2').detach().cuda()

            generated = interp_z * generated1 + (1 - interp_z) * generated2

            output = net(generated)
            loss = F.cross_entropy(output, target)

            # accuracy
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).sum().item()

            # test loss average
            loss_avg += loss.item()

    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)
Example #28
def train(model, train_loader, epoch, optimizer, criterion):
    print('Training %s...' % model_str)

    train_total = 0
    train_correct = 0

    for i, (data, labels) in enumerate(train_loader):
        data = data.cuda()
        labels = labels.cuda()

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        _, logits = model(data, revision=False)
        prec1, = accuracy(logits, labels, topk=(1, ))
        train_total += 1
        train_correct += prec1
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()

        if (i + 1) % args.print_freq == 0:
            print(
                'Epoch [%d/%d], Iter [%d/%d] Training Accuracy: %.4f, Loss: %.4f'
                % (epoch + 1, args.n_epoch_1, i + 1,
                   len(train_dataset) // batch_size, prec1, loss.item()))

    train_acc = float(train_correct) / float(train_total)

    return train_acc
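The accuracy(..., topk=...) helper used above is defined elsewhere in these repositories. For reference, a common implementation along the lines of the PyTorch ImageNet example; the variant actually used here may differ (e.g. Examples #29-30 pass an extra training flag):

def accuracy(output, target, topk=(1,)):
    """Compute top-k precision for logits `output` and integer labels `target`."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)  # indices of the top-k classes
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res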
Example #29
def test():
    global best_acc
    model.eval()
    flag = True
    training = False
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    bin_op.binarization()
    with torch.no_grad():  # replaces the removed volatile=True flag
        for batch_idx, (data, target) in enumerate(testloader):
            target = target.cuda(non_blocking=True)  # async=True is a syntax error on Python >= 3.7
            data_var = data.cuda()
            target_var = target

            output, activations = model(data_var)

            # Layers to run PCA on
            key_idx = range(0, 19)
            for i in key_idx:  # run PCA layer-wise
                size_keyidx = activations[i].size()
                activation_i = activations[i]
                run_PCA(activations, i, size_keyidx[1], threshold=0.99)

            loss = criterion(output, target_var)

            prec1, prec5 = accuracy(output.data, target, training, topk=(1, 5))
            losses.update(loss.item(), data.size(0))  # .data[0] is deprecated
            top1.update(prec1[0], data.size(0))
            top5.update(prec5[0], data.size(0))

            if batch_idx % 10 == 0:
                if flag:
                    print('[{0}/{1}({2:.0f}%)]\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              batch_idx, len(testloader),
                              100. * float(batch_idx) / len(testloader),
                              loss=losses, top1=top1, top5=top5))
                else:
                    print('Epoch: [{0}][{1}/{2}({3:.0f}%)]\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              epoch, batch_idx, len(testloader),
                              100. * float(batch_idx) / len(testloader),
                              loss=losses, top1=top1, top5=top5))
    bin_op.restore()
    acc = top1.avg
    if acc > best_acc:
        best_acc = acc
        if not flag:
            save_state(model, best_acc)
    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.4f}'
          .format(top1=top1, top5=top5, loss=losses))

    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return acc, losses.avg
Example #30
def test():  #Testing function
    global best_acc
    flag = False
    model.eval()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    bin_op.binarization()
    with torch.no_grad():  # replaces the removed volatile=True flag
        for batch_idx, (data, target) in enumerate(testloader):
            target = target.cuda(non_blocking=True)  # async=True is a syntax error on Python >= 3.7
            data_var = data.cuda()
            target_var = target

            output = model(data_var)
            loss = criterion(output, target_var)
            prec1, prec5 = accuracy(output.data, target, False, topk=(1, 5))
            losses.update(loss.item(), data.size(0))  # .data[0] is deprecated
            top1.update(prec1[0], data.size(0))
            top5.update(prec5[0], data.size(0))
            if batch_idx % 10 == 0:
                if flag:
                    print('[{0}/{1}({2:.0f}%)]\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              batch_idx, len(testloader),
                              100. * float(batch_idx) / len(testloader),
                              loss=losses, top1=top1, top5=top5))
                else:
                    print('Epoch: [{0}][{1}/{2}({3:.0f}%)]\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              epoch, batch_idx, len(testloader),
                              100. * float(batch_idx) / len(testloader),
                              loss=losses, top1=top1, top5=top5))
    bin_op.restore()
    acc = top1.avg
    if acc > best_acc:
        best_acc = acc
        save_state(model, best_acc)

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1,
                                                                  top5=top5))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return acc, losses.avg
Example #31
def batchify(data, bsz):
    print("batchify")
    nbatch = data.size(0) // bsz
    data = data.narrow(0, 0, nbatch * bsz)
    data = data.view(bsz, -1).t().contiguous()
    if USE_CUDA:
        data = data.cuda()
    return data
Example #32
def batchify(data, bsz):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    return data.cuda() if args.cuda else data
Example #33
def batchify(data, bsz):  # data is a Tensor
    # Total token count divided by batch_size gives the number of batches.
    nbatch = data.size(0) // bsz
    # narrow(dim, start, length): dim 0 means rows, the second argument is the
    # starting row, the third is how many rows to keep.
    data = data.narrow(0, 0, nbatch * bsz)
    # The size -1 is inferred from the other dimensions: view as bsz rows of
    # nbatch columns, then t() transposes to nbatch rows of bsz columns.
    data = data.view(bsz, -1).t().contiguous()
    if args.cuda:
        data = data.cuda()
    return data  # returns an nbatch x bsz matrix
Example #34
def test_get_batch(source, evaluation=False):
    # volatile was removed from PyTorch; wrap calls in torch.no_grad() for
    # evaluation instead. The flag is kept so existing call sites still work.
    seq_len = len(source) - 1
    data = source[:seq_len]
    target = source[1:1 + seq_len].view(-1)
    # This is where data should be CUDA-fied to lessen OOM errors
    if args.cuda:
        return data.cuda(), target.cuda()
    else:
        return data, target
Example #35
def get_batch(source, i, evaluation=False):
    # volatile was removed from PyTorch; wrap calls in torch.no_grad() for
    # evaluation instead. The flag is kept so existing call sites still work.
    seq_len = min(args.bptt, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].view(-1)
    # This is where data should be CUDA-fied to lessen OOM errors
    if args.cuda:
        return data.cuda(), target.cuda()
    else:
        return data, target
Example #36
def batchify(data, bsz):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    if args.cuda:
        data = data.cuda()
    return data