Example No. 1
def binary_output(dataloader):
    net = AlexNetPlusLatent(args.bits)
    net.load_state_dict(torch.load('./model/%d' % args.pretrained))
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        net.cuda()
    # accumulate on the GPU only when one is actually available
    full_batch_output = torch.cuda.FloatTensor() if use_cuda else torch.FloatTensor()
    full_batch_label = torch.cuda.LongTensor() if use_cuda else torch.LongTensor()
    net.eval()
    for batch_idx, (inputs, targets) in enumerate(dataloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs, _ = net(inputs)
        full_batch_output = torch.cat((full_batch_output, outputs.data), 0)
        full_batch_label = torch.cat((full_batch_label, targets.data), 0)
    return torch.round(full_batch_output), full_batch_label
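A minimal usage sketch, assuming testloader is a CIFAR-10 DataLoader like the one built further down this page:
# Hypothetical usage of binary_output; testloader is assumed to exist.
test_binary, test_label = binary_output(testloader)
codes = test_binary.cpu().numpy().astype('uint8')  # rows of {0, 1}, one per image
print(codes.shape)  # (num_test_images, args.bits)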
Example No. 2
def binary_output(dataloader):
    net = AlexNetPlusLatent(args.bits)
    net.load_state_dict(torch.load(args.modelpath))
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        net.cuda()
    # accumulate on the GPU only when one is available; this variant returns no labels
    full_batch_output = torch.cuda.FloatTensor() if use_cuda else torch.FloatTensor()
    net.eval()

    for batch_idx, inputs in enumerate(dataloader):
        if use_cuda:
            inputs = inputs.cuda()
        inputs = Variable(inputs)
        inputs = inputs.unsqueeze(0)
        outputs, _ = net(inputs)
        full_batch_output = torch.cat((full_batch_output, outputs.data), 0)
    return torch.round(full_batch_output)
Example No. 3
def binary_output(dataloader):
    net = AlexNetPlusLatent(48)
    net.load_state_dict(torch.load('./{}/{}'.format('model', 92)))
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        net.cuda()
    # accumulate on the GPU only when one is actually available
    full_batch_output = torch.cuda.FloatTensor() if use_cuda else torch.FloatTensor()
    full_batch_label = torch.cuda.LongTensor() if use_cuda else torch.LongTensor()
    net.eval()
    for batch_idx, (inputs, targets) in enumerate(dataloader):
        print(batch_idx)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs, _ = net(inputs)
        full_batch_output = torch.cat((full_batch_output, outputs.data), 0)
        full_batch_label = torch.cat((full_batch_label, targets.data), 0)
    return torch.round(full_batch_output), full_batch_label
Example No. 4
def binary_output(dataloader):
    net = AlexNetPlusLatent(args.bits)
    net.load_state_dict(torch.load('./model/{}'.format(args.pretrained)))
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('Use device: ' + str(device))
    net.to(device)
    net.eval()  # disable dropout so the binary codes are deterministic
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            inputs = inputs.to(device)
            outputs, _ = net(inputs)
            print(batch_idx, outputs.data[0])
Example No. 5
def binary_output(dataloader):
    net = AlexNetPlusLatent(args.bits)
    net.load_state_dict(torch.load('./model/{}'.format(args.pretrained)))
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Use device: " + str(device))
    net.to(device)
    net.eval()  # disable dropout so the binary codes are deterministic
    # start from empty tensors on whichever device the model uses
    full_batch_output = torch.empty(0, device=device)
    full_batch_label = torch.empty(0, dtype=torch.long, device=device)
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs, _ = net(inputs)
            full_batch_output = torch.cat((full_batch_output, outputs.data), 0)
            full_batch_label = torch.cat((full_batch_label, targets.data), 0)
        return torch.round(full_batch_output), full_batch_label
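Calling torch.cat inside the loop re-copies the accumulated tensor on every batch, which is quadratic in the dataset size. A minimal alternative sketch, using the same net, device, and dataloader as above, collects the batches in a list and concatenates once:
with torch.no_grad():
    outputs_list, labels_list = [], []
    for inputs, targets in dataloader:
        outputs, _ = net(inputs.to(device))
        outputs_list.append(outputs)
        labels_list.append(targets.to(device))
# a single concatenation at the end instead of one per batch
full_batch_output = torch.cat(outputs_list, 0)
full_batch_label = torch.cat(labels_list, 0)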
Example No. 6
# (opening of transform_train was truncated in the source; reconstructed to mirror transform_test below)
transform_train = transforms.Compose(
    [transforms.Resize(227),
     transforms.ToTensor(),
     transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose(
    [transforms.Resize(227),
     transforms.ToTensor(),
     transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
trainset = datasets.CIFAR10(root='./data', train=True, download=True,
                            transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                          shuffle=True, num_workers=2)

testset = datasets.CIFAR10(root='./data', train=False, download=True,
                           transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                         shuffle=True, num_workers=2)

net = AlexNetPlusLatent(args.bits)

use_cuda = torch.cuda.is_available()

if use_cuda:
    net.cuda()

softmaxloss = nn.CrossEntropyLoss().cuda() if use_cuda else nn.CrossEntropyLoss()

optimizer4nn = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=0.0005)

scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer4nn, milestones=[64], gamma=0.1)

def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
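The snippet breaks off right after net.train(). A minimal sketch of how such an epoch loop typically continues, using only the objects defined above; which of the network's two heads feeds the classifier is an assumption, not the author's confirmed code:
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer4nn.zero_grad()
        _, logits = net(inputs)  # assumption: the second output is the classifier head
        loss = softmaxloss(logits, targets)
        loss.backward()
        optimizer4nn.step()
    scheduler.step()  # advance the MultiStepLR schedule once per epoch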
Example No. 7
                # (head of this snippet was truncated in the source; the set_postfix call and key name are reconstructed)
                pbar.set_postfix({
                    'acc': '{:.2%}'.format(correct.item() / total)
                })
                pbar.update(1)
        pbar.close()
        acc = 100 * int(correct) / int(total)
        if epoch == args.epoch:
            print('Saving')
            if not os.path.isdir(args.path):
                os.mkdir(args.path)
            torch.save(net.state_dict(), './{}/{}'.format(args.path, acc))


if __name__ == '__main__':
    torch.cuda.empty_cache()  # this line is needed on Windows
    trainloader, testloader = init_dataset()
    net = AlexNetPlusLatent(args.bits)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Use device: " + str(device))
    net.to(device)
    softmaxloss = nn.CrossEntropyLoss().to(device)
    optimizer4nn = torch.optim.SGD(net.parameters(),
                                   lr=args.lr,
                                   momentum=args.momentum,
                                   weight_decay=0.0005)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer4nn,
                                                     milestones=[args.epoch],
                                                     gamma=0.1)
    best_acc = 0
    start_epoch = 1
    if args.pretrained:
        net.load_state_dict(
            torch.load('./{}/{}'.format(args.path, args.pretrained)))  # load path assumed to mirror the save path above
Example No. 8
'''
# (opening of this commented-out export loop was truncated in the source)
for i in range(len(testset)):
    testset[i][0].save('pic/test/%d.png' % i)
    if i % 1000 == 0:
        print(i)
'''
transform_test = transforms.Compose([
    transforms.Resize(227),
    transforms.CenterCrop(227),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

query_pic = Image.open(args.querypath)
query_pic = transform_test(query_pic)
print(query_pic.size())
from net import AlexNetPlusLatent
net = AlexNetPlusLatent(48)
net.load_state_dict(
    torch.load('/disks/sdb/mingyu_ding/pytorch_deephash/{}/{}'.format(
        'model', 'apy')))
use_cuda = torch.cuda.is_available()
query_pic = query_pic.unsqueeze(0)  # add the batch dimension whether or not CUDA is available
if use_cuda:
    net.cuda()
    query_pic = query_pic.cuda()
net.eval()
outputs, _ = net(query_pic)
query_binary = (outputs[0] > 0.5).cpu().numpy()

# print(query_binary)

trn_binary = train_binary.cpu().numpy()
query_result = np.count_nonzero(query_binary != trn_binary,
                                axis=1)  # Hamming distance to every training code
Example No. 9
# (opening of this snippet was truncated in the source; reconstructed from the parallel snippet above)
trainset = datasets.CIFAR10(root='./data',
                            train=True,
                            download=True,
                            transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,
                                          shuffle=True,
                                          num_workers=2)

testset = datasets.CIFAR10(root='./data',
                           train=False,
                           download=True,
                           transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=True,
                                         num_workers=2)

net = AlexNetPlusLatent(args.bits)

use_cuda = torch.cuda.is_available()

if use_cuda:
    net.cuda()

softmaxloss = nn.CrossEntropyLoss().cuda() if use_cuda else nn.CrossEntropyLoss()

ignored_params = list(net.Linear1.parameters()) + list(
    net.sigmoid.parameters()) + list(net.Linear2.parameters())

base_params = list(net.remain.parameters()) + list(net.features.parameters())

optimizer4nn = torch.optim.SGD(
    [{'params': ignored_params},   # per-group learning rates were truncated in the source
     {'params': base_params}],
    lr=args.lr, momentum=args.momentum, weight_decay=0.0005)
Example No. 10
def binary_output(dataloader):
    # (setup lines were truncated in the source; reconstructed to match Example No. 3 above)
    net = AlexNetPlusLatent(48)
    net.load_state_dict(torch.load('./{}/{}'.format('model', 92)))
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        net.cuda()
    full_batch_output = torch.cuda.FloatTensor() if use_cuda else torch.FloatTensor()
    full_batch_label = torch.cuda.LongTensor() if use_cuda else torch.LongTensor()
    net.eval()
    for batch_idx, (inputs, targets) in enumerate(dataloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs, _ = net(inputs)
        full_batch_output = torch.cat((full_batch_output, outputs.data), 0)
        full_batch_label = torch.cat((full_batch_label, targets.data), 0)
    return torch.round(full_batch_output), full_batch_label


trainloader = torch.utils.data.DataLoader(train_data,
                                          batch_size=100,
                                          shuffle=False,
                                          num_workers=2)

train_binary, train_label = binary_output(trainloader)

query_pic = train_data[2][0]
net = AlexNetPlusLatent(48)
net.load_state_dict(torch.load('./{}/{}'.format('model', 92)))
use_cuda = torch.cuda.is_available()
query_pic = query_pic.unsqueeze(0)  # add the batch dimension whether or not CUDA is available
if use_cuda:
    net.cuda()
    query_pic = query_pic.cuda()
net.eval()
outputs, _ = net(query_pic)
query_binary = (outputs[0] > 0.5).cpu().numpy()
trn_binary = train_binary.cpu().numpy()
query_result = np.count_nonzero(query_binary != trn_binary,
                                axis=1)  # no need to divide by the code length
sort_indices = np.argsort(query_result)

print(sort_indices)
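sort_indices orders the training images by Hamming distance to the query. A minimal follow-up sketch (the value of k is arbitrary) retrieves the nearest neighbours:
k = 10  # arbitrary number of neighbours to inspect
for rank, idx in enumerate(sort_indices[:k]):
    image, label = train_data[idx]
    print(rank, int(idx), label, query_result[idx])  # distance in bits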