def main():
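    # Training-time augmentation: random horizontal flip, resize the short side
    # to 256, take a random 224x224 crop, then normalize with the usual
    # ImageNet mean/std statistics.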
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Dataset
    train_dataset = MSCOCO(root='./data/coco/',
                           train=True,
                           transform=train_transform)

    test_dataset = MSCOCO(root='./data/coco/',
                          train=False,
                          transform=test_transform)

    database_dataset = MSCOCO(root='./data/coco/',
                              train=False,
                              transform=test_transform,
                              database_bool=True)

    # Data Loader
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4)

    database_loader = torch.utils.data.DataLoader(dataset=database_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=4)

    cnn = CNN(encode_length=encode_length)
    # Loss and Optimizer
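    # (DFHLoss_margin is the margin-based DFH objective; eta and margin are
    # module-level hyperparameters.)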
    criterion = DFHLoss_margin(eta, margin)
    optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=learning_rate,
                                momentum=0.9,
                                weight_decay=5e-4)

    best_top = 0.0
    best = 0.0

    # Initialize for centers
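    # U holds the continuous codes of all N training samples (one column per
    # image), S is the pairwise label-similarity matrix (S_ij = 1 when samples
    # i and j share at least one label), and Y is the transposed label matrix.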
    N = len(train_loader.dataset)
    U = torch.zeros(encode_length, N)
    train_targets = train_loader.dataset.get_onehot_targets()
    S = (train_targets @ train_targets.t() > 0).float()
    Y = train_targets.t()

    # multi-label process
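    # Divide every label column by its number of active labels so that samples
    # with many labels contribute comparably in the center term CenTer @ Y.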
    Multi_Y = Y.sum(0).expand(Y.size())
    Multi_Y = 1. / Multi_Y
    Y = Multi_Y * Y

    Relax_center = torch.zeros(encode_length, num_classes)
    CenTer = Relax_center

    # Train the Model
    for epoch in range(num_epochs):
        cnn.cuda().train()
        adjust_learning_rate(optimizer, epoch)
        for i, (images, labels, index) in enumerate(train_loader):
            images = Variable(images.cuda())
            labels = Variable(labels.cuda().long())

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            U_batch = cnn(images).t()

            # Prepare
            U[:, index] = U_batch.cpu().data
            batchY = Y[:, index]
            batchS = S[:, index]

            # B-step
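            # The batch binary codes have a closed-form update,
            # B = sign(mu * CenTer @ Y + U), trading off closeness to the center
            # projection against the current network outputs.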
            batchB = (mu * CenTer @ batchY + U_batch.cpu()).sign()

            # C-step: two ways to update the class centers - relaxed or
            # discrete (mu, vul, and nta are module-level hyperparameters).
            #
            # First option: relaxed (real-valued) centers
            # CenTer, Relax_center = Relaxcenter(Variable(batchY.cuda(), requires_grad=False),
            #                                    Variable(batchB.cuda(), requires_grad=False),
            #                                    Variable(Relax_center.cuda(), requires_grad=True),
            #                                    mu, vul, nta)
            #
            # Second option (used here): discrete centers
            CenTer = Discretecenter(Variable(batchY.cuda(), requires_grad=False),
                                    Variable(batchB.cuda(), requires_grad=False),
                                    Variable(CenTer.t().cuda(), requires_grad=True),
                                    mu, vul)

            # U-step: backward pass and optimizer update
            loss = criterion(U_batch, Variable(U.cuda()),
                             Variable(batchS.cuda()), Variable(batchB.cuda()))

            loss.backward()
            optimizer.step()

        # Test the Model
        if (epoch + 1) % 10 == 0:
            cnn.eval()
            retrievalB, retrievalL, queryB, queryL = compress(
                database_loader, test_loader, cnn, classes=num_classes)

            print(np.shape(retrievalB))
            print(np.shape(retrievalL))
            print(np.shape(queryB))
            print(np.shape(queryL))

            print('-----calculate top 5000 map-------')
            result = calculate_top_map(qB=queryB,
                                       rB=retrievalB,
                                       queryL=queryL,
                                       retrievalL=retrievalL,
                                       topk=5000)
            print(result)

            if result > best_top:
                best_top = result
                mAP_all_top[code_index, 0] = result
                print('-------------Best mAP for all bits-------------')
                print(mAP_all_top)


def main():

    img_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda tensor: min_max_normalization(tensor, 0, 1)),
        transforms.Lambda(lambda tensor: tensor_round(tensor))
    ])
    

    dataset = MNIST('./data', train=True, transform=img_transform, download=True)
    train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    testset = MNIST('./data', train=False, transform=img_transform, download=True)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=True)
    
    # visualize the distributions of the continuous feature U over 5,000 images
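    # Take 500 test images from each of the 10 digit classes (5,000 in total)
    # and serve them through a fixed-order loader for plotting.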
    visuadata = MNIST('./data', train=False, transform=img_transform, download=True)
    X = dataset.data
    L = np.array(dataset.targets)
    
    first = True

    for label in range(10):
        index = np.where(L == label)[0]

        N = index.shape[0]
        np.random.seed(0)
        perm = np.random.permutation(N)
        index = index[perm]

        data = X[index[0:500]]
        labels = L[index[0:500]]
        if first:
            visualization_L = labels
            visualization_data = data
        else:
            visualization_L = np.concatenate((visualization_L, labels))
            visualization_data = torch.cat((visualization_data, data))

        first = False

    visuadata.data = visualization_data
    visuadata.targets = visualization_L

    # Data Loader
    visualization_loader = DataLoader(dataset=visuadata,
                                      batch_size=batch_size,
                                      shuffle=False,
                                      num_workers=0)
        
    
    
    model = autoencoder(encode_length=encode_length)
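    # The autoencoder returns the reconstruction, a continuous latent h, and a
    # binary-like code b; reconstruction of the rounded 0/1 MNIST pixels is
    # trained with binary cross-entropy plus a bit-balance regularizer below.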
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(
        model.parameters(), lr=learning_rate, weight_decay=1e-5)

    
    for epoch in range(num_epochs):
        print('--------training epoch {}--------'.format(epoch))        
        adjust_learning_rate(optimizer, epoch)    
        
        # train the model with the Adam optimizer defined above
        for i, (img, _) in enumerate(train_loader):   
            img = img.view(img.size(0), -1)
            img = Variable(img)
  
            # ===================forward=====================
            output, h, b = model(img)
            loss_BCE = criterion(output, img)
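            # Bit-balance regularizer: Tcode.mm(onesvec) / h.size(0) is the mean
            # of each code bit over the batch; penalizing its square pushes each
            # bit toward an even split across the batch.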
            onesvec = Variable(torch.ones(h.size(0), 1))
            Tcode = torch.transpose(b, 1, 0)
            loss_reg = torch.mean(torch.pow(Tcode.mm(onesvec) / h.size(0), 2)) / 2
            loss = loss_BCE + Alpha * loss_reg
            # ===================backward====================
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
        # Test the Model using testset            
        if (epoch + 1) % 1 == 0:


            '''
            Calculate the mAP over test set            
            '''             

            retrievalB, retrievalL, queryB, queryL = compress(train_loader, testloader, model)            
            result_map = calculate_map(qB=queryB, rB=retrievalB, queryL=queryL, retrievalL=retrievalL)
            print('---{}_mAP: {}---'.format(name, result_map))  
            
          
            
            '''
            Visualization of the latent variable over 5,000 images.
            In this setting, we set encode_length = 3.
            '''
            if encode_length == 3:
                z_buf = list([])
                label_buf = list([])
                for ii, (img, labelb) in enumerate(visualization_loader):
                    img = img.view(img.size(0), -1)
                    img = Variable(img)
                    # ===================forward=====================
                    _, qz, _ = model(img)        
                    z_buf.extend(qz.cpu().data.numpy())
                    label_buf.append(labelb)
                X = np.vstack(z_buf)
                Y = np.hstack(label_buf)
                plot_latent_variable3d(X, Y, epoch, name)   
Example 3

        # (fragment: the start of this snippet, including the surrounding epoch
        #  loop, the test-set loop, and the `correct`/`total` counters, was cut
        #  off in the source)
        outputs, _, _ = cnn(images)
        _, predicted = torch.max(outputs.cpu().data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()

    print('Test Accuracy of the model: %.2f %%' % (100.0 * correct / total))

    if 1.0 * correct / total > best:
        best = 1.0 * correct / total
        torch.save(cnn.state_dict(), 'temp.pkl')
        
    print('best: %.2f %%' % (best * 100.0))


# Save the Trained Model
torch.save(cnn.state_dict(), 'cifar2.pkl')


# Calculate MAP
#cnn.load_state_dict(torch.load('temp.pkl'))
cnn.eval()
retrievalB, retrievalL, queryB, queryL = compress(train_loader, test_loader, cnn)
print(np.shape(retrievalB))
print(np.shape(retrievalL))
print(np.shape(queryB))
print(np.shape(queryL))

print('---calculate map---')
result = calculate_map(qB=queryB, rB=retrievalB, queryL=queryL, retrievalL=retrievalL)
print(result)
Example 4

    # (fragment: the start of this snippet, including the epoch loop and the
    #  test-accuracy loop, was cut off in the source)
    print('Test Accuracy of the model: %.2f %%' % (100.0 * correct / total))

    if 1.0 * correct / total > best:
        best = 1.0 * correct / total
        torch.save(cnn.state_dict(), 'temp.pkl')

    print('best: %.2f %%' % (best * 100.0))

# Save the Trained Model
torch.save(cnn.state_dict(), 'imagenet.pkl')

# Calculate MAP
#cnn.load_state_dict(torch.load('temp.pkl'))
cnn.eval()
retrievalB, retrievalL, queryB, queryL = compress(database_loader,
                                                  test_loader,
                                                  cnn,
                                                  classes=num_classes)
print(np.shape(retrievalB))
print(np.shape(retrievalL))
print(np.shape(queryB))
print(np.shape(queryL))
"""
print('---calculate map---')
result = calculate_map(qB=queryB, rB=retrievalB, queryL=queryL, retrievalL=retrievalL)
print(result)
"""
print('---calculate top map---')
# NOTE: the end of this snippet was cut off in the source; the call is closed
# here with a module-level `topk` placeholder, following the pattern of the
# other examples.
result = calculate_top_map(qB=queryB,
                           rB=retrievalB,
                           queryL=queryL,
                           retrievalL=retrievalL,
                           topk=topk)
print(result)

def main():

    resnet_in = generate_model(opt)
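    # Drop the backbone's final fc layer (replace it with an identity) so that
    # ReNet34 can attach a hashing head on top of the 3D ResNet features.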
    resnet_in.module.fc = Identity()
    model = ReNet34(resnet_in, encode_length=encode_length)

    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)

    if not opt.no_train:
        assert opt.train_crop in ['random', 'corner', 'center']
        if opt.train_crop == 'random':
            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(opt.scales,
                                               opt.sample_size,
                                               crop_positions=['c'])

        ## train loader
        spatial_transform = Compose([
            crop_method,
            RandomHorizontalFlip(),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)

        ## test loader
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)

        target_transform = ClassLabel()
        test_data = get_test_set(opt, spatial_transform, temporal_transform,
                                 target_transform)
        test_loader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=opt.batch_size,
                                                  shuffle=False,
                                                  num_workers=opt.n_threads,
                                                  pin_memory=True)

        ## Database loader
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)
        target_transform = ClassLabel()
        validation_data = get_validation_set(opt, spatial_transform,
                                             temporal_transform,
                                             target_transform)
        database_loader = torch.utils.data.DataLoader(
            validation_data,
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=opt.n_threads,
            pin_memory=True)

        if opt.nesterov:
            dampening = 0
        else:
            dampening = opt.dampening

        optimizer = optim.SGD(model.parameters(),
                              lr=opt.learning_rate,
                              momentum=opt.momentum,
                              dampening=dampening,
                              weight_decay=opt.weight_decay,
                              nesterov=opt.nesterov)
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   'min',
                                                   patience=opt.lr_patience)

    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']

        opt.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if not opt.no_train:
            optimizer.load_state_dict(checkpoint['optimizer'])
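            # Make sure the restored optimizer state tensors live on the GPU.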
            for state in optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.cuda()

    print('run')
    for epoch in range(opt.begin_epoch, opt.n_epochs + 1):
        model.cuda().train()
        for i, (images, labels) in enumerate(train_loader):

            images = Variable(images.cuda())
            labels = Variable(labels.cuda().long())

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            x, _, b = model(images)
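            # b and x are the binary-like codes and continuous features; the
            # loss below matches their pairwise cosine similarities across the
            # two halves of the batch (assumes an even batch size).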

            target_b = F.cosine_similarity(b[:int(labels.size(0) / 2)],
                                           b[int(labels.size(0) / 2):])
            target_x = F.cosine_similarity(x[:int(labels.size(0) / 2)],
                                           x[int(labels.size(0) / 2):])
            loss = F.mse_loss(target_b, target_x)
            loss.backward()
            optimizer.step()

        # ReduceLROnPlateau expects the monitored metric; step once per epoch
        # with the last batch loss instead of once per iteration.
        scheduler.step(loss.item())

        # Test the Model
        if (epoch + 1) % 10 == 0:
            model.eval()
            retrievalB, retrievalL, queryB, queryL = compress(
                database_loader, test_loader, model)
            result_map = calculate_top_map(qB=queryB,
                                           rB=retrievalB,
                                           queryL=queryL,
                                           retrievalL=retrievalL,
                                           topk=100)
            print('--------mAP@100: {}--------'.format(result_map))