def train_siamese_network(nclasses, fp16, transform, batch_size, num_epochs):
    """Train a SiameseNetwork with contrastive loss and plot the loss curve.

    Args:
        nclasses: number of classes (not used by this routine directly).
        fp16: if True, wrap model and optimizer with NVIDIA apex AMP (O1).
        transform: torchvision transform handed to the dataloader factory.
        batch_size: training batch size.
        num_epochs: number of passes over the training data.

    Side effects: prints progress, checkpoints via save_model() every 10th
    epoch, and renders the recorded loss history with show_plot().
    """
    start_time = time.time()
    model = SiameseNetwork().cuda()

    print(model)
    print("Start time: ", start_time)

    loss_fn = ContrastiveLoss()
    opt = optim.Adam(model.parameters(), lr=0.0005)

    if fp16:
        print("Memory saving is on using fp16")
        model, opt = amp.initialize(model, opt, opt_level="O1")

    iteration_marks = []
    loss_values = []
    seen_iterations = 0
    loader = get_dataloader(transform, batch_size)
    print("Started training siamese network")

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        for step, batch in enumerate(loader):
            left, right, target = batch
            left, right, target = left.cuda(), right.cuda(), target.cuda()

            opt.zero_grad()

            emb_left, emb_right = model(left, right)
            loss = loss_fn(emb_left, emb_right, target)

            # Under AMP the loss must be scaled before backward; otherwise
            # plain backprop applies.
            if fp16:
                with amp.scale_loss(loss,
                                    opt) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            opt.step()

            # Sample the loss every 10 steps for the final plot.
            if step % 10 == 0:
                seen_iterations += 10
                iteration_marks.append(seen_iterations)
                loss_values.append(loss.item())

        elapsed = time.time() - start_time
        print('Training complete in {:.0f}m {:.0f}s'.format(
            elapsed // 60, elapsed % 60))
        print("Epoch number {} finished, Current loss {}\n".format(
            epoch, loss.item()))

        # Checkpoint on epochs 9, 19, 29, ...
        if epoch % 10 == 9:
            save_model(epoch, model, loss, opt)
    show_plot(iteration_marks, loss_values)
Beispiel #2
0
def main():
    """Train a SiameseNetwork on image pairs from images/all for 10 epochs,
    checkpointing to siamese.pt after every epoch and saving a loss curve.

    NOTE(review): torch.cuda.is_available is missing call parentheses, so
    the condition is always truthy; `device` is also never used -- the
    model and tensors stay on the CPU (the .to(device) line is commented
    out below).
    """
    device = torch.device('cuda:9' if torch.cuda.is_available else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/all')
    # Pair dataset: presumably yields (img_0, img_1, label) triples with
    # images resized to 100x100 -- verify against SiameseNetworkDataset.
    train_dataset = SiameseNetworkDataset(imageFolderDataset = train_dataset_dir, transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))
    vis_dataloader = DataLoader(train_dataset,
                        shuffle=False,
                        num_workers=0,
                        batch_size=10)

    # dataiter = iter(vis_dataloader)
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0],example_batch[1]),0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    #
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(),lr = 0.0005 )
    loss_vals = []
    '''
    Training Starts
    '''
    print('Training started')
    for epoch in range(10):
       loss_epoch = 0
       for i, data in enumerate(vis_dataloader,0):
           img_0, img_1, label = data
           print(i, label)
           # img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
           optimizer.zero_grad()
           out_0, out_1 = net(img_0, img_1)
           loss = criterion(out_0, out_1, label)
           loss_epoch += loss.item()
           loss.backward()
           optimizer.step()
       loss_vals.append(loss_epoch)
       print('Epoch',str(epoch+1), str(loss_epoch))
       print('Epoch done')
       # Checkpoint after every epoch (overwrites the previous file).
       torch.save(net.state_dict(), 'siamese.pt')
    print('Training completed')
    plt.plot(loss_vals)
    plt.savefig('loss_siamese.png')
   

    # ****************************** Training ends ***************************************


    '''
  transform=transforms.Compose(
    [
      transforms.Resize((100,100)),
      transforms.ToTensor()
    ]
  ),
  should_invert=False
)
train_dataloader = DataLoader(siamese_dataset,
                        shuffle=True,
                        num_workers=8,
                        batch_size=Config.train_batch_size)
net = SiameseNetwork().cuda()
print(net)
criterion = ContrastiveLoss()
optimizer = optim.Adam(net.parameters(),lr = 0.0005 )
counter = []
loss_history = [] 
iteration_number= 0
for epoch in range(0,Config.train_number_epochs):
    for i, data in enumerate(train_dataloader,0):
        img0, img1 , label = data
        img0, img1 , label = img0.cuda(), img1.cuda() , label.cuda()
        optimizer.zero_grad()
        output1,output2 = net(img0,img1)
        loss_contrastive = criterion(output1,output2,label)
        loss_contrastive.backward()
        optimizer.step()
        if i %10 == 0 :
            print("Epoch number {}\n Current loss {}\n".format(epoch,loss_contrastive.item()))
            iteration_number +=10
Beispiel #4
0
                # Margin used by forward(): dissimilar pairs only contribute
                # to the loss while their distance is below this value.
                self.margin = margin

            def forward(self, output1, output2, label):
                """Contrastive loss (Hadsell et al. style).

                Pulls similar pairs (label == 0) together and pushes
                dissimilar pairs (label == 1) at least `self.margin`
                apart in embedding space.
                """
                distance = F.pairwise_distance(output1,
                                               output2,
                                               keepdim=True)
                # Similar pairs are penalized by squared distance.
                similar_term = (1 - label) * distance.pow(2)
                # Dissimilar pairs are penalized only inside the margin.
                hinge = torch.clamp(self.margin - distance, min=0.0)
                dissimilar_term = label * hinge.pow(2)

                return (similar_term + dissimilar_term).mean()

        # Optimizer: Adam over every trainable parameter.
        optimizer = optim.Adam(model.parameters(), lr=0.0005)
        #scheduler = StepLR(optimizer, step_size=40, gamma=0.1)
        # Scheduler upgrade - DBY: cosine annealing decays the LR to 0
        # over num_epochs.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=num_epochs, eta_min=0.)
        criterion = ContrastiveLoss()

        # Declare Optimizer

        # Data loaders: training batches plus a batch-size-1 validation
        # loader. NOTE(review): data_loader, DATASET_PATH and batch come
        # from the enclosing scope, which is not visible in this fragment.
        train_dataloader, _ = data_loader(root=DATASET_PATH,
                                          phase='train',
                                          batch_size=batch)
        validate_dataloader, validate_label_file = data_loader(
            root=DATASET_PATH, phase='validate', batch_size=1)
        time_ = datetime.datetime.now()  # wall-clock start of the run
import torchvision
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms

import config
from model import SiameseNetwork, ContrastiveLoss, train
from utils import SiameseNetworkDataset

# Parse run configuration (training dir, batch size, lr, ...).
opt = config.args()
folder_dataset = torchvision.datasets.ImageFolder(root=opt.training_dir)

# Define the image dataset.
transform = transforms.Compose([transforms.Resize((100, 100)),  # pitfall: Resize behaves differently for an int vs a tuple
                                transforms.ToTensor()])
siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,
                                        transform=transform,
                                        should_invert=False)

# Define the image dataloader.
train_dataloader = DataLoader(siamese_dataset,
                              shuffle=True,
                              batch_size=opt.batch_size)

net = SiameseNetwork().cuda()  # build the model and move it to the GPU
print(net)
criterion = ContrastiveLoss(margin=2.0)  # loss function
optimizer = optim.Adam(net.parameters(), lr=opt.lr)  # optimizer

train(net, optimizer, criterion, train_dataloader, opt)
Beispiel #6
0
        should_invert=False)
    """ Train Dataset """
    train_dataloader = DataLoader(siamese_dataset,
                                  shuffle=True,
                                  num_workers=0,
                                  batch_size=config.train_batch_size)

    # Resume from an existing checkpoint if present, otherwise start fresh.
    if os.path.exists(args.savefile):
        print("Loading Existing Model")
        net = torch.load(args.savefile)
    else:
        print("Creating New Model")
        net = SiameseNetwork().cuda()

    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.learnrate)

    counter = []
    loss_history = []
    iteration_number = 0

    total_step = len(train_dataloader)

    # Contrastive training loop over image pairs.
    for epoch in range(0, config.train_number_epochs):
        for i, data in enumerate(train_dataloader, 0):
            img0, img1, label = data
            img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()
            optimizer.zero_grad()
            output1, output2 = net(img0, img1)
            loss_contrastive = criterion(output1, output2, label)
            # NOTE(review): this fragment is truncated here -- no
            # optimizer.step() appears after backward() in the visible text.
            loss_contrastive.backward()
Beispiel #7
0
def main():
    """Train a siamese network on image pairs from images/train, then
    evaluate pair similarity on images/test.

    A pair is predicted "similar" when the embedding distance is <= 0.5;
    accuracy against the dataset labels (0 = similar, 1 = dissimilar) is
    printed at the end, and the weights are saved to siamese_blog.pt.
    """
    # BUG FIX: torch.cuda.is_available is a function; without the call
    # parentheses the condition was always truthy, so 'cuda:9' was chosen
    # even on CPU-only machines (crashing at the first .to(device)).
    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/train')
    train_dataset = SiameseNetworkDataset(
        imageFolderDataset=train_dataset_dir,
        transform=transforms.Compose([transforms.Resize((100, 100)),
                                      transforms.ToTensor()]))
    vis_dataloader = DataLoader(train_dataset,
                                shuffle=False,
                                num_workers=0,
                                batch_size=1)

    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    loss_vals = []
    net.to(device)

    print('Training started')
    for epoch in range(1000):
        loss_epoch = 0
        for i, data in enumerate(vis_dataloader, 0):
            img_0, img_1, label = data
            img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
            optimizer.zero_grad()
            out_0, out_1 = net(img_0, img_1)
            loss = criterion(out_0, out_1, label)
            loss_epoch += loss.item()
            loss.backward()
            optimizer.step()
        loss_vals.append(loss_epoch)
        print('Epoch', str(epoch + 1), str(loss_epoch))
    print('Training completed')
    plt.plot(loss_vals)
    plt.savefig('loss_siamese.png')

    # BUG FIX: the trained weights were never written to 'siamese.pt', yet
    # the test phase below reloads that file -- previously discarding all
    # 1000 epochs of training (or crashing if the file did not exist).
    torch.save(net.state_dict(), 'siamese.pt')

    # ****************************** Training ends ***************************************

    '''
    Testing starts
    '''
    test_dataset_dir = tdatasets.ImageFolder('images/test')
    net.load_state_dict(torch.load('siamese.pt'))
    test_dataset = SiameseNetworkDataset(
        imageFolderDataset=test_dataset_dir,
        transform=transforms.Compose([transforms.Resize((100, 100)),
                                      transforms.ToTensor()]))

    test_dataloader = DataLoader(test_dataset,
                                 shuffle=True,
                                 num_workers=2,
                                 batch_size=1)
    print('Testing starts')
    correct = 0
    total = 0
    test_img_sub = None  # first test image, kept around for visualization
    for i, data in enumerate(test_dataloader, 0):
        img_0, img_1, label = data
        if test_img_sub is None:
            test_img_sub = img_0
        concat = torch.cat((img_0, img_1), 0)
        test_img_sub, img_1, label = test_img_sub.to(device), img_1.to(device), label.to(device)
        img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
        out_0, out_1 = net(img_0, img_1)
        dist = F.pairwise_distance(out_0, out_1)
        # Threshold rule: distance <= 0.5 predicts "similar" (label 0),
        # anything larger predicts "dissimilar" (label 1).
        if dist <= 0.5 and label == 0:
            correct = correct + 1
        elif dist > 0.5 and label == 1:
            # BUG FIX: previously *every* label == 1 pair was counted as
            # correct regardless of distance, inflating the accuracy.
            correct = correct + 1
        total = total + 1
        imshow(torchvision.utils.make_grid(concat), i, 'Dissimilarity: {:.2f}'.format(dist.item()), True)
        test_img_sub = test_img_sub.cpu()
    print(correct / total)

    print('Testing complete')

    torch.save(net.state_dict(), 'siamese_blog.pt')
Beispiel #8
0
    # get data loader
    # (translated) configured in the epoch section -> sampling the
    # combinations applicable to that situation
    # can change the number of cpu core (bottle neck)
    tmp_train_dataloader, _ = data_loader(root=DATASET_PATH,
                                          phase='train',
                                          batch_size=batch)
    # (translated) validation has 10,000 samples in total, so with batch:16
    # every sample is used without exception
    validate_dataloader, validate_label_file = data_loader(root=DATASET_PATH,
                                                           phase='validate',
                                                           batch_size=16)

    num_batches = len(tmp_train_dataloader)

    # set optimizer
    optimizer = optim.Adam(model.parameters(), lr=base_lr, eps=1e-8)

    # set scheduler
    # (translated) StepLR variant:
    # scheduler = StepLR(optimizer, step_size=5, gamma=0.5)
    # warmup + ReduceLROnPlateau
    t_total = len(tmp_train_dataloader) * num_epochs
    warmup_step = int(0.01 * t_total)  # warm up over 1% of total steps
    # decay lr, related to a validation
    scheduler_cosine = CosineAnnealingLR(optimizer, t_total)
    scheduler = GradualWarmupScheduler(optimizer,
                                       1,
                                       warmup_step,
                                       after_scheduler=scheduler_cosine)

    criterion = ContrastiveLoss()
Beispiel #9
0
# Hyperparameters for the Quora duplicate-question siamese model.
batch_size = 32
num_epochs = 50
learning_rate = 0.003
print_freq = 500
model_time = strftime('%H:%M:%S', gmtime())  # timestamp used as the run name
#num-perspective = 20?
#dropout=0.1

#  Dataset
print('loading Quora data...')
data = Quora(batch_size, input_size)
word_vocab_size = len(data.TEXT.vocab)
siamese = SiameseNetwork(input_size, word_vocab_size, hidden_size, num_layers,
                         data)

# Optimize only the parameters that require gradients.
parameters = filter(lambda p: p.requires_grad, siamese.parameters())

# Loss and Optimizer
optimizer = torch.optim.Adam(parameters, lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# TensorBoard logging under runs/<model_time>.
writer = SummaryWriter(log_dir='runs/' + model_time)

siamese.train()
loss, last_epoch = 0, -1
max_dev_acc, max_test_acc = 0, 0

# Keep a deep copy to hold the best-performing model seen so far.
best_model = copy.deepcopy(siamese)

# Train the Model
print('training start!')
Beispiel #10
0
def main():
    """Evaluate a pre-trained siamese network as a nearest-reference
    identifier: training is commented out, saved weights ('siamese.pt')
    are loaded, and each held-out image is compared against every image
    in a reference loader, taking the nearest index as the prediction.

    NOTE(review): torch.cuda.is_available is missing call parentheses, so
    the condition is always truthy; `device` is also unused here, so all
    tensors stay wherever they are created (CPU by default).
    """
    device = torch.device('cuda:9' if torch.cuda.is_available else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = SiameseNetworkDataset(imageFolderDataset=train_dataset_dir,
                                          transform=transforms.Compose([
                                              transforms.Resize((100, 100)),
                                              transforms.ToTensor()
                                          ]))
    vis_dataloader = DataLoader(train_dataset,
                                shuffle=False,
                                num_workers=0,
                                batch_size=1)

    # dataiter = iter(vis_dataloader)
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0],example_batch[1]),0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    #
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    loss_vals = []
    '''
    Training Starts
    '''
    print('Training started')
    # for epoch in range(100):
    #    loss_epoch = 0
    #    for i, data in enumerate(vis_dataloader,0):
    #        img_0, img_1, label = data
    #        # img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
    #        optimizer.zero_grad()
    #        out_0, out_1 = net(img_0, img_1)
    #        loss = criterion(out_0, out_1, label)
    #        loss_epoch += loss.item()
    #        loss.backward()
    #        optimizer.step()
    #    loss_vals.append(loss_epoch)
    #    print('Epoch',str(epoch+1), str(loss_epoch))
    # print('Training completed')
    # plt.plot(loss_vals)
    # plt.savefig('loss_siamese.png')
    #
    # torch.save(net.state_dict(), 'siamese.pt')

    # ****************************** Training ends ***************************************
    '''
    Testing starts
    '''
    net.load_state_dict(torch.load('siamese.pt'))
    # Reference loader -- presumably one image per identity, so the loop
    # index j can serve as the predicted label; verify SiameseTestDataset.
    test_dataset = SiameseTestDataset(train_dataset_dir, \
                                 transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))
    test_vis_dataloader = DataLoader(test_dataset,
                                     shuffle=False,
                                     num_workers=0,
                                     batch_size=1)

    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = FacesDataset(train_dataset_dir, \
                                 transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))

    # Held-out split used for the accuracy measurement.
    _, test = split_train_val(train_dataset)
    test_dataloader = DataLoader(test,
                                 shuffle=False,
                                 num_workers=0,
                                 batch_size=1)
    correct = 0
    total = 0
    # For each held-out image, find the reference index with the smallest
    # embedding distance; a prediction counts as correct when that index
    # matches the image's label.
    for i, data in enumerate(test_dataloader, 0):
        total += 1
        img_1, labels = data
        min_dist = float("inf")
        pred = -1
        print('Testing begins', i)
        for j, data_test_vis in enumerate(test_vis_dataloader, 0):
            img_0 = data_test_vis
            out_0, out_1 = net(img_0, img_1)
            dist = F.pairwise_distance(out_0, out_1)
            if min_dist > dist:
                min_dist = dist
                pred = j
        if pred == labels.item():
            correct += 1
        print('Testing ends', i, pred)

    print('Accuracy: ', str(correct / total))