def get_siamese_model(nclasses):
    # Load a trained SiameseNetwork checkpoint and switch it to inference mode.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = SiameseNetwork()
    # model.classifier.classifier = nn.Sequential()
    checkpoint = torch.load('./model/siamese/net_59.pth', map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.eval()
    return model
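
A minimal usage sketch (the tensors and the nclasses value are illustrative, the 299x299 input size is borrowed from Example #2's transform, and the model is assumed to map an image pair to a pair of embeddings, as the training loops below do):

import torch
import torch.nn.functional as F

model = get_siamese_model(nclasses=200)               # nclasses value is illustrative
device = next(model.parameters()).device
img_1 = torch.randn(1, 3, 299, 299, device=device)    # stand-ins for preprocessed images
img_2 = torch.randn(1, 3, 299, 299, device=device)
with torch.no_grad():
    emb_1, emb_2 = model(img_1, img_2)
distance = F.pairwise_distance(emb_1, emb_2)           # small distance => similar pair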
Example #2
def train(args):
    # basic arguments.
    ngpu = args.ngpu
    margin = args.margin
    num_epochs = args.num_epochs
    train_batch_size = args.train_batch_size
    test_batch_size = args.test_batch_size
    gamma = args.gamma # for learning rate decay

    root_dir = args.root_dir
    image_txt = args.image_txt
    train_test_split_txt = args.train_test_split_txt
    label_txt = args.label_txt
    ckpt_dir = args.ckpt_dir
    eval_step = args.eval_step


    pretrained = args.pretrained
    aux_logits = args.aux_logits
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits':aux_logits}

    # network and loss
    siamese_network = SiameseNetwork(**kargs)
    gpu_number = torch.cuda.device_count()
    if device.type == 'cuda' and gpu_number > 1:
        siamese_network = nn.DataParallel(siamese_network, list(range(torch.cuda.device_count())))
    siamese_network.to(device)
    contrastive_loss = ContrastiveLoss(margin=margin)

    # params = siamese_network.parameters()
    # optimizer = optim.Adam(params, lr=0.0005)
    # optimizer = optim.SGD(params, lr=0.01, momentum=0.9)

    # Use different learning rates: small for the pretrained Inception backbone,
    # larger for the newly initialized head.
    backbone = siamese_network.module if gpu_number > 1 else siamese_network
    optimizer = optim.SGD([
        {'params': backbone.inception_v3.parameters()},
        {'params': backbone.main.parameters(), 'lr': 1e-2}
    ], lr=1e-5, momentum=0.9)

    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma, last_epoch=-1)


    transform = transforms.Compose([transforms.Resize((299, 299)),
                                    transforms.CenterCrop(299),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
                                  )
    cub_dataset = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=True, offset=1)
    dataloader = DataLoader(dataset=cub_dataset, batch_size=train_batch_size, shuffle=True, num_workers=4)

    cub_dataset_eval = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=False, offset=1)
    dataloader_eval = DataLoader(dataset=cub_dataset_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)

    for epoch in range(num_epochs):
        if epoch == 0:
            feature_set, label_set = get_feature_and_label(siamese_network, dataloader_eval, device)
            evaluation(feature_set, label_set)
        siamese_network.train()
        for i, data in enumerate(dataloader, 0):
            img_1, img_2, sim_label = data['img_1'].to(device), data['img_2'].to(device), data['sim_label'].type(torch.FloatTensor).to(device)
            optimizer.zero_grad()
            output_1, output_2 = siamese_network(img_1, img_2)
            loss = contrastive_loss(output_1, output_2, sim_label)
            loss.backward()
            optimizer.step()

            if i % 20 == 0 and i > 0:
                print("{}, Epoch [{:3d}/{:3d}], Iter [{:3d}/{:3d}], Current loss: {}".format(
                      datetime.datetime.now(), epoch, num_epochs, i, len(dataloader), loss.item()))
        scheduler.step()  # apply the exponential learning-rate decay once per epoch
        if epoch % eval_step == 0:
            print("Start evaluation")
            feature_set, label_set = get_feature_and_label(siamese_network, dataloader_eval, device)
            evaluation(feature_set, label_set)
            # Unwrap DataParallel only when it was actually applied.
            state_dict = siamese_network.module.state_dict() if gpu_number > 1 else siamese_network.state_dict()
            torch.save(state_dict, os.path.join(ckpt_dir, 'model_' + str(epoch) + '_.pth'))
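
ContrastiveLoss itself is not shown in these examples. Below is a minimal sketch of the standard contrastive loss that matches the call contrastive_loss(output_1, output_2, sim_label) above; the label convention (0 = similar pair, 1 = dissimilar pair) is an assumption carried over from Example #3's test loop:

import torch
import torch.nn as nn
import torch.nn.functional as F

class ContrastiveLoss(nn.Module):
    # Sketch of L = (1 - y) * d^2 + y * max(margin - d, 0)^2,
    # where y = 1 marks a dissimilar pair and d is the Euclidean distance.
    def __init__(self, margin=1.0):
        super().__init__()
        self.margin = margin

    def forward(self, output_1, output_2, sim_label):
        dist = F.pairwise_distance(output_1, output_2)
        positive = (1 - sim_label) * dist.pow(2)                              # pull similar pairs together
        negative = sim_label * torch.clamp(self.margin - dist, min=0).pow(2)  # push dissimilar pairs apart
        return (positive + negative).mean()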
Example #3
def main():
    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')  # assumes a machine with at least 10 GPUs
    train_dataset_dir = tdatasets.ImageFolder('images/train')
    train_dataset = SiameseNetworkDataset(imageFolderDataset = train_dataset_dir, transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))
    vis_dataloader = DataLoader(train_dataset,
                        shuffle=False,
                        num_workers=0,
                        batch_size=1)

    # dataiter = iter(vis_dataloader)
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0],example_batch[1]),0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    #
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    net = SiameseNetwork().to(device)
    criterion = ContrastiveLoss()
    # Build the optimizer after the network is on its target device.
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    loss_vals = []
    '''
    Training Starts
    '''
    print('Training started')
    for epoch in range(1000):
        loss_epoch = 0
        for i, data in enumerate(vis_dataloader, 0):
            img_0, img_1, label = data
            img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
            optimizer.zero_grad()
            out_0, out_1 = net(img_0, img_1)
            loss = criterion(out_0, out_1, label)
            loss_epoch += loss.item()
            loss.backward()
            optimizer.step()
        loss_vals.append(loss_epoch)
        print('Epoch', epoch + 1, loss_epoch)
    print('Training completed')
    plt.plot(loss_vals)
    plt.savefig('loss_siamese.png')
    # Save the trained weights so the testing phase below can reload them.
    torch.save(net.state_dict(), 'siamese.pt')

    # ****************************** Training ends ***************************************


    '''
    Testing starts
    '''
    

    test_dataset_dir = tdatasets.ImageFolder('images/test')
    net.load_state_dict(torch.load('siamese.pt'))
    test_dataset = SiameseNetworkDataset(imageFolderDataset = test_dataset_dir, transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))

    test_dataloader = DataLoader(test_dataset,
                        shuffle=True,
                        num_workers=2,
                        batch_size=1)
    print('Testing starts')
    net.eval()  # switch to inference mode before evaluating
    correct = 0
    total = 0
    test_img_sub = None
    for i, data in enumerate(test_dataloader, 0):
        img_0, img_1, label = data
        if test_img_sub is None:
            test_img_sub = img_0
        #concat = torch.cat((test_img_sub, img_1), 0)
        concat = torch.cat((img_0, img_1), 0)
        img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
        with torch.no_grad():
            out_0, out_1 = net(img_0, img_1)
        dist = F.pairwise_distance(out_0, out_1)
        # label == 0 marks a same-class pair; a prediction is correct when the
        # distance threshold agrees with the ground-truth label.
        if dist <= 0.5 and label == 0:
            correct = correct + 1
        elif dist > 0.5 and label == 1:
            correct = correct + 1
        total = total + 1
        imshow(torchvision.utils.make_grid(concat), i, 'Dissimilarity: {:.2f}'.format(dist.item()), True)
#        dist = dist.cpu().detach()
#        print(dist.numpy())
#        dist = torch.sigmoid(dist)
#        print(dist.numpy())
    print('Accuracy: {:.4f}'.format(correct / total))


    print('Testing complete')

    torch.save(net.state_dict(), 'siamese_blog.pt')
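
SiameseNetworkDataset is also not shown. A minimal sketch consistent with how the loops above unpack it (it yields (img_0, img_1, label) with label = 1 for a cross-class pair; the roughly 50/50 same/different sampling is an assumption):

import random
import numpy as np
import torch
from torch.utils.data import Dataset

class SiameseNetworkDataset(Dataset):
    # Sketch: draws image pairs from a torchvision ImageFolder dataset.
    def __init__(self, imageFolderDataset, transform=None):
        self.imageFolderDataset = imageFolderDataset
        self.transform = transform

    def __getitem__(self, index):
        img0_tuple = random.choice(self.imageFolderDataset.imgs)
        want_same = random.randint(0, 1)  # roughly balance same/different pairs
        while True:
            img1_tuple = random.choice(self.imageFolderDataset.imgs)
            if (img0_tuple[1] == img1_tuple[1]) == bool(want_same):
                break
        img_0 = self.imageFolderDataset.loader(img0_tuple[0])
        img_1 = self.imageFolderDataset.loader(img1_tuple[0])
        if self.transform is not None:
            img_0 = self.transform(img_0)
            img_1 = self.transform(img_1)
        # label 0 = same class, 1 = different class (matches the test above)
        label = torch.from_numpy(np.array([int(img0_tuple[1] != img1_tuple[1])], dtype=np.float32))
        return img_0, img_1, label

    def __len__(self):
        return len(self.imageFolderDataset.imgs)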
Example #4
ngpu = 2
margin = 1.
num_epochs = 1000
train_batch_size = 64
test_batch_size = 32
gamma = 0.98  # for learning rate decay
pretrained = False
aux_logits = False
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits': aux_logits}
siamese_network = SiameseNetwork(**kargs)
if device.type == 'cuda' and torch.cuda.device_count() > 1:
    siamese_network = nn.DataParallel(siamese_network,
                                      list(range(torch.cuda.device_count())))
siamese_network.to(device)
contrastive_loss = ContrastiveLoss(margin=margin)

# params = siamese_network.parameters()

# optimizer = optim.Adam(params, lr=0.0005)
# optimizer = optim.SGD(params, lr=0.01, momentum=0.9)

# using different lr
backbone = siamese_network.module if torch.cuda.device_count() > 1 else siamese_network
optimizer = optim.SGD(
    [{
        'params': backbone.inception_v3.parameters()
    }, {
        'params': backbone.main.parameters(),
        'lr': 1e-3
    }],
    lr=1e-5, momentum=0.9)  # base lr and momentum follow Example #2
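
To check that each parameter group received its own learning rate, you can inspect optimizer.param_groups (a small sketch; the expected values follow the settings above):

# Each dict in optimizer.param_groups carries its own 'lr'; groups without an
# explicit 'lr' inherit the base value passed to the optimizer.
for idx, group in enumerate(optimizer.param_groups):
    print('group {}: lr = {}'.format(idx, group['lr']))
# Expected here: group 0 -> 1e-05 (inception_v3), group 1 -> 0.001 (main)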
Example #5
File: main.py  Project: gxdai/FCS_pytorch
def train(args):
    # basic arguments.
    ngpu = args.ngpu
    margin = args.margin
    manual_seed = args.manual_seed
    torch.manual_seed(manual_seed)
    mean_value = args.mean_value
    std_value = args.std_value
    print("margin = {:5.2f}".format(margin))
    print("manual_seed = {:5.2f}".format(manual_seed))
    print("mean_value = {:5.2f}".format(mean_value))
    print("std_value = {:5.2f}".format(std_value))
    num_epochs = args.num_epochs
    train_batch_size = args.train_batch_size
    test_batch_size = args.test_batch_size
    gamma = args.gamma # for learning rate decay
    learning_rate = args.learning_rate
    learning_rate2 = args.learning_rate2


    loss_type = args.loss_type
    dataset_name = args.dataset_name
    pair_type = args.pair_type
    mode = args.mode
    weight_file = args.weight_file
    print("pair_type = {}".format(pair_type))
    print("loss_type = {}".format(loss_type))
    print("mode = {}".format(mode))
    print("weight_file = {}".format(weight_file))

    root_dir = args.root_dir
    image_txt = args.image_txt
    train_test_split_txt = args.train_test_split_txt
    label_txt = args.label_txt
    ckpt_dir = args.ckpt_dir
    eval_step = args.eval_step
    display_step = args.display_step
    embedding_size = args.embedding_size


    pretrained = args.pretrained
    aux_logits = args.aux_logits
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits':aux_logits, 'embedding_size': embedding_size}

    # create directory
    model_dir = os.path.join(ckpt_dir, dataset_name, loss_type, str(int(embedding_size)))
    print("model_dir = {}".format(model_dir))
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    # network and loss
    siamese_network = SiameseNetwork(**kargs)


    first_group, second_group = siamese_network.separate_parameter_group()

    param_lr_dict = [
               {'params': first_group, 'lr': learning_rate2},
               {'params': second_group, 'lr': learning_rate}
              ]

    gpu_number = torch.cuda.device_count()
    if device.type == 'cuda' and gpu_number > 1:
        siamese_network = nn.DataParallel(siamese_network, list(range(torch.cuda.device_count())))
    siamese_network.to(device)

    # contrastive_loss = ContrastiveLoss(margin=margin)

    # params = siamese_network.parameters()

    print("args.optimizer = {:10s}".format(args.optimizer))
    print("learning_rate = {:5.5f}".format(learning_rate))
    print("learning_rate2 = {:5.5f}".format(learning_rate2))
    optimizer = configure_optimizer(param_lr_dict, optimizer=args.optimizer)

    # using different lr
    # scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma, last_epoch=-1)


    transform = transforms.Compose([transforms.Resize((299, 299)),
                                    transforms.CenterCrop(299),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
                                  )

    if dataset_name == 'cub200':
        """
        print("dataset_name = {:10s}".format(dataset_name))
        print(root_dir)
        print(image_txt)
        print(train_test_split_txt)
        print(label_txt)
        """
        dataset_train = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=True, offset=1)
        dataset_eval = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=False, offset=1)
    elif dataset_name == 'online_product':
        """
        print("dataset_name = {:10s}".format(dataset_name))
        """
        dataset_train = OnlineProductDataset(root_dir, train_txt=image_txt, test_txt=train_test_split_txt, transform=transform, is_train=True, offset=1)
        dataset_eval = OnlineProductDataset(root_dir, train_txt=image_txt, test_txt=train_test_split_txt, transform=transform, is_train=False, offset=1)
    elif dataset_name == "car196":
        print("dataset_name = {}".format(dataset_name))
        dataset_train = CarDataset(root_dir, image_info_mat=image_txt, transform=transform, is_train=True, offset=1)
        dataset_eval = CarDataset(root_dir, image_info_mat=image_txt, transform=transform, is_train=False, offset=1)


    dataloader = DataLoader(dataset=dataset_train, batch_size=train_batch_size, shuffle=False, num_workers=4)
    dataloader_eval = DataLoader(dataset=dataset_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)

    log_for_loss = []

    if mode == 'evaluation':
        print("Do a one-time evaluation and exit")
        print("Load pretrained model")
        # Unwrap DataParallel only when it was actually applied.
        network = siamese_network.module if gpu_number > 1 else siamese_network
        network.load_state_dict(torch.load(weight_file))
        print("Finish loading")
        print("Calculating features")
        feature_set, label_set, path_set = get_feature_and_label(siamese_network, dataloader_eval, device)
        rec_pre = evaluation(feature_set, label_set)
        # np.save("car196_rec_pre_ftl.npy", rec_pre)
        # for visualization
        sum_dict = {'feature': feature_set, 'label': label_set, 'path': path_set}
        np.save('car196_fea_label_path.npy', sum_dict)
        print("Finish evaluation")
        sys.exit()

    for epoch in range(num_epochs):
        if epoch == 0:
            feature_set, label_set, _ = get_feature_and_label(siamese_network, dataloader_eval, device)
            # distance_type: Euclidean or cosine
            rec_pre = evaluation(feature_set, label_set, distance_type='cosine')
        siamese_network.train()
        for i, data in enumerate(dataloader, 0):
            # img_1, img_2, sim_label = data['img_1'].to(device), data['img_2'].to(device), data['sim_label'].type(torch.FloatTensor).to(device)
            img_1, img_2, label_1, label_2 = data['img_1'].to(device), data['img_2'].to(device), data['label_1'].to(device), data['label_2'].to(device)
            optimizer.zero_grad()
            output_1, output_2 = siamese_network(img_1, img_2)
            pair_dist, pair_sim_label = calculate_distance_and_similariy_label(output_1, output_2, label_1, label_2, sqrt=True, pair_type=pair_type)
            if loss_type == "contrastive_loss":
                loss, positive_loss, negative_loss = contrastive_loss(pair_dist, pair_sim_label, margin)
            elif loss_type == "focal_contrastive_loss":
                loss, positive_loss, negative_loss = focal_contrastive_loss(pair_dist, pair_sim_label, margin, mean_value, std_value)
            elif loss_type == "triplet_loss":
                loss, positive_loss, negative_loss = triplet_loss(pair_dist, pair_sim_label, margin)
            elif loss_type == "focal_triplet_loss":
                loss, positive_loss, negative_loss = focal_triplet_loss(pair_dist, pair_sim_label, margin, mean_value, std_value)
            elif loss_type == "angular_loss":
                center_output = (output_1 + output_2)/2.
                pair_dist_2, _ = calculate_distance_and_similariy_label(center_output, output_2, label_1, label_2, sqrt=True, pair_type=pair_type)
                # the angular margin is 45 degrees
                loss, positive_loss, negative_loss = angular_loss(pair_dist, pair_dist_2, pair_sim_label, 45)
            else:
                print("Unknown loss function")
                sys.exit()

            # try my own customized loss function
            # loss = contrastive_loss(output_1, output_2, pair_sim_label)
            loss.backward()
            optimizer.step()
            log_for_loss.append(loss.detach().item())
            if i % display_step == 0 and i > 0:
                print("{}, Epoch [{:3d}/{:3d}], Iter [{:3d}/{:3d}], Loss: {:6.5f}, Positive loss: {:6.5f}, Negative loss: {:6.5f}".format(
                      datetime.datetime.now(), epoch, num_epochs, i, len(dataloader), loss.item(), positive_loss.item(), negative_loss.item()))
        if epoch % eval_step == 0:
            print("Start evaluation")
            # np.save(loss_type +'.npy', log_for_loss)
            feature_set, label_set, _ = get_feature_and_label(siamese_network, dataloader_eval, device)
            # distance_type: Euclidean or cosine
            rec_pre = evaluation(feature_set, label_set, distance_type='cosine')
            # Unwrap DataParallel only when it was actually applied.
            state_dict = siamese_network.module.state_dict() if gpu_number > 1 else siamese_network.state_dict()
            torch.save(state_dict, os.path.join(model_dir, 'model_' + str(epoch) + '_.pth'))
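
calculate_distance_and_similariy_label comes from the gxdai/FCS_pytorch project and is not shown here. A minimal sketch of the behavior the loop above relies on, assuming the simplest pair_type where row i of the first batch is paired with row i of the second:

import torch

def calculate_distance_and_similariy_label(output_1, output_2, label_1, label_2,
                                           sqrt=True, pair_type='vector'):
    # Sketch only: the project function supports further pair_type modes.
    squared_dist = torch.sum((output_1 - output_2) ** 2, dim=1)
    pair_dist = torch.sqrt(squared_dist + 1e-12) if sqrt else squared_dist  # eps guards the sqrt gradient
    pair_sim_label = (label_1 == label_2).float()  # 1 = same class, 0 = different
    return pair_dist, pair_sim_label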