Example #1
class TensorPredictor:

  def __init__(self, checkpoint_path):

    self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    self.model = EmbeddingNet()
    self.model.load_state_dict(torch.load(checkpoint_path, map_location=self.device))

    self.model.to(self.device)
    self.model.eval()    

  def predict(self, image_list):
    
    tensor_list = []
    
    for image_tensor in image_list:
      #image_tensor = torch.index_select(image_tensor, 2, torch.tensor([2, 1, 0], device=self.device))  # BGR -> RGB?
      # resize to 224x224; interpolate expects a batch dim, so add and drop it
      image_tensor = F.interpolate(torch.unsqueeze(image_tensor, 0), size=(224, 224))[0]
      tensor_list.append(image_tensor)
    
    input_tensor = torch.stack(tensor_list)

    with torch.no_grad():
      input_tensor = input_tensor.to(self.device)      
      embeddings = self.model(input_tensor)
      
    return embeddings
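
# Hypothetical usage of TensorPredictor above (not part of the original example):
# the checkpoint path and image sizes are placeholders, and EmbeddingNet is assumed
# to map a batch of 3xHxW float tensors to embedding vectors.
import torch

predictor = TensorPredictor('embeddingnet.pth')
images = [torch.rand(3, 256, 320), torch.rand(3, 480, 640)]  # CHW float tensors
embeddings = predictor.predict(images)  # shape: [2, embedding_dim]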
Example #2
class Predictor:

  def __init__(self, checkpoint_path):

    self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    self.model = EmbeddingNet()
    self.model.load_state_dict(torch.load(checkpoint_path, map_location=self.device))

    self.model.to(self.device)
    self.model.eval()
    
    self.transform = transforms.Compose([transforms.Lambda(lambda image: image.convert('RGB')),
                                transforms.Resize((224, 224)),
                                transforms.ToTensor()])

  def _preprocess(self, image):
    image = transforms.ToPILImage()(image)
    image = self.transform(image)
    
    return image

  
  def predict(self, image_list):
    
    image_tensor = torch.cat([self._preprocess(im).unsqueeze(0) for im in image_list], dim=0)

    with torch.no_grad():
      image_tensor = image_tensor.to(self.device)
      embeddings = self.model(image_tensor)
      
    return embeddings.cpu().numpy()
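
# Hypothetical usage of Predictor above (not part of the original example): the
# checkpoint name and frame shape are placeholders. _preprocess() routes inputs
# through ToPILImage, so HWC uint8 arrays (or CHW tensors) are accepted.
import numpy as np

predictor = Predictor('embeddingnet.pth')
frames = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(4)]
vectors = predictor.predict(frames)  # numpy array of shape [4, embedding_dim]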
Example #3
def main(args):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    p = args.labels_per_batch
    k = args.samples_per_label
    batch_size = p * k

    model = EmbeddingNet()
    if args.resume:
        model.load_state_dict(torch.load(args.resume))

    model.to(device)

    criterion = TripletMarginLoss(margin=args.margin)
    optimizer = Adam(model.parameters(), lr=args.lr)

    transform = transforms.Compose([
        transforms.Lambda(lambda image: image.convert("RGB")),
        transforms.Resize((224, 224)),
        transforms.PILToTensor(),
        transforms.ConvertImageDtype(torch.float),
    ])

    # Using FMNIST to demonstrate embedding learning using triplet loss. This dataset can
    # be replaced with any classification dataset.
    train_dataset = FashionMNIST(args.dataset_dir,
                                 train=True,
                                 transform=transform,
                                 download=True)
    test_dataset = FashionMNIST(args.dataset_dir,
                                train=False,
                                transform=transform,
                                download=True)

    # targets is a list where the i_th element corresponds to the label of i_th dataset element.
    # This is required for PKSampler to randomly sample from exactly p classes. You will need to
    # construct targets while building your dataset. Some datasets (such as ImageFolder) have a
    # targets attribute with the same format.
    targets = train_dataset.targets.tolist()

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              sampler=PKSampler(targets, p, k),
                              num_workers=args.workers)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.eval_batch_size,
                             shuffle=False,
                             num_workers=args.workers)

    for epoch in range(1, args.epochs + 1):
        print("Training...")
        train_epoch(model, optimizer, criterion, train_loader, device, epoch,
                    args.print_freq)

        print("Evaluating...")
        evaluate(model, test_loader, device)

        print("Saving...")
        save(model, epoch, args.save_dir, "ckpt.pth")
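
# The comments in the example above note that PKSampler draws each batch from exactly
# p classes with k samples per class. PKSampler itself is not shown here; the class
# below is only a minimal sketch of that idea, not the actual implementation (the name
# SimplePKSampler and the with-replacement sampling are assumptions).
import random
from collections import defaultdict
from torch.utils.data import Sampler

class SimplePKSampler(Sampler):
    def __init__(self, targets, p, k):
        self.p, self.k = p, k
        self.groups = defaultdict(list)  # label -> indices carrying that label
        for idx, label in enumerate(targets):
            self.groups[label].append(idx)
        self.num_batches = len(targets) // (p * k)

    def __len__(self):
        return self.num_batches * self.p * self.k

    def __iter__(self):
        labels = list(self.groups)
        for _ in range(self.num_batches):
            for label in random.sample(labels, self.p):
                # k indices per chosen label, with replacement for small classes
                yield from random.choices(self.groups[label], k=self.k)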
Example #4
def create_embedder(embedding_model=''):
    embedder = EmbeddingNet()
    if embedding_model != '':
        embedder.load_state_dict(torch.load(embedding_model))

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        embedder = torch.nn.DataParallel(embedder)

    embedder.cuda()
    return embedder
Example #5
def main(args):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    p = args.labels_per_batch
    k = args.samples_per_label
    batch_size = p * k

    model = EmbeddingNet(backbone=None, pretrained=args.pretrained)
    print('pretrained: ', args.pretrained)
    if args.resume:
        model.load_state_dict(torch.load(args.resume))

    model.to(device)

    criterion = TripletMarginLoss(margin=args.margin)
    optimizer = Adam(model.parameters(), lr=args.lr)

    train_transform = transforms.Compose([transforms.Lambda(lambda image: image.convert('RGB')),
                                    transforms.RandomHorizontalFlip(),
                                    transforms.Resize((224, 224)),
                                    transforms.ToTensor()])
    
    test_transform = transforms.Compose([transforms.Lambda(lambda image: image.convert('RGB')),
                                    transforms.Resize((224, 224)),
                                    transforms.ToTensor()])    

    # Using an ImageFolder layout (train/ and test/ subdirectories) to demonstrate embedding
    # learning using triplet loss. This dataset can be replaced with any classification dataset.
    train_dataset = ImageFolder(os.path.join(args.dataset_dir, 'train'), transform=train_transform)
    test_dataset = ImageFolder(os.path.join(args.dataset_dir, 'test'), transform=test_transform)

    # targets is a list where the i_th element corresponds to the label of i_th dataset element.
    # This is required for PKSampler to randomly sample from exactly p classes. You will need to
    # construct targets while building your dataset. Some datasets (such as ImageFolder) have a
    # targets attribute with the same format.
    targets = train_dataset.targets

    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              sampler=PKSampler(targets, p, k),
                              num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.eval_batch_size,
                             shuffle=False,
                             num_workers=args.workers)

    for epoch in range(1, args.epochs + 1):
        print('Training...')
        train_epoch(model, optimizer, criterion, train_loader, device, epoch, args.print_freq)

        print('Evaluating...')
        evaluate(model, test_loader, device)

        print('Saving...')
        save(model, epoch, args.save_dir, 'ckpt.pth')
Example #6
def create_classifier(embedding_model='', model=''):
    embedder = EmbeddingNet()
    if embedding_model != '':
        embedder.load_state_dict(torch.load(embedding_model))
    classifier = FullNet(embedder)

    if model != '':
        classifier.load_state_dict(torch.load(model))

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        classifier = torch.nn.DataParallel(classifier)

    classifier.cuda()
    return classifier
Example #7
def create_lcc(embedding_model='', model=''):
    embedder = EmbeddingNet()
    if embedding_model != '':
        embedder.load_state_dict(torch.load(embedding_model))

    lcc = LCCNet(embedder)

    if model != '':
        lcc.load_state_dict(torch.load(model))

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        lcc = torch.nn.DataParallel(lcc)

    lcc.cuda()
    return lcc
Example #8
def create_distance_model(embedding_model='', model=''):
    embedder = EmbeddingNet()
    if embedding_model != '':
        embedder.load_state_dict(torch.load(embedding_model))

    distanceModel = DistanceNet(embedder)

    if model != '':
        distanceModel.load_state_dict(torch.load(model))

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        distanceModel = torch.nn.DataParallel(distanceModel)

    distanceModel.cuda()
    return distanceModel
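
# Hypothetical usage of the factory helpers from Examples #4 and #6-#8 (the checkpoint
# paths are placeholders). Note that load_state_dict() runs before the DataParallel
# wrapping, so the checkpoints are expected to hold un-prefixed (non-"module.") keys,
# and .cuda() requires at least one visible GPU.
embedder = create_embedder('embedder.pth')
classifier = create_classifier(embedding_model='embedder.pth', model='classifier.pth')
classifier.eval()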
def main():

    # 4. dataset
    mean, std = 0.1307, 0.3081

    transform = tfs.Compose([tfs.Normalize((mean, ), (std, ))])
    test_transform = tfs.Compose([tfs.ToTensor(),
                                  tfs.Normalize((mean,), (std,))])

    train_set = MNIST('./data/MNIST',
                      train=True,
                      download=True,
                      transform=None)

    train_set = SEMI_MNIST(train_set,
                           transform=transform,
                           num_samples=100)

    test_set = MNIST('./data/MNIST',
                     train=False,
                     download=True,
                     transform=test_transform)

    test_set = SEMI_MNIST(test_set,
                          transform=transform,
                          num_samples=100)

    # 5. data loader
    train_loader = DataLoader(dataset=train_set,
                              shuffle=True,
                              batch_size=1,
                              num_workers=8,
                              pin_memory=True
                              )

    test_loader = DataLoader(dataset=test_set,
                             shuffle=False,
                             batch_size=1,
                             )

    # 6. model
    model = EmbeddingNet().cuda()
    model.load_state_dict(torch.load('./saves/state_dict.{}'.format(15)))

    # 7. criterion
    criterion = MetricCrossEntropy()

    data = []
    y = []
    is_known_ = []
    # for idx, (imgs, targets, samples, is_known) in enumerate(train_loader):
    #     model.train()
    #     batch_size = 1
    #     imgs = imgs.cuda()  # [N, 1, 28, 28]
    #     targets = targets.cuda()  # [N]
    #     samples = samples.cuda() # [N, 1, 32, 32]
    #     is_known = is_known.cuda()
    #
    #     output = model(imgs)
    #     y.append(targets.cpu().detach().numpy())
    #     is_known_.append(is_known.cpu().detach().numpy())
    #
    #     if idx % 100 == 0:
    #         print(idx)
    #         print(output.size())
    #
    #     data.append(output.cpu().detach().numpy())
    #
    # data_numpy = np.array(data)
    # y_numpy = np.array(y)
    # is_known_numpy = np.array(is_known_)
    #
    # np.save('data', data_numpy)
    # np.save('known', is_known_numpy)
    # np.save('y', y)

    data_numpy = np.load('data.npy')
    y_numpy = np.load('y.npy')
    is_known_numpy = np.load('known.npy')

    print(data_numpy.shape)
    print(y_numpy.shape)

    data_numpy = np.squeeze(data_numpy)
    y_numpy = np.squeeze(y_numpy)
    is_known_numpy = np.squeeze(is_known_numpy)

    print(data_numpy.shape)
    print(y_numpy.shape)

    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    # first palette (10 colors) is unused; it is overridden by the 11-color palette below
    colors = ['#476A2A', '#7851B8', '#BD3430', '#4A2D4E', '#875525',
              '#A83683', '#4E655E', '#853541', '#3A3120', '#535D8E']

    # one color per digit class (0-9) plus a final color for the 'unknown' class
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
              '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
              '#bcbd22', '#17becf', '#ada699']

    # create and fit the t-SNE model (commented out; the cached result is loaded below)
    # tsne = TSNE(random_state=0)
    # digits_tsne = tsne.fit_transform(data_numpy)
    # np.save('tsne', digits_tsne)
    digits_tsne = np.load('tsne.npy')
    print('complete t-sne')

    # ------------------------------ 1 ------------------------------
    plt.figure(figsize=(10, 10))
    for i in range(11):
        inds = np.where(y_numpy == i)[0]
        known = is_known_numpy[inds]
        known_idx = np.where(known == 1)
        unknown_idx = np.where(known == 0)

        plt.scatter(digits_tsne[inds[unknown_idx], 0], digits_tsne[inds[unknown_idx], 1], alpha=0.5, color=colors[10])
        plt.scatter(digits_tsne[inds[known_idx], 0], digits_tsne[inds[known_idx], 1], alpha=0.5, color=colors[i])

        # plt.scatter(digits_tsne[inds, 0], digits_tsne[inds, 1], alpha=0.5, color=colors[i])

    plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'unknown'])
    plt.show()  # show the plot
def main():
    # 1. argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--resume', type=int, default=0)
    opts = parser.parse_args()
    print(opts)

    # 2. device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # 3. visdom
    vis = visdom.Visdom()

    # 4. dataset
    mean, std = 0.1307, 0.3081

    transform = tfs.Compose([tfs.Normalize((mean, ), (std, ))])
    test_transform = tfs.Compose(
        [tfs.ToTensor(), tfs.Normalize((mean, ), (std, ))])

    train_set = MNIST('./data/MNIST',
                      train=True,
                      download=True,
                      transform=None)

    train_set = SEMI_MNIST(train_set, transform=transform, num_samples=100)

    test_set = MNIST('./data/MNIST',
                     train=False,
                     download=True,
                     transform=test_transform)

    # 5. data loader
    train_loader = DataLoader(dataset=train_set,
                              shuffle=True,
                              batch_size=opts.batch_size,
                              num_workers=8,
                              pin_memory=True)

    test_loader = DataLoader(
        dataset=test_set,
        shuffle=False,
        batch_size=opts.batch_size,
    )

    # 6. model
    model = EmbeddingNet().to(device)

    # 7. criterion
    criterion = MetricCrossEntropy().to(device)

    # 8. optimizer
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=opts.lr,
                                momentum=0.9,
                                weight_decay=5e-4)

    # 9. scheduler
    scheduler = StepLR(optimizer=optimizer, step_size=50, gamma=1)
    # 10. resume
    if opts.resume:
        model.load_state_dict(
            torch.load('./saves/state_dict.{}'.format(opts.resume)))
        print("resume from {} epoch..".format(opts.resume - 1))
    else:
        print("no checkpoint to resume.. train from scratch.")

    # --
    for epoch in range(opts.resume, opts.epoch):

        # 11. train
        for idx, (imgs, targets, samples, is_known) in enumerate(train_loader):
            model.train()
            batch_size = opts.batch_size

            imgs = imgs.to(device)  # [N, 1, 28, 28]
            targets = targets.to(device)  # [N]
            samples = samples.to(device)  # [N, 10, 1, 28, 28]; reshaped to [N * 10, 1, 28, 28] below
            is_known = is_known.to(device)

            samples = samples.view(batch_size * 10, 1, 28, 28)
            out_x = model(imgs)  # [N, 10]
            out_z = model(samples).view(batch_size, 10,
                                        out_x.size(-1))  # [N * 10 , 2]
            loss = criterion(out_x, targets, out_z, is_known, 10, 1)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            for param_group in optimizer.param_groups:
                lr = param_group['lr']

            if idx % 100 == 0:
                print('Epoch : {}\t'
                      'step : [{}/{}]\t'
                      'loss : {}\t'
                      'lr   : {}\t'.format(epoch, idx, len(train_loader), loss,
                                           lr))

                vis.line(X=torch.ones(
                    (1, 1)) * idx + epoch * len(train_loader),
                         Y=torch.Tensor([loss]).unsqueeze(0),
                         update='append',
                         win='loss',
                         opts=dict(x_label='step',
                                   y_label='loss',
                                   title='loss',
                                   legend=['total_loss']))

        torch.save(model.state_dict(), './saves/state_dict.{}'.format(epoch))

        # 12. test
        correct = 0
        avg_loss = 0
        for idx, (img, target) in enumerate(test_loader):

            model.load_state_dict(
                torch.load('./saves/state_dict.{}'.format(epoch)))
            model.eval()
            img = img.to(device)  # [N, 1, 28, 28]
            target = target.to(device)  # [N]
            output = model(img)  # [N, 10]

            output = torch.softmax(output, -1)
            pred, idx_ = output.max(-1)
            print(idx_)
            correct += torch.eq(target, idx_).sum()
            #loss = criterion(output, target)
            #avg_loss += loss.item()

        print('Epoch {} test : '.format(epoch))
        accuracy = correct.item() / len(test_set)
        print("accuracy : {:.4f}%".format(accuracy * 100.))
        #avg_loss = avg_loss / len(test_loader)
        #print("avg_loss : {:.4f}".format(avg_loss))

        vis.line(X=torch.ones((1, 1)) * epoch,
                 Y=torch.Tensor([accuracy]).unsqueeze(0),
                 update='append',
                 win='test',
                 opts=dict(x_label='epoch',
                           y_label='test_',
                           title='test_loss',
                           legend=['accuracy']))
        scheduler.step()
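
# A hedged sketch (not part of the original code; the helper name evaluate_checkpoint is
# hypothetical): the test loop above reloads the checkpoint on every batch and keeps
# autograd enabled. Loading once per epoch and wrapping the loop in torch.no_grad()
# computes the same accuracy with far less overhead.
def evaluate_checkpoint(model, test_loader, test_set, device, epoch):
    model.load_state_dict(torch.load('./saves/state_dict.{}'.format(epoch)))
    model.eval()
    correct = 0
    with torch.no_grad():
        for img, target in test_loader:
            img, target = img.to(device), target.to(device)
            pred = torch.softmax(model(img), -1).argmax(-1)  # predicted class per sample
            correct += torch.eq(target, pred).sum().item()
    return correct / len(test_set)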