Code example #1
import os

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.transforms import Compose, Resize, ToTensor

import DataSet  # project-local dataset module
import util     # project-local helpers (showImage, train_test_split)
# SiameseNet, ContrastiveLoss and getResnet are assumed importable from the
# project's model definitions.


def main():
    # batch_size = 100
    batch_size = 2
    in_size = 224  # input resolution for the ResNet backbone
    transform = Compose([Resize(in_size), ToTensor()])
    tmp_root = '../testpair/photo'
    # util.train_test_split(tmp_root, split=(0.8, 0.1, 0.1))
    sketch_root = '../testpair/sketch'
    train_dataset = DataSet.PairedDataset(photo_root=tmp_root, sketch_root=sketch_root,
                                          transform=transform)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, pin_memory=True,
                                  num_workers=os.cpu_count(), shuffle=True, drop_last=True)
    test_dataset = DataSet.PairedDataset(photo_root=tmp_root, sketch_root=sketch_root,
                                         transform=transform, train=False)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, pin_memory=True,
                                 num_workers=os.cpu_count(), shuffle=True, drop_last=True)

    # Earlier iterations used a plain classifier (SketchANet / Net with
    # CrossEntropyLoss); the version actually trained below is a Siamese
    # ResNet with a contrastive loss.
    net1 = getResnet(num_class=100)
    margin = 1
    model = SiameseNet(net1, net1)  # both branches share one backbone
    crit = ContrastiveLoss(margin)
    if torch.cuda.is_available():
        model = model.cuda()



    optim = torch.optim.Adam(model.parameters())
    # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # Tensorboard stuff
    # writer = tb.SummaryWriter('./logs')
    to_image = transforms.ToPILImage()
    count = 0
    epochs = 200
    prints_interval = 1
    for e in range(epochs):
        print('epoch', e, 'started')
        for i, (X, Y) in enumerate(train_dataloader):

            if torch.cuda.is_available():
                X = (X[0].cuda(), X[1].cuda())  # X is a (sketch, photo) pair
                Y = Y.cuda()
            sketch, photo = X
            # Binary pair label: 1 if sketch and photo come from the same class,
            # 0 if the pair was sampled as 'unmatched'.
            Y = (Y != train_dataset.class_to_index['unmatched']).float()
            # Visual sanity check (uncomment to inspect a batch):
            # for j in range(sketch.shape[0]):
            #     util.showImage(to_image(sketch[j]))
            #     util.showImage(to_image(photo[j]))
            #     print(Y)
            optim.zero_grad()
            output = model(*X)  # one embedding per branch

            loss = crit(*output, Y)

            if i % prints_interval == 0:
                print(f'[Training] {i}/{e}/{epochs} -> Loss: {loss.item()}')
                # writer.add_scalar('train-loss', loss.item(), count)


            loss.backward()
            optim.step()

            count += 1
        print('epoch', e, 'loss', loss.item())
        # Evaluation is stubbed out in this version; the later examples add a
        # real accuracy / validation-loss pass.
        correct, total, accuracy = 0, 0, 0
        model.eval()
        # print(f'[Testing] -/{e}/{epochs} -> Accuracy: {accuracy} %', total, correct)
        model.train()
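
ContrastiveLoss is used by every metric-learning example here, but its definition is not included. The sketch below is a minimal implementation of the standard contrastive (Hadsell-style) loss, assuming the convention the examples establish (Y == 1 for a matched pair, 0 for unmatched); the project's actual class may differ.

import torch
import torch.nn.functional as F


class ContrastiveLoss(torch.nn.Module):
    """Pull matched pairs together, push unmatched pairs at least `margin` apart."""

    def __init__(self, margin=1.0):
        super().__init__()
        self.margin = margin

    def forward(self, out1, out2, y):
        # y == 1 for matched sketch/photo pairs, 0 for unmatched ones
        dist = F.pairwise_distance(out1, out2)
        pos = y * dist.pow(2)
        neg = (1 - y) * F.relu(self.margin - dist).pow(2)
        return (pos + neg).mean()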
Code example #2
from torch.utils.data import DataLoader
from torchvision import transforms

import DataSet  # project-local dataset module
import util     # project-local helpers (train_test_split, showImage)

# train_loader = DataLoader(dataset=train_data, shuffle=True)
#
# for x_batch, y_batch in train_loader:
#     print(x_batch.shape)
#     to_image = transforms.ToPILImage()
#     image =to_image(x_batch[0])
#     util.showImage(image)
#     print(train_data.class_to_idx)
#     print(y_batch)

# Test PairedDataset
tmp_root = '../test_pair/photo'
util.train_test_split(tmp_root, split=(0.5, 0.0, 0.5))
sketch_root = '../test_pair/sketch'
util.train_test_split(sketch_root, split=(0.5, 0.0, 0.5))
train_data = DataSet.PairedDataset(photo_root=tmp_root, sketch_root=sketch_root,
                                   transform=transforms.ToTensor())
train_loader = DataLoader(dataset=train_data, batch_size=1, shuffle=True)
to_image = transforms.ToPILImage()
for e in range(10):
    for x_batch, y_batch in train_loader:
        print(x_batch[0].shape)
        sketch, photo, l1, l2 = x_batch
        for i in range(sketch.shape[0]):
            util.showImage(to_image(sketch[i]))
            util.showImage(to_image(photo[i]))
        print(train_data.class_to_index)
        print(y_batch, l1, l2)
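
DataSet.PairedDataset itself is never shown. The following is a structural sketch of what a compatible implementation could look like, inferred from how the examples use it (the photo_root/sketch_root/transform/train/balanced arguments, the classes and class_to_index attributes, and the extra 'unmatched' class for mismatched pairs). The real class also returns per-image labels (l1/l2, label_s/label_p) in the other examples; the train and balanced flags are ignored in this sketch.

import os
import random

from PIL import Image
from torch.utils.data import Dataset


class PairedDataset(Dataset):
    """Hypothetical sketch: yields a same-class (sketch, photo) pair, or a
    mismatched pair labelled with the extra class 'unmatched'."""

    def __init__(self, photo_root, sketch_root, transform=None,
                 train=True, balanced=False):
        self.transform = transform
        self.photo_root, self.sketch_root = photo_root, sketch_root
        real = sorted(set(os.listdir(photo_root)) & set(os.listdir(sketch_root)))
        self.real = real
        self.classes = real + ['unmatched']
        self.class_to_index = {c: i for i, c in enumerate(self.classes)}
        self.sketches = [(c, f) for c in real
                         for f in os.listdir(os.path.join(sketch_root, c))]
        self.photos = {c: os.listdir(os.path.join(photo_root, c)) for c in real}

    def __len__(self):
        return len(self.sketches)

    def _load(self, root, cls, name):
        img = Image.open(os.path.join(root, cls, name)).convert('RGB')
        return self.transform(img) if self.transform else img

    def __getitem__(self, idx):
        cls, sk_name = self.sketches[idx]
        if random.random() < 0.5:  # matched pair
            photo_cls, label = cls, self.class_to_index[cls]
        else:                      # mismatched pair
            photo_cls = random.choice([c for c in self.real if c != cls])
            label = self.class_to_index['unmatched']
        sketch = self._load(self.sketch_root, cls, sk_name)
        photo = self._load(self.photo_root, photo_cls,
                           random.choice(self.photos[photo_cls]))
        return (sketch, photo), label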
Code example #3
# Imports as in Code example #1; SiaClassNet and eval_accu are additional
# project-local names assumed importable.
def main():
    # batch_size = 100
    batch_size = 1
    balanced = False
    print("Start Training")
    in_size = 224  # ResNet input resolution
    # Full dataset:
    # tmp_root = '../256x256/photo/tx_000000000000'
    # sketch_root = '../256x256/sketch/tx_000000000000'
    # tmp_root = '../rendered_256x256/256x256/photo/tx_000000000000'
    # sketch_root = '../rendered_256x256/256x256/sketch/tx_000000000000'
    tmp_root = '../test_pair/photo'
    sketch_root = '../test_pair/sketch'
    train_dataset = DataSet.PairedDataset(photo_root=tmp_root,
                                          sketch_root=sketch_root,
                                          transform=Compose(
                                              [Resize(in_size),
                                               ToTensor()]),
                                          balanced=balanced)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  pin_memory=True,
                                  num_workers=os.cpu_count(),
                                  shuffle=True,
                                  drop_last=True)

    test_dataset = DataSet.ImageDataset(sketch_root,
                                        transform=Compose(
                                            [Resize(in_size),
                                             ToTensor()]),
                                        train=True)
    # print(test_dataset.classes)
    # print(train_dataset.classes)
    # print(test_dataset.class_to_idx)
    # print(train_dataset.class_to_index)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=os.cpu_count(),
                                 shuffle=True,
                                 drop_last=True)

    num_class = len(train_dataset.classes)
    embedding_size = 200
    net1 = getResnet(num_class=embedding_size, pretrain=True)
    model = SiaClassNet(net1, embedding_size, num_class)

    method = "classify"
    crit = torch.nn.CrossEntropyLoss()
    model.train()

    if torch.cuda.is_available():
        model = model.cuda()

    optim = torch.optim.Adam(model.parameters())
    # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # Tensorboard stuff
    # writer = tb.SummaryWriter('./logs')

    count = 0
    epochs = 200
    prints_interval = 1
    for e in range(epochs):
        print('epoch', e, 'started')
        avg_loss = 0
        for i, (X, Y) in enumerate(train_dataloader):

            if torch.cuda.is_available():
                X = (X[0].cuda(), X[1].cuda())
                Y = (Y[0].cuda(), Y[1].cuda(), Y[2].cuda())
            sketch, photo = X
            optim.zero_grad()
            # Both branches receive the sketch: this trains the network as a
            # plain sketch classifier against the sketch labels.
            output = model(sketch, sketch)
            (Y, label_s, label_p) = Y
            loss = crit(output, label_s)
            avg_loss += loss.item()
            if i % prints_interval == 0:
                # print(output, label_s)  # raw logits vs. targets, for debugging
                print(f'[Training] {i}/{e}/{epochs} -> Loss: {avg_loss/(i+1)}')
                # writer.add_scalar('train-loss', loss.item(), count)
            loss.backward()

            optim.step()

            count += 1
        print('epoch', e, 'Avg loss', avg_loss / len(train_dataloader))

        eval_accu(test_dataloader, model, e, epochs)
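
eval_accu is called above but not defined in the snippet. Below is a hedged sketch of a compatible helper: the name and call signature come from the call site, while the body is an assumption that mirrors the model(sketch, sketch) trick used during training.

import torch


def eval_accu(dataloader, model, e, epochs):
    """Hypothetical accuracy pass over an ImageDataset loader."""
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, Y in dataloader:
            if torch.cuda.is_available():
                X, Y = X.cuda(), Y.cuda()
            output = model(X, X)  # feed the sketch through both branches
            pred = output.argmax(dim=1)
            correct += (pred == Y).sum().item()
            total += Y.size(0)
    accuracy = 100.0 * correct / total
    print(f'[Testing] -/{e}/{epochs} -> Accuracy: {accuracy:.2f} %')
    model.train()
    return accuracy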
Code example #4
# Imports as in Code example #1; ParallelNet, SoftMax and normalize are
# additional project-local names assumed importable.
def main():
    # batch_size = 100
    batch_size = 1
    balanced = False
    print("Start Training")

    in_size = 224  # ResNet input resolution
    tmp_root = '../test_pair/photo'
    sketch_root = '../test_pair/sketch'
    # tmp_root = '../256x256/photo/tx_000000000000'
    # sketch_root = '../256x256/sketch/tx_000000000000'

    # NB: this augmentation transform is applied only to the test split below;
    # the training split uses plain Resize + ToTensor.
    transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    train_dataset = DataSet.PairedDataset(photo_root=tmp_root,
                                          sketch_root=sketch_root,
                                          transform=Compose(
                                              [Resize(in_size),
                                               ToTensor()]),
                                          balanced=balanced)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  pin_memory=True,
                                  num_workers=os.cpu_count(),
                                  shuffle=True,
                                  drop_last=True)
    test_dataset = DataSet.PairedDataset(photo_root=tmp_root,
                                         sketch_root=sketch_root,
                                         transform=transform,
                                         train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=os.cpu_count(),
                                 shuffle=True,
                                 drop_last=True)

    embedding_size = 512
    margin = 1
    num_class = len(train_dataset.classes) - 1  # exclude the 'unmatched' pseudo-class
    photo_net = getResnet(num_class=num_class,
                          pretrain=True,
                          feature_extract=True)

    for param in photo_net.parameters():
        param.requires_grad = False

    sketch_net = getResnet(num_class=num_class,
                           pretrain=True,
                           feature_extract=False)
    softmax_loss = SoftMax(embed_size=embedding_size, num_class=num_class)
    # An earlier variant also optimized the softmax-loss parameters:
    # optim = torch.optim.Adam(list(sketch_net.parameters()) + list(softmax_loss.parameters()))
    optim = torch.optim.Adam(sketch_net.parameters())
    model = ParallelNet(sketch_net=sketch_net, photo_net=photo_net)
    print(sketch_net)
    contrastive_loss = ContrastiveLoss(margin)

    cross = torch.nn.CrossEntropyLoss()
    if torch.cuda.is_available():
        model = model.cuda()

    # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # Tensorboard stuff
    # writer = tb.SummaryWriter('./logs')

    epochs = 100
    prints_interval = 1
    max_chpt = 3
    min_loss = 100000
    chpt_num = 0
    for e in range(epochs):
        print('epoch', e, 'started')
        avg_loss = 0
        for i, (X, Y) in enumerate(train_dataloader):
            one = torch.ones(Y[0].shape)
            zero = torch.zeros(Y[0].shape)
            if torch.cuda.is_available():
                X, Y = (X[0].cuda(), X[1].cuda()), (Y[0].cuda(), Y[1].cuda(),
                                                    Y[2].cuda())
                one, zero = one.cuda(), zero.cuda()
            optim.zero_grad()

            sketch, photo = X
            (Y, label_s, label_p) = Y
            embedding_sketch = sketch_net(sketch)
            embedding_photo = photo_net(photo)
            # Only the plain cross-entropy on the sketch branch is active in
            # this run; the softmax/contrastive terms below are disabled.
            loss = cross(embedding_sketch, label_s)
            sloss = 0
            # sloss = softmax_loss(embedding_sketch, label_s)
            # sketch_feature = normalize(embedding_sketch)
            # photo_feature = normalize(embedding_photo)

            Y = torch.where(Y != train_dataset.class_to_index['unmatched'],
                            one, zero)

            closs = 0
            # closs = contrastive_loss(sketch_feature, photo_feature, Y)

            # loss = 0.0 * closs + 1 * sloss

            avg_loss += loss.item()
            if i % prints_interval == 0:
                print(
                    f'[Training] {i}/{e}/{epochs} -> Loss: {avg_loss / (i + 1)} Contrastive: {closs} SoftMax: {sloss}'
                )
            loss.backward()

            optim.step()

        print('epoch', e, 'end', 'Avg loss', avg_loss / len(train_dataloader))
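
SoftMax, the auxiliary classification loss constructed above (even though it is disabled in this run), is another project-local module that is not shown. Judging from its constructor arguments and the commented call softmax_loss(embedding_sketch, label_s), it is plausibly a linear head plus cross-entropy; a minimal sketch under that assumption:

import torch


class SoftMax(torch.nn.Module):
    """Hypothetical classification head: embeddings -> class logits -> CE loss."""

    def __init__(self, embed_size, num_class):
        super().__init__()
        self.fc = torch.nn.Linear(embed_size, num_class)
        self.ce = torch.nn.CrossEntropyLoss()

    def forward(self, embedding, label):
        logits = self.fc(embedding)
        return self.ce(logits, label)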
Code example #5
# Imports as in Code example #1; eval_loss and set_checkpoint are additional
# project-local helpers assumed importable.
def main():
    batch_size = 100
    balanced = False
    print("Start Training")
    in_size = 224  # ResNet input resolution
    # Small local subset:
    # tmp_root = '../test_pair/photo'
    # sketch_root = '../test_pair/sketch'
    tmp_root = '../256x256/photo/tx_000000000000'
    sketch_root = '../256x256/sketch/tx_000000000000'
    train_dataset = DataSet.PairedDataset(photo_root=tmp_root,
                                          sketch_root=sketch_root,
                                          transform=Compose(
                                              [Resize(in_size),
                                               ToTensor()]),
                                          balanced=balanced)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  pin_memory=True,
                                  num_workers=os.cpu_count(),
                                  shuffle=True,
                                  drop_last=True)
    test_dataset = DataSet.PairedDataset(photo_root=tmp_root,
                                         sketch_root=sketch_root,
                                         transform=Compose(
                                             [Resize(in_size),
                                              ToTensor()]),
                                         train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=os.cpu_count(),
                                 shuffle=True,
                                 drop_last=True)

    num_class = len(train_dataset.classes)
    embedding_size = 512
    net1 = getResnet(num_class=embedding_size, pretrain=True)
    margin = 1
    model = SiameseNet(net1)

    method = 'metric'
    crit = ContrastiveLoss(margin)
    model.train()
    if torch.cuda.is_available():
        model = model.cuda()

    optim = torch.optim.Adam(model.parameters())
    # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # Tensorboard stuff
    # writer = tb.SummaryWriter('./logs')

    count = 0
    epochs = 100
    prints_interval = 100
    max_chpt = 3
    min_loss = float('inf')
    chpt_num = 0
    for e in range(epochs):
        print('epoch', e, 'started')
        avg_loss = 0
        for i, (X, Y) in enumerate(train_dataloader):
            one = torch.ones(Y[0].shape)
            zero = torch.zeros(Y[0].shape)
            if torch.cuda.is_available():
                X, Y = (X[0].cuda(), X[1].cuda()), (Y[0].cuda(), Y[1].cuda(),
                                                    Y[2].cuda())
                one, zero = one.cuda(), zero.cuda()
            optim.zero_grad()
            output = model(*X)  # one embedding per branch
            (Y, label_s, label_p) = Y
            # 1 = matched sketch/photo pair, 0 = unmatched
            Y = torch.where(Y != train_dataset.class_to_index['unmatched'],
                            one, zero)
            loss = crit(*output, Y)
            avg_loss += loss.item()
            if i % prints_interval == 0:
                # print(output,Y)
                print(f'[Training] {i}/{e}/{epochs} -> Loss: {avg_loss/(i+1)}')
                # writer.add_scalar('train-loss', loss.item(), count)
            loss.backward()

            optim.step()

            count += 1
        print('epoch', e, 'Avg loss', avg_loss / len(train_dataloader))
        valid_loss = eval_loss(test_dataloader, model, e, epochs, crit,
                               train_dataset)
        if valid_loss < min_loss:
            path = 'checkpoint' + str(chpt_num) + '.pt'
            min_loss = valid_loss
            chpt_num = (chpt_num + 1) % max_chpt
            set_checkpoint(epoch=e,
                           model=model,
                           optimizer=optim,
                           train_loss=avg_loss / len(train_dataloader),
                           loss=valid_loss,
                           path=path)
            path = 'best.pt'
            set_checkpoint(epoch=e,
                           model=model,
                           optimizer=optim,
                           train_loss=avg_loss / len(train_dataloader),
                           loss=valid_loss,
                           path=path)
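
eval_loss and set_checkpoint are project helpers whose definitions are not included. Below is a hedged sketch of compatible implementations, inferred purely from the call sites: the names and keyword arguments are taken from the calls above, while the bodies are assumptions.

import torch


def eval_loss(dataloader, model, e, epochs, crit, dataset):
    """Hypothetical validation pass mirroring the training loop above."""
    model.eval()
    total = 0.0
    with torch.no_grad():
        for X, Y in dataloader:
            one = torch.ones(Y[0].shape)
            zero = torch.zeros(Y[0].shape)
            if torch.cuda.is_available():
                X = (X[0].cuda(), X[1].cuda())
                Y = (Y[0].cuda(), Y[1].cuda(), Y[2].cuda())
                one, zero = one.cuda(), zero.cuda()
            output = model(*X)
            (Y, label_s, label_p) = Y
            Y = torch.where(Y != dataset.class_to_index['unmatched'], one, zero)
            total += crit(*output, Y).item()
    avg = total / len(dataloader)
    print(f'[Validation] -/{e}/{epochs} -> Loss: {avg}')
    model.train()
    return avg


def set_checkpoint(path, model, optimizer, epoch, **metrics):
    """Hypothetical checkpoint writer; extra keyword arguments (losses,
    accuracy, even whole modules as in Code example #6) are pickled as-is."""
    torch.save({'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                **metrics}, path)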
Code example #6
# Imports as in Code example #1; eval_model and set_checkpoint are additional
# project-local helpers assumed importable.
def main():
    batch_size = 100
    balanced = False
    print("Start Training")

    in_size = 224  # ResNet input resolution
    # Small local subset:
    # tmp_root = '../test_pair/photo'
    # sketch_root = '../test_pair/sketch'
    tmp_root = '../256x256/photo/tx_000000000000'
    sketch_root = '../256x256/sketch/tx_000000000000'

    train_dataset = DataSet.PairedDataset(photo_root=tmp_root,
                                          sketch_root=sketch_root,
                                          transform=Compose(
                                              [Resize(in_size),
                                               ToTensor()]),
                                          balanced=balanced)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  pin_memory=True,
                                  num_workers=os.cpu_count(),
                                  shuffle=True,
                                  drop_last=True)
    test_dataset = DataSet.PairedDataset(photo_root=tmp_root,
                                         sketch_root=sketch_root,
                                         transform=Compose(
                                             [Resize(in_size),
                                              ToTensor()]),
                                         train=False)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=os.cpu_count(),
                                 shuffle=True,
                                 drop_last=True)

    num_class = len(train_dataset.classes)
    sketch_net = getResnet(num_class=num_class,
                           pretrain=True,
                           feature_extract=True)
    softmax_loss = SoftMax(embed_size=512, num_class=num_class)
    hinge_loss = ContrastiveLoss(margin=2)
    optim = torch.optim.Adam(
        list(sketch_net.parameters()) + list(softmax_loss.parameters()))
    sketch_net.train()
    photo_net = getResnet(num_class=num_class,
                          pretrain=True,
                          feature_extract=True)
    for param in photo_net.parameters():
        param.requires_grad = False

    if torch.cuda.is_available():
        sketch_net = sketch_net.cuda()
        softmax_loss = softmax_loss.cuda()
        photo_net = photo_net.cuda()
    count = 0
    epochs = 200
    max_chpt = 3
    max_acu = -1
    chpt_num = 0
    # Forward hook for capturing intermediate activations; defined here but
    # never registered in this snippet (see the usage sketch below).
    activation = {}

    def get_activation(name):
        def hook(model, input, output):
            activation[name] = output

        return hook

    for e in range(epochs):
        print('epoch', e, 'Start')
        # First pass: train=True (runs optimization over the loaders)
        (avg_loss, avg_class_loss, avg_hinge_loss,
         accuracy) = eval_model(e,
                                epochs,
                                sketch_net,
                                photo_net,
                                softmax_loss,
                                hinge_loss,
                                [train_dataloader, test_dataloader],
                                optim,
                                train=True)
        print('epoch', e, 'End')
        # Second pass: train=False (measurement only)
        (avg_loss, avg_class_loss, avg_hinge_loss,
         accuracy) = eval_model(e,
                                epochs,
                                sketch_net,
                                photo_net,
                                softmax_loss,
                                hinge_loss,
                                [train_dataloader, test_dataloader],
                                optim,
                                train=False)

        if accuracy >= max_acu:
            path = 'checkpoint' + str(chpt_num) + '.pt'
            max_acu = accuracy
            chpt_num = (chpt_num + 1) % max_chpt
            set_checkpoint(epoch=e,
                           model=sketch_net,
                           softmax=softmax_loss,
                           optimizer=optim,
                           train_loss=avg_loss / len(train_dataloader),
                           softmax_loss=avg_class_loss,
                           hinge_loss=avg_hinge_loss,
                           accurate=accuracy,
                           path=path)
            path = 'best.pt'
            set_checkpoint(epoch=e,
                           model=sketch_net,
                           softmax=softmax_loss,
                           optimizer=optim,
                           train_loss=avg_loss / len(train_dataloader),
                           softmax_loss=avg_class_loss,
                           hinge_loss=avg_hinge_loss,
                           accurate=accuracy,
                           path=path)
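
The get_activation closure in Code example #6 is defined but never registered. For completeness, this is how it could be attached with PyTorch's register_forward_hook to capture the embedding that feeds the classifier; the avgpool attribute assumes getResnet returns a torchvision ResNet.

# Register the hook on the backbone's penultimate layer; any forward pass then
# stores that layer's output in activation['embedding'].
handle = sketch_net.avgpool.register_forward_hook(get_activation('embedding'))

x = torch.randn(1, 3, 224, 224)
if torch.cuda.is_available():
    x = x.cuda()
_ = sketch_net(x)
embedding = activation['embedding'].flatten(1)  # (1, 512) for ResNet-18/34
handle.remove()  # detach the hook when finished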