Example #1
    def setUp(self):
        self.args = DotDict({
            "image_size": 256,
            "batch_size": 1,
            "dataset":
            "/media/zzhuang/00091EA2000FB1D0/iGit/git_projects/SuperStyleTransfer/data/images/content-images",
            "styleset":
            "/media/zzhuang/00091EA2000FB1D0/iGit/git_projects/SuperStyleTransfer/data/images/style-images/",
            "in_channel_num": 3,
            "channel_base_num": 64,
            "layer_num": 3
        })
        self.model = PixelDiscriminator(
            self.args.in_channel_num,
            channel_base_num=self.args.channel_base_num)

        self.AdvLoss = GanLoss()

        transform = transforms.Compose([
            transforms.Resize(self.args.image_size),
            transforms.CenterCrop(self.args.image_size),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.mul(255))
        ])
        content_dataset = datasets.ImageFolder(self.args.dataset, transform)
        style_dataset = datasets.ImageFolder(self.args.styleset, transform)
        synth_dataset = UnalignedDataset(content_dataset, style_dataset)
        self.train_loader = DataLoader(synth_dataset,
                                       batch_size=self.args.batch_size)
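
All of these examples build their configuration through DotDict, whose definition does not appear on this page. The sketch below is a minimal assumption: a dict whose keys can also be read and written as attributes, which matches uses such as self.args.in_channel_num and self.args.n_batch = 0; the repository's real class may do more.

class DotDict(dict):
    """Minimal sketch of an attribute-access dict (the real class may differ)."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value


# usage matching the configuration dicts above
args = DotDict({"image_size": 256, "batch_size": 1})
args.n_batch = 0
print(args.image_size, args["n_batch"])  # 256 0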
Example #2
    def setUp(self):
        self.args = DotDict({
            "image_size": 256,
            "batch_size": 1,
            "dataset": "/media/zzhuang/00091EA2000FB1D0/iGit/git_projects/SuperStyleTransfer/data/images/content-images",
            "styleset": "/media/zzhuang/00091EA2000FB1D0/iGit/git_projects/SuperStyleTransfer/data/images/style-images/",
        })
Example #3
    def __init__(self, args=DotDict({})):
        # store the config, build the transformer network and initialise loss/feature placeholders
        super(JohnsonNet, self).__init__()
        self.args = args
        self.args.n_batch = 0

        self.TransformerNet = ResNet().cuda()
        self.total_loss = None
        self.content_loss = None
        self.style_loss = None

        self.x = None
        self.y = None
        self.features_y = None
        self.features_x = None
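
A note on the signature def __init__(self, args=DotDict({})): the default is evaluated once at definition time, so every JohnsonNet created without an explicit config shares the same DotDict instance and mutates it through self.args.n_batch = 0. A common alternative is sketched below; it is not the repository's code, only the usual per-instance default idiom (DotDict as sketched after Example #1, base class assumed to be torch.nn.Module).

import torch

class JohnsonNet(torch.nn.Module):
    def __init__(self, args=None):
        super(JohnsonNet, self).__init__()
        # build a fresh config per instance instead of sharing one default object
        self.args = DotDict({}) if args is None else args
        self.args.n_batch = 0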
Example #4
    def __init__(self, args=DotDict({}), requires_grad=False):
        super(VGGClassifier, self).__init__()
        self.args = args
        # pass the flag through instead of hard-coding it, so the VGG backbone
        # can optionally be left trainable
        self.ConvNet = CroppedVGG(requires_grad=requires_grad)
        self.Classifier = torch.nn.Sequential(
            torch.nn.Linear(524288, 4096),
            torch.nn.ReLU(True),
            torch.nn.Dropout(),
            torch.nn.Linear(4096, 4096),
            torch.nn.ReLU(True),
            torch.nn.Dropout(),
            torch.nn.Linear(4096, 1),
        )
        self.optimizer = Adam(self.Classifier.parameters(), self.args.lr)
        self.lossFunc = torch.nn.MSELoss()
        self.x = None
        self.y = None
        self.pred = None
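
The training loop in the next snippet drives the classifier through vgg.set_input(x, y) and vgg.optimize_parameters(), which are not reproduced on this page. Below is a minimal sketch of what such methods could look like given the members created in __init__ (ConvNet, Classifier, optimizer, lossFunc); the actual implementation in the repository may differ.

    def set_input(self, x, y):
        # keep the current mini-batch on the object, as the x/y placeholders suggest
        self.x = x
        self.y = y

    def forward(self):
        # VGG features flattened into the fully connected head
        features = self.ConvNet(self.x)
        self.pred = self.Classifier(features.view(features.size(0), -1))
        return self.pred

    def optimize_parameters(self):
        # one standard optimisation step on the MSE objective
        self.optimizer.zero_grad()
        loss = self.lossFunc(self.forward().squeeze(1), self.y)
        loss.backward()
        self.optimizer.step()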
Example #5
    train_dataset = datasets.ImageFolder(args.dataset, transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
    vgg = VGGClassifier(args)

    for e in range(args.epochs):
        for batch_id, (x, y) in enumerate(train_loader):
            y = y.type(torch.FloatTensor)
            vgg.set_input(x, y)
            vgg.optimize_parameters()

    # save model
    save_model_filename = "epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "vgg_classifier.model"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    vgg.save_model(save_model_path)

    print("\nDone, trained model saved at", save_model_path)


if __name__ == '__main__':
    args = {
        "num_classes": 3,
        "seed": 42,
        "image_size": 256,
        "batch_size": 4,
        "lr": 1e-3,
        "epochs": 2,
        "dataset": "../../data/images/content-images/",
        "save_model_dir": "../../models/JohnsonNet/"
    }
    train(DotDict(args))
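
save_model here and load_model in the next example belong to the wrapper classes and are not shown on this page. They most likely wrap plain state_dict serialization; the standalone sketch below shows that usual pattern, with module and path as illustrative names rather than the repository's API.

import torch


def save_model(module, path):
    # persist only the learnable weights of the wrapped network
    module.eval()
    torch.save(module.state_dict(), path)


def load_model(module, path):
    # restore weights written by save_model and switch to inference mode
    module.load_state_dict(torch.load(path))
    module.eval()
    return module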
Example #6

def classify(args):
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    train_dataset = datasets.ImageFolder(args.dataset, transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
    print("data loaded, data size:", len(train_dataset))

    # load the trained style-transfer model once, outside the batch loop
    style_model = JohnsonNet()
    style_model.load_model(args.model)

    with torch.no_grad():
        for batch_id, (content_image, _) in enumerate(train_loader):
            # the DataLoader already yields batched tensors of shape [B, C, H, W]
            content_image = content_image.cuda()
            style_model.set_input(content_image)
            output = style_model.test()
            utils.save_image(args.output_image, output[0].cpu())


if __name__ == '__main__':
    args = {
        "dataset": "../../data/trainingData/mileStoneData/",
        "content_scale": None,
        "model":
        "../../models/JohnsonNet/epoch_10_Sun_Nov_11_22:38:42_2018_100000.0_10000000000.0.model",
        "outputpath": "../../output/JohnsonNet/"
    }
    with torch.cuda.device(0):
        classify(DotDict(args))
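
One caveat in this last example: the __main__ block defines outputpath, but the loop saves to args.output_image, so every batch would target the same, undefined file name. A per-image destination could be built as in the sketch below, where the stylized_{:04d}.jpg pattern is an assumption rather than the repository's convention.

import os


def output_path_for(args, batch_id):
    # hypothetical helper: one output file per batch index under args.outputpath
    return os.path.join(args.outputpath, "stylized_{:04d}.jpg".format(batch_id))


# inside the loop above, in place of args.output_image:
#     utils.save_image(output_path_for(args, batch_id), output[0].cpu())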