Example #1
0
    def __init__(self,
                 text,
                 *,
                 lr=1e-5,
                 batch_size=4,
                 gradient_accumulate_every=4,
                 save_every=100,
                 image_width=512,
                 num_layers=16,
                 epochs=20,
                 iterations=1050,
                 save_progress=False,
                 seed=None,
                 open_folder=True):
        """Set up the training harness for a text prompt.

        Optionally seeds every RNG for reproducibility, builds the DeepDaze
        model on the GPU, and records the optimizer, gradient scaler, and
        bookkeeping state (save cadence, output path, encoded prompt).
        """
        super().__init__()

        # Make the run reproducible when an explicit seed is supplied.
        if exists(seed):
            print(f'setting seed: {seed}')
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            random.seed(seed)
            torch.backends.cudnn.deterministic = True

        self.epochs = epochs
        self.iterations = iterations

        # One batch per accumulation step, per iteration, per epoch.
        batches_per_epoch = iterations * batch_size * gradient_accumulate_every
        total_batches = epochs * batches_per_epoch

        net = DeepDaze(
            total_batches=total_batches,
            batch_size=batch_size,
            image_width=image_width,
            num_layers=num_layers,
        ).cuda()
        self.model = net

        self.scaler = GradScaler()
        self.optimizer = Adam(net.parameters(), lr)
        self.gradient_accumulate_every = gradient_accumulate_every
        self.save_every = save_every
        self.save_progress = save_progress
        self.open_folder = open_folder

        # Derive a filesystem-safe slug from the prompt for output files.
        self.text = text
        slug = self.text.replace(' ', '_')
        self.textpath = slug
        self.filename = Path(f'./{slug}.png')

        self.encoded_text = tokenize(text).cuda()
Example #2
0
 def create_text_encoding(self, text):
     """Tokenize *text* and embed it with the CLIP perceptor.

     Runs under ``torch.no_grad()`` so no autograd graph is built for the
     encoder forward pass; the returned encoding is detached.
     """
     tokens = tokenize(text).cuda()
     with torch.no_grad():
         encoding = perceptor.encode_text(tokens).detach()
     return encoding
Example #3
0
 def create_text_encoding(text):
     """Tokenize *text* and embed it with the CLIP perceptor.

     The forward pass is wrapped in ``torch.no_grad()``: ``.detach()`` on
     the result does not stop autograd from recording the encoder's forward
     graph, so without the context manager this wasted memory and compute.
     This also matches the method variant of this helper elsewhere in the
     file, which already uses ``torch.no_grad()``.
     """
     tokenized_text = tokenize(text).cuda()
     with torch.no_grad():
         text_encoding = perceptor.encode_text(tokenized_text).detach()
     return text_encoding
Example #4
0
    def __init__(
        self,
        text,
        *,
        lr=1e-5,
        batch_size=4,
        gradient_accumulate_every=4,
        save_every=100,
        image_width=512,
        num_layers=16,
        epochs=20,
        iterations=1050,
        save_progress=False,
        seed=None,
        open_folder=True,
        save_date_time=False,
        start_image_path=None,
        start_image_train_iters=10,
        start_image_lr=3e-4,
        theta_initial=None,
        theta_hidden=None,
    ):
        """Set up the training harness for a text prompt.

        Optionally seeds every RNG for reproducibility, builds the DeepDaze
        model on the GPU, records optimizer/bookkeeping state, and — when
        ``start_image_path`` is given — loads and preprocesses a starting
        image to prime generation from.
        """
        super().__init__()

        # Make the run reproducible when an explicit seed is supplied.
        if exists(seed):
            tqdm.write(f'setting seed: {seed}')
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            random.seed(seed)
            torch.backends.cudnn.deterministic = True

        self.epochs = epochs
        self.iterations = iterations
        # One batch per accumulation step, per iteration, per epoch.
        total_batches = epochs * iterations * batch_size * gradient_accumulate_every

        model = DeepDaze(total_batches=total_batches,
                         batch_size=batch_size,
                         image_width=image_width,
                         num_layers=num_layers,
                         theta_initial=theta_initial,
                         theta_hidden=theta_hidden).cuda()

        self.model = model
        self.scaler = GradScaler()
        self.optimizer = AdamP(model.parameters(), lr)
        self.gradient_accumulate_every = gradient_accumulate_every
        self.save_every = save_every
        self.save_date_time = save_date_time
        self.open_folder = open_folder
        self.save_progress = save_progress
        self.text = text
        # Filesystem-safe slug derived from the prompt.
        self.textpath = text.replace(" ", "_")
        self.filename = self.image_output_path()
        self.encoded_text = tokenize(text).cuda()

        self.start_image = None
        self.start_image_train_iters = start_image_train_iters
        self.start_image_lr = start_image_lr
        if exists(start_image_path):
            file = Path(start_image_path)
            # Bug fix: the message previously formatted self.start_image_path,
            # an attribute that is never assigned — so a missing file raised
            # AttributeError instead of the intended assertion message.
            assert file.exists(
            ), f'file does not exist at given starting image path {start_image_path}'
            image = Image.open(str(file))

            # Resize/crop to the model's square resolution and map pixel
            # values into [-1, 1] via Normalize(0.5, 0.5).
            transform = T.Compose([
                T.Resize(image_width),
                T.CenterCrop((image_width, image_width)),
                T.ToTensor(),
                T.Normalize(0.5, 0.5)
            ])

            image_tensor = transform(image)[None, ...].cuda()
            self.start_image = image_tensor