def __init__(self, steps=1, lr=0.0001, decay=0.00001, silent=True):
        """Build the GAN, its eval models, and all per-run training state.

        Args:
            steps: initial step counter handed to the GAN.
            lr: Adam learning rate.
            decay: learning-rate decay.
            silent: when True, suppress the periodic console printouts.
        """

        #Init GAN and Eval Models
        self.GAN = GAN(steps=steps, lr=lr, decay=decay)
        self.GAN.GenModel()
        self.GAN.GenModelA()

        self.GAN.S.summary()
        self.GAN.G.summary()
        self.GAN.D.summary()

        #Data generator (my own code, not from TF 2.0)
        # NOTE(review): relies on module-level `dataset`, `im_size`,
        # `BATCH_SIZE` globals — confirm they are defined at import time.
        self.im = dataGenerator(dataset, im_size, BATCH_SIZE, flip=True)

        #Set up variables
        # time.clock() was removed in Python 3.8; time.time() matches the
        # wall-clock usage of the steps/sec report in train().
        self.lastblip = time.time()

        self.silent = silent

        # Constant label tensors reused on every training step.
        self.ones = np.ones((BATCH_SIZE, 1), dtype=np.float32)
        self.zeros = np.zeros((BATCH_SIZE, 1), dtype=np.float32)
        self.nones = -self.ones

        self.pl_mean = 0
        self.av = np.zeros([44])
    def __init__(self,
                 steps=1,
                 lr=0.0001,
                 decay=0.00001,
                 *,
                 verbose=True,
                 max_steps=1000000,
                 im_size,
                 data_dir='Data',
                 dataset,
                 models_dir='Models',
                 results_dir='Results'):
        """Set up directories, GAN models, the data pipeline, and the
        bookkeeping state needed for a training run."""

        # Normalise every directory path to end with a separator.
        self.data_dir = data_dir + '/'
        self.models_dir = models_dir + '/'
        self.results_dir = results_dir + '/'
        self.dataset = dataset

        self.max_steps = max_steps
        self.im_size = im_size
        self.verbose = verbose

        #Init GAN and Eval Models
        self.GAN = GAN(steps=steps, lr=lr, decay=decay)
        self.GAN.GenModel()
        self.GAN.GenModelA()
        self.GAN.G.summary()

        #Data generator (my own code, not from TF 2.0)
        self.im = dataGenerator(self.data_dir,
                                self.dataset,
                                self.im_size,
                                flip=True,
                                verbose=verbose)

        #Set up variables
        self.lastblip = time.time()

        # Constant label tensors reused on every training step.
        self.ones = np.ones((BATCH_SIZE, 1), dtype=np.float32)
        self.zeros = np.zeros((BATCH_SIZE, 1), dtype=np.float32)

        self.pl_mean = 0
        self.av = np.zeros([44])
    def __init__(self, steps = 1, lr = 0.0001, decay = 0.00001, silent = True):
        """Build the GAN and one data generator per input directory.

        Args:
            steps: initial step counter handed to the GAN.
            lr: Adam learning rate.
            decay: learning-rate decay.
            silent: when True, suppress the periodic console printouts.
        """

        #Init GAN and Eval Models
        self.GAN = GAN(steps = steps, lr = lr, decay = decay)

        self.GAN.G1.summary()

        #Data generator (my own code, not from TF 2.0)
        # NOTE(review): `directory` and `im_size` are module-level globals —
        # confirm they are defined at import time.
        self.im = [dataGenerator(d, im_size) for d in directory]

        #Set up variables
        # time.clock() was removed in Python 3.8; use wall-clock time.
        self.lastblip = time.time()

        self.silent = silent

        self.av = np.zeros([44])
# Example #4
# 0
    def __init__(self, steps=1, lr=0.0001, decay=0.00001, silent=True):
        """Build the GAN/eval models and the reusable training buffers.

        Args:
            steps: initial step counter handed to the GAN.
            lr: Adam learning rate.
            decay: learning-rate decay.
            silent: when True, suppress the periodic console printouts.
        """

        self.GAN = GAN(steps=steps, lr=lr, decay=decay)
        self.GAN.GenModel()
        self.GAN.GenModelA()

        self.GAN.G.summary()

        # time.clock() was removed in Python 3.8; use wall-clock time.
        self.lastblip = time.time()

        self.noise_level = 0

        # NOTE(review): `directory` and `im_size` are module-level globals —
        # confirm they are defined at import time.
        self.im = dataGenerator(directory, im_size, flip=True)

        self.silent = silent

        #Train Generator to be in the middle, not all the way at real. Apparently works better??
        self.ones = np.ones((BATCH_SIZE, 1), dtype=np.float32)
        self.zeros = np.zeros((BATCH_SIZE, 1), dtype=np.float32)
        self.nones = -self.ones

        # Per-sample gradient-penalty weight; np.full replaces the
        # list-multiply + astype round-trip with one allocation.
        self.gp_weight = np.full((BATCH_SIZE,), 10.0, dtype='float32')
# Example #5
# 0
    def train(self, directory=''):
        """Run one training step, plus periodic EMA / logging / checkpointing.

        Args:
            directory: dataset directory used to lazily build the data
                generator if one was not created yet.
        """
        if self.im is None:
            # Data generator (my own code, not from TF 2.0)
            self.im = dataGenerator(directory, im_size, flip=True)

        # Train Alternating: mixed vs. plain noise styles, chosen at random.
        if random() < mixed_prob:
            style = mixedList(BATCH_SIZE)
        else:
            style = noiseList(BATCH_SIZE)

        # Apply penalties every 16 steps
        apply_gradient_penalty = self.GAN.steps % 2 == 0 or self.GAN.steps < 10000  # noqa
        apply_path_penalty = self.GAN.steps % 16 == 0

        a, b, c, d = self.train_step(
            self.im.get_batch(BATCH_SIZE).astype('float32'), style,
            nImage(BATCH_SIZE), apply_gradient_penalty, apply_path_penalty)

        # Adjust path length penalty mean
        # d = pl_mean when no penalty is applied
        if self.pl_mean == 0:
            self.pl_mean = np.mean(d)
        self.pl_mean = 0.99 * self.pl_mean + 0.01 * np.mean(d)

        # Exponential moving average of generator weights, after warm-up.
        if self.GAN.steps % 10 == 0 and self.GAN.steps > 20000:
            self.GAN.EMA()

        if self.GAN.steps <= 25000 and self.GAN.steps % 1000 == 2:
            self.GAN.MAinit()

        # Abort the run outright on a diverged discriminator loss.
        if np.isnan(a):
            print("NaN Value Error.")
            exit()

        # Print info
        if self.GAN.steps % 100 == 0 and not self.silent:
            print("\n\nRound " + str(self.GAN.steps) + ":")
            print("D:", np.array(a))
            print("G:", np.array(b))
            print("PL:", self.pl_mean)

            # time.clock() was removed in Python 3.8; time.time() gives the
            # wall-clock delta the steps/sec report intends.
            s = round((time.time() - self.lastblip), 4)
            self.lastblip = time.time()

            steps_per_second = 100 / s
            steps_per_minute = steps_per_second * 60
            steps_per_hour = steps_per_minute * 60
            print("Steps/Second: " + str(round(steps_per_second, 2)))
            print("Steps/Hour: " + str(round(steps_per_hour)))

            min1k = floor(1000 / steps_per_minute)
            sec1k = floor(1000 / steps_per_second) % 60
            print("1k Steps: " + str(min1k) + ":" + str(sec1k))
            # 1e-7 guards the integer divisions below against a zero result
            # at exactly 200000 steps.
            steps_left = 200000 - self.GAN.steps + 1e-7
            hours_left = steps_left // steps_per_hour
            minutes_left = (steps_left // steps_per_minute) % 60

            print("Til Completion: " + str(int(hours_left)) + "h" +
                  str(int(minutes_left)) + "m")
            print()

            # Save Model
            # NOTE(review): saving/evaluation only happens inside this
            # not-silent branch — confirm silent runs are meant to skip
            # checkpointing entirely.
            if self.GAN.steps % 500 == 0:
                self.save(floor(self.GAN.steps / 10000))
            if self.GAN.steps % 1000 == 0 or (self.GAN.steps % 100 == 0
                                              and self.GAN.steps < 2500):
                self.evaluate(floor(self.GAN.steps / 1000))

        printProgressBar(self.GAN.steps % 100, 99, decimals=0)

        self.GAN.steps = self.GAN.steps + 1
# Example #6
# 0
# Freeze rep3/rep4 (presumably pretrained feature networks used as fixed
# loss extractors — TODO confirm) at both the model and per-layer level.
rep3.trainable = False
for layer in rep3.layers:
    layer.trainable = False

rep4.trainable = False
for layer in rep4.layers:
    layer.trainable = False

#Compilation
# Four content losses + four style losses; the style terms are all scaled
# by `ss` (defined elsewhere in the file — TODO confirm its value).
model.compile(optimizer = Adam(lr = 0.0001),
                loss = [c_loss1, c_loss2, c_loss3, c_loss4, s_loss1, s_loss2, s_loss3, s_loss4],
                loss_weights = [1, 1, 1, 1, ss, ss, ss, ss])

#Datasets
# 256x256 images: COCO supplies content, WikiArt supplies style.
coco = dataGenerator('coco', 256)
wikiart = dataGenerator('wikiart', 256)
#Evaluation function
def evaluate(num = 0):
    content_images = coco.get_batch(8)
    style_images = wikiart.get_batch(8)

    out_images = eval_model.predict([content_images, style_images], batch_size = 2)

    r = []
    for i in range(8):
        r.append(np.concatenate([content_images[i], style_images[i], out_images[i]], axis = 1))

    c = np.concatenate(r, axis = 0)