def train_model(self, fname=None):
    """Train this model for ``num_epochs`` epochs, tracking identity and KL loss.

    Optionally resumes from the checkpoint ``fname`` and saves back to it after
    every epoch.  Per-batch losses are appended to ``self.loss_identity`` and
    ``self.kl_loss`` and periodically copied into ``self.loss``.

    Depends on module-level names (``batch_size``, ``num_epochs``,
    ``record_epochs``, ``optimizer``, ``model_name``, ``train_step``,
    ``utils``) — presumably defined at file scope; verify in the full module.

    NOTE(review): the original source was collapsed onto one physical line;
    the statement nesting below is a reconstruction — confirm against history.
    """
    time.sleep(1)
    # Best-effort resume: a missing/unreadable checkpoint is non-fatal.
    try:
        self.load(fname)
        print("File load successful.")
    except Exception:
        print("File load failed.")
    image_dataset, dataset_size = utils.create_dataset(
        batch_size=batch_size)
    dataset_size = dataset_size - 1  # drop one batch — presumably the partial last one; confirm
    print("Dataset size is", dataset_size)
    print("Total number of images is", dataset_size * batch_size)
    # Progress is logged every `record_batch` batches; only half the dataset
    # is consumed per epoch (see the .take() below).
    record_batch = utils.record_steps(int(dataset_size / 2))
    start_time = time.time()
    print("Beginning training at", start_time)
    for i in range(num_epochs):
        print("Starting epoch {}/{}".format(i, num_epochs))
        start = time.time()
        batch_on = 0
        # zip() over a single iterable wraps each batch in a 1-tuple.
        for source in zip(image_dataset.take(int(dataset_size / 2))):
            loss_identity, kl_loss = train_step(self, source, optimizer)
            if batch_on % record_batch == 0:
                print("Beginning batch #" + str(batch_on), 'out of',
                      int(dataset_size / 2), 'of size', batch_size)
            self.loss_identity += [loss_identity]
            self.kl_loss += [kl_loss]
            batch_on += 1
        duration = time.time() - start
        print(int(duration / 60), "minutes &", int(duration % 60),
              "seconds, for epoch", i)
        # Periodically snapshot the loss history and render sample outputs.
        if i % record_epochs == 0:
            self.loss['Identity'] = self.loss_identity
            self.loss['KL'] = self.kl_loss
            utils.test_model(self, source, num=i, test=True,
                             name=model_name, details='identity')
            # Take a single batch to act as the style input, then stop.
            for style in zip(image_dataset.take(1)):
                utils.test_model(self, source, style, num=i, test=True,
                                 name=model_name, details='transfer')
                break
            #act = keract.get_activations(self, source)
            #keract.display_activations(act)
            print('\n')
        # Rebuild the dataset each epoch — presumably to reshuffle; verify.
        image_dataset, _ = utils.create_dataset(batch_size=batch_size)
        self.save(fname)
        time.sleep(.5)
    print('Training completed in', int((time.time() - start_time) / 60),
          "minutes &", int(duration % 60), "seconds")
def train_model(self, fname=None):
    """Train for ``num_epochs`` epochs, validating with a separate eval model.

    A second, non-training instance (``AE_A(training=False)``) reloads the
    just-saved weights every ``display_mod`` epochs and is used to render
    identity and merge test images, so evaluation never touches the training
    instance.  A failing batch is logged and skipped rather than aborting.

    Depends on module-level names (``batch_size``, ``num_epochs``,
    ``display_mod``, ``optimizer``, ``model_name``, ``AE_A``, ``utils``) —
    presumably defined at file scope; verify in the full module.

    NOTE(review): the original source was collapsed onto one physical line;
    the statement nesting below is a reconstruction — confirm against history.
    """
    time.sleep(1)
    # Best-effort resume: a missing/unreadable checkpoint is non-fatal.
    try:
        self.load(fname)
        print("File load successful.")
    except Exception:
        print("File load failed.")
    # Inference-only instance used purely for periodic evaluation below.
    model_val = AE_A(training=False)
    image_dataset, dataset_size = utils.create_dataset(batch_size=batch_size)
    # Use half the dataset per epoch, minus one batch (presumably the partial
    # last one — confirm).
    dataset_size = int((dataset_size - 1)/2)
    print("Dataset size is", dataset_size)
    print("Total number of images is", dataset_size * batch_size)
    record_batch = utils.record_steps(dataset_size)
    start_time = time.time()
    for i in range(num_epochs):
        print(f'Starting epoch {i}/{num_epochs}')
        start = time.time()
        batch_on = 0
        # zip() over a single iterable wraps each batch in a 1-tuple.
        for source in zip(image_dataset.take(dataset_size)):
            # One failed batch is logged and skipped; the epoch continues.
            try:
                loss_identity, kl_loss = self.train_step(source, optimizer)
                if batch_on % record_batch == 0:
                    print(f'Beginning batch #{batch_on} out of {dataset_size} of size {batch_size}')
                self.loss_identity += [loss_identity]
                self.kl_loss += [kl_loss]
            except Exception:
                print(f'Batch #{batch_on} failed. Continuing with next batch.')
            batch_on += 1
        # Checkpoint every epoch so the eval model below can reload the weights.
        self.save(fname)
        time.sleep(0.5)
        duration = time.time() - start
        utils.print_time_remaining(i, num_epochs, duration)
        if i % display_mod == 0:
            # Reload the just-saved weights into the eval-only model.
            model_val.load(model_name, compile=False)
            model_val.loss['Identity'] = self.loss_identity
            model_val.loss['KL'] = self.kl_loss
            utils.test_model(model_val, source, num=i, test=True,
                             name=model_name, details='identity')
            # Take a single batch to act as the style input, then stop.
            for style in zip(image_dataset.take(1)):
                utils.test_model(model_val, source, style, num=i, test=True,
                                 name=model_name, details='merge')
                break
            #act = keract.get_activations(self, source)
            #keract.display_activations(act)
            print('\n')
            # Rebuild the dataset — presumably to reshuffle; verify.
            image_dataset, _ = utils.create_dataset(batch_size=batch_size)
    print('Training completed in', int((time.time() - start_time) / 60),
          "minutes &", int(duration % 60), "seconds")
def train_model(self, fname=None):
    """Train on random 32x32 crops for ``num_epochs`` epochs (identity loss only).

    Each batch is reduced to a random 32x32 crop before ``train_step``; only
    the identity loss is tracked in this variant (the style-transfer
    evaluation is disabled — see the string-literal dead code below).

    Depends on module-level names (``batch_size``, ``num_epochs``,
    ``record_epochs``, ``optimizer``, ``model_name``, ``train_step``,
    ``utils``, ``np``) — presumably defined at file scope; verify in the
    full module.

    NOTE(review): the original source was collapsed onto one physical line;
    the statement nesting below is a reconstruction — confirm against history.
    """
    time.sleep(1)
    # Best-effort resume: a missing/unreadable checkpoint is non-fatal.
    try:
        self.load(fname)
        print("File load successful.")
    except Exception:
        print("File load failed.")
    image_dataset, dataset_size = utils.create_dataset(
        batch_size=batch_size)
    dataset_size = dataset_size - 1  # drop one batch — presumably the partial last one; confirm
    print("Dataset size is", dataset_size)
    print("Total number of images is", dataset_size * batch_size)
    record_batch = utils.record_steps(int(dataset_size))
    start_time = time.time()
    print("Beginning training at", start_time)
    for i in range(num_epochs):
        print("Starting epoch {}/{}".format(i, num_epochs))
        start = time.time()
        batch_on = 0
        # zip() over a single iterable wraps each batch in a 1-tuple.
        for source in zip(image_dataset.take(int(dataset_size))):
            # Train on a random 32x32 crop of the batch.
            source = utils.get_random_crop(np.array(source), 32, 32)
            loss_identity = train_step(self, source, optimizer)
            if batch_on % record_batch == 0:
                print("Beginning batch #" + str(batch_on), 'out of',
                      int(dataset_size), 'of size', batch_size)
            self.loss_identity += [loss_identity]
            batch_on += 1
        # Periodically snapshot the loss history and render sample outputs.
        if i % record_epochs == 0:
            self.loss['Identity'] = self.loss_identity
            utils.test_model(self, source, num=i, test=True,
                             name=model_name, details='identity')
            # Style-transfer evaluation disabled; kept verbatim for reference.
            '''for style in zip(image_dataset.take(1)): style = utils.get_random_crop(style, 32, 32) utils.test_model(self, source, style, num=i, test=True, name=model_name, details='transfer') break'''
            print('\n')
        duration = time.time() - start
        utils.print_time_remaining(i, num_epochs, duration)
        # Rebuild the dataset each epoch — presumably to reshuffle; verify.
        image_dataset, _ = utils.create_dataset(batch_size=batch_size)
        self.save(fname)
        time.sleep(.5)
    print('Training completed in', int((time.time() - start_time) / 60),
          "minutes &", int(duration % 60), "seconds")