import os.path as osp

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image

# Project-local imports (assumed): Encoder, Decoder, get_data; `args` is the
# parsed command-line namespace (see the argparse sketch below).


def main():
    # Create TensorBoard summary writer (created for logging; not yet used below).
    writer = SummaryWriter(args.experiment_id)
    # [TODO] may need to resize input image
    cudnn.enabled = True

    # Create model: Encoder.
    model_encoder = Encoder()
    model_encoder.train()
    model_encoder.cuda(args.gpu)
    optimizer_encoder = optim.Adam(model_encoder.parameters(),
                                   lr=args.learning_rate, betas=(0.95, 0.99))
    optimizer_encoder.zero_grad()

    # Create model: Decoder.
    model_decoder = Decoder()
    model_decoder.train()
    model_decoder.cuda(args.gpu)
    optimizer_decoder = optim.Adam(model_decoder.parameters(),
                                   lr=args.learning_rate, betas=(0.95, 0.99))
    optimizer_decoder.zero_grad()

    l2loss = nn.MSELoss()

    # Load data in chunks of 30000 samples.
    for i in range(1, 360002, 30000):
        train_data, valid_data = get_data(i)
        for e in range(1, args.epoch + 1):
            train_loss_value = 0
            validation_loss_value = 0

            for j in range(0, int(args.train_size / 4), args.batch_size):
                # Reset *both* optimizers (the original zeroed the decoder twice).
                optimizer_encoder.zero_grad()
                optimizer_decoder.zero_grad()
                image = torch.tensor(train_data[j: j + args.batch_size]).cuda(args.gpu)
                latent = model_encoder(image)
                img_recon = model_decoder(latent)
                img_recon = F.interpolate(img_recon, size=image.shape[2:],
                                          mode='bilinear', align_corners=True)
                loss = l2loss(img_recon, image)
                train_loss_value += loss.item() / args.batch_size
                loss.backward()
                optimizer_decoder.step()
                optimizer_encoder.step()

            print("data load: {:8d}".format(i))
            print("epoch: {:8d}".format(e))
            print("train_loss: {:08.6f}".format(
                train_loss_value / (args.train_size / args.batch_size)))

            # Validation: no gradients, models in eval mode.
            model_encoder.eval()
            model_decoder.eval()
            with torch.no_grad():
                for j in range(0, int(args.validation_size / 4), args.batch_size):
                    image = torch.tensor(valid_data[j: j + args.batch_size]).cuda(args.gpu)
                    latent = model_encoder(image)
                    img_recon = model_decoder(latent)
                    img_recon = F.interpolate(img_recon, size=image.shape[2:],
                                              mode='bilinear', align_corners=True)
                    # Save the resized reconstruction next to the ground truth.
                    save_image(img_recon[0][0],
                               args.image_dir + '/fake' + str(i) + "_" + str(j) + ".png")
                    save_image(image[0][0],
                               args.image_dir + '/real' + str(i) + "_" + str(j) + ".png")
                    # Compare against the *validation* batch (the original
                    # accidentally reloaded a training batch here).
                    loss = l2loss(img_recon, image)
                    validation_loss_value += loss.item() / args.batch_size
            model_encoder.train()
            model_decoder.train()
            print("validation_loss: {:08.6f}".format(
                validation_loss_value / (args.validation_size / args.batch_size)))

    torch.save({'encoder_state_dict': model_encoder.state_dict()},
               osp.join(args.checkpoint_dir, 'AE_encoder.pth'))
    torch.save({'decoder_state_dict': model_decoder.state_dict()},
               osp.join(args.checkpoint_dir, 'AE_decoder.pth'))
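# A minimal sketch of the argparse wiring this script assumes. Every flag name
# below is referenced in main() above; the default values are illustrative
# placeholders, not the authors' settings.
import argparse

def get_arguments():
    parser = argparse.ArgumentParser(description="Autoencoder pretraining")
    parser.add_argument("--experiment-id", type=str, default="runs/ae_baseline")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--learning-rate", type=float, default=1e-4)
    parser.add_argument("--epoch", type=int, default=10)
    parser.add_argument("--batch-size", type=int, default=16)
    parser.add_argument("--train-size", type=int, default=24000)
    parser.add_argument("--validation-size", type=int, default=6000)
    parser.add_argument("--image-dir", type=str, default="./images")
    parser.add_argument("--checkpoint-dir", type=str, default="./checkpoints")
    return parser.parse_args()

if __name__ == "__main__":
    args = get_arguments()
    main()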
import datetime

import torch
from torchvision.utils import save_image

# Project-local imports (assumed): Trainer_Base, Encoder, Decoder, VGG16_mid,
# Transformer, gram_matrix, denorm.


class Trainer(Trainer_Base):

    def create_model(self):
        self.encoder = Encoder(True).cuda()
        self.decoder = Decoder(True, True).cuda()
        self.D = VGG16_mid().cuda()
        self.attention1 = Transformer(4, 512, self.config.topk, True, False).cuda()

    def train(self):
        self.create_model()
        optimizer = torch.optim.Adam(self.attention1.parameters(),
                                     lr=self.config.learning_rate)
        optimizer2 = torch.optim.Adam(self.decoder.parameters(),
                                      lr=self.config.learning_rate)
        criterion = torch.nn.L1Loss()
        criterion_p = torch.nn.MSELoss(reduction='mean')
        styles = iter(self.style_loader)
        self.encoder.eval()
        self.decoder.train()
        self.reporter.writeInfo("Start to train the model")
        for e in range(1, self.config.epoch_size + 1):
            for i, (content, target) in enumerate(self.train_loader):
                # Restart the style loader when it is exhausted.
                try:
                    style, target = next(styles)
                except StopIteration:
                    styles = iter(self.style_loader)
                    style, target = next(styles)
                content = content.cuda()
                style = style.cuda()
                fea_c = self.encoder(content)
                fea_s = self.encoder(style)
                out_feature, attention_map = self.attention1(fea_c, fea_s)
                # out_feature, attention_map = self.attention2(out_feature, fea_s)
                rec, _ = self.attention1(fea_s, fea_s)
                out_content = self.decoder(out_feature)
                c1, c2, c3, _ = self.D(content)
                h1, h2, h3, _ = self.D(out_content)
                s1, s2, s3, _ = self.D(style)
                loss_content = torch.norm(c3 - h3, p=2)
                # Style loss: L1 distance between Gram matrices of the style
                # and output features at each VGG level.
                loss_perceptual = 0
                for s_feat, h_feat in zip((s1, s2, s3), (h1, h2, h3)):
                    loss_perceptual += criterion(gram_matrix(s_feat),
                                                 gram_matrix(h_feat))
                loss = (loss_content * self.config.content_weight
                        + loss_perceptual * self.config.style_weight)
                optimizer.zero_grad()
                optimizer2.zero_grad()
                loss.backward()
                optimizer.step()
                optimizer2.step()

                if i % self.config.log_interval == 0:
                    now = datetime.datetime.now()
                    otherStyleTime = now.strftime("%Y-%m-%d %H:%M:%S")
                    print(otherStyleTime)
                    print('epoch: ', e, ' iter: ', i)
                    print('attention scatters: ',
                          torch.std(attention_map.argmax(-1).float(), 1).mean().cpu())
                    print(attention_map.shape)
                    # self.attention1.hard = True
                    self.attention1.eval()
                    self.decoder.eval()
                    tosave, perc, cont = self.eval()
                    save_image(denorm(tosave),
                               self.image_dir + '/epoch_{}-iter_{}.png'.format(e, i))
                    print("image saved to " + self.image_dir
                          + '/epoch_{}-iter_{}.png'.format(e, i))
                    print('content loss:', cont)
                    print('perceptual loss:', perc)
                    self.reporter.writeTrainLog(
                        e, i, f'''
attention scatters: {torch.std(attention_map.argmax(-1).float(), 1).mean().cpu()}
content loss: {cont}
perceptual loss: {perc}
''')
                    # self.attention1.hard = False
                    self.attention1.train()
                    self.decoder.train()
                    torch.save(
                        {
                            'layer1': self.attention1.state_dict(),
                            # 'layer2': self.attention2.state_dict(),
                            'decoder': self.decoder.state_dict()
                        },
                        f'{self.model_state_dir}/epoch_{e}-iter_{i}.pth')
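# The trainers above and below call gram_matrix() without defining it in these
# files. A minimal sketch, assuming feature maps shaped (B, C, H, W) and
# normalization by the number of elements; the project's own version may differ.
import torch

def gram_matrix(feat: torch.Tensor) -> torch.Tensor:
    b, c, h, w = feat.shape
    flat = feat.view(b, c, h * w)                  # flatten spatial dims
    gram = torch.bmm(flat, flat.transpose(1, 2))   # (B, C, C) channel correlations
    return gram / (c * h * w)                      # normalize by element count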
import itertools
import os

import joblib
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
from torchvision.utils import save_image

# Project-local imports (assumed): Encoder, Decoder, VGG16_mid, gram_matrix,
# denorm, PSNR2, normalize.


class Trainer(object):

    def __init__(self, config):
        self.train_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(
                config.train_data_dir,
                transforms.Compose([
                    # RandomSizedCrop is deprecated in torchvision.
                    transforms.RandomResizedCrop(config.image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    normalize,
                ])),
            batch_size=config.batch_size,
            shuffle=True,
            num_workers=config.workers,
            pin_memory=True)
        os.makedirs(f'{config.save_dir}/{config.version}', exist_ok=True)
        self.loss_dir = f'{config.save_dir}/{config.version}/loss'
        self.model_state_dir = f'{config.save_dir}/{config.version}/model_state'
        self.image_dir = f'{config.save_dir}/{config.version}/image'
        self.psnr_dir = f'{config.save_dir}/{config.version}/psnr'
        os.makedirs(self.loss_dir, exist_ok=True)
        os.makedirs(self.model_state_dir, exist_ok=True)
        os.makedirs(self.image_dir, exist_ok=True)
        os.makedirs(self.psnr_dir, exist_ok=True)
        self.encoder = Encoder(True).cuda()
        self.decoder = Decoder(False, True).cuda()
        self.D = VGG16_mid().cuda()
        self.config = config

    def train(self):
        optimizer = torch.optim.Adam(
            itertools.chain(self.encoder.parameters(), self.decoder.parameters()),
            lr=self.config.learning_rate)
        criterion = torch.nn.L1Loss()  # alternatively torch.nn.MSELoss()
        loss_list = []
        self.encoder.train()
        self.decoder.train()
        for e in range(1, self.config.epoch_size + 1):
            print(f'Start {e} epoch')
            psnr_list = []
            # for i, (content, target) in tqdm(enumerate(self.train_loader, 1)):
            for i, (content, target) in enumerate(self.train_loader):
                content = content.cuda()
                content_feature = self.encoder(content)
                out_content = self.decoder(content_feature)
                loss = criterion(content, out_content)
                c1, c2, c3, _ = self.D(content)
                h1, h2, h3, _ = self.D(out_content)
                b, c, w, h = c3.shape
                loss_content = torch.norm(c3 - h3, p=2) / c / w / h
                loss_perceptual = 0
                for c_feat, h_feat in zip((c1, c2, c3), (h1, h2, h3)):
                    loss_perceptual += criterion(gram_matrix(c_feat),
                                                 gram_matrix(h_feat))
                loss = loss + loss_content + loss_perceptual * 10000
                loss_list.append(loss.item())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                with torch.no_grad():
                    if i % self.config.log_interval == 0:
                        print(loss.item())
                        print(loss_content.item())
                        print(loss_perceptual.item() * 10000)
                        psnr = PSNR2(denorm(content).cpu().numpy(),
                                     denorm(out_content).cpu().numpy())
                        psnr_list.append(psnr)
                        print('psnr:', psnr)
                        ori = torch.cat(list(denorm(content)), 2)
                        out = torch.cat(list(denorm(out_content)), 2)
                        save_image(torch.cat([ori, out], 1),
                                   self.image_dir + '/epoch_{}.png'.format(e))
                        print("image saved to " + self.image_dir
                              + '/epoch_{}.png'.format(e))
            torch.save(self.decoder.state_dict(),
                       f'{self.model_state_dir}/{e}_epoch.pth')
            filename = self.psnr_dir + '/e' + str(e) + '.pkl'
            joblib.dump(psnr_list, filename)
        self.plot_loss_curve(loss_list)

    def plot_loss_curve(self, loss_list):
        plt.plot(range(len(loss_list)), loss_list)
        plt.xlabel('iteration')
        plt.ylabel('loss')
        plt.title('train loss')
        plt.savefig(f'{self.loss_dir}/train_loss.png')
        with open(f'{self.loss_dir}/loss_log.txt', 'w') as f:
            for l in loss_list:
                f.write(f'{l}\n')
        print(f'Loss saved in {self.loss_dir}')
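# denorm() and PSNR2() are used by the trainers above but defined elsewhere in
# the project. A hedged sketch of what they plausibly do, assuming `normalize`
# is the usual ImageNet mean/std transform; swap in the project's actual values.
import numpy as np
import torch

_MEAN = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
_STD = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)

def denorm(x: torch.Tensor) -> torch.Tensor:
    """Undo channel-wise normalization and clamp into [0, 1]."""
    return (x.detach().cpu() * _STD + _MEAN).clamp(0, 1)

def PSNR2(a: np.ndarray, b: np.ndarray, max_val: float = 1.0) -> float:
    """Peak signal-to-noise ratio between two image batches in [0, max_val]."""
    mse = np.mean((a - b) ** 2)
    return float(10 * np.log10(max_val ** 2 / mse))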
import math
import os
import random

import joblib
import pandas as pd
import torch
from sklearn.svm import SVC
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import ToPILImage, ToTensor
from tqdm import tqdm

# Assumed external imports: imread (e.g. from imageio or skimage.io),
# get_linear_schedule_with_warmup (transformers), plus the project-local
# Encoder, Decoder, Resnet, FERDataset, LabelEnum.


class Instructor:

    def __init__(self, model_name: str, args):
        self.model_name = model_name
        self.args = args
        self.encoder = Encoder(self.args.add_noise).to(self.args.device)
        self.decoder = Decoder(self.args.upsample_mode).to(self.args.device)
        self.pretrainDataset = None
        self.pretrainDataloader = None
        self.pretrainOptimizer = None
        self.pretrainScheduler = None
        self.RHO_tensor = None
        self.pretrain_batch_cnt = 0
        self.writer = None
        self.svmDataset = None
        self.svmDataloader = None
        self.testDataset = None
        self.testDataloader = None
        self.svm = SVC(C=self.args.svm_c,
                       kernel=self.args.svm_ker,
                       verbose=True,
                       max_iter=self.args.svm_max_iter)
        self.resnet = Resnet(use_pretrained=True,
                             num_classes=self.args.classes,
                             resnet_depth=self.args.resnet_depth,
                             dropout=self.args.resnet_dropout).to(self.args.device)
        self.resnetOptimizer = None
        self.resnetScheduler = None
        self.resnetLossFn = None

    def _load_data_by_label(self, label: str) -> list:
        ret = []
        LABEL_PATH = os.path.join(self.args.TRAIN_PATH, label)
        for dir_path, _, file_list in os.walk(LABEL_PATH, topdown=False):
            for file_name in file_list:
                file_path = os.path.join(dir_path, file_name)
                img_np = imread(file_path)
                ret.append(img_np.copy().tolist())
        return ret

    def _load_all_data(self):
        all_data = []
        all_labels = []
        for label_id in range(0, self.args.classes):
            expression = LabelEnum(label_id)
            sub_data = self._load_data_by_label(expression.name)
            sub_labels = [label_id] * len(sub_data)
            all_data.extend(sub_data)
            all_labels.extend(sub_labels)
        return all_data, all_labels

    def _load_test_data(self):
        file_map = pd.read_csv(
            os.path.join(self.args.RAW_PATH, 'submission.csv'))
        test_data = []
        img_names = []
        for file_name in file_map['file_name']:
            file_path = os.path.join(self.args.TEST_PATH, file_name)
            img_np = imread(file_path)
            test_data.append(img_np.copy().tolist())
            img_names.append(file_name)
        return test_data, img_names

    def trainAutoEncoder(self):
        self.writer = SummaryWriter(
            os.path.join(self.args.LOG_PATH, self.model_name))
        all_data, all_labels = self._load_all_data()
        self.pretrainDataset = FERDataset(all_data, labels=all_labels,
                                          args=self.args)
        self.pretrainDataloader = DataLoader(dataset=self.pretrainDataset,
                                             batch_size=self.args.batch_size,
                                             shuffle=True,
                                             num_workers=self.args.num_workers)
        self.pretrainOptimizer = torch.optim.Adam([
            {'params': self.encoder.parameters(), 'lr': self.args.pretrain_lr},
            {'params': self.decoder.parameters(), 'lr': self.args.pretrain_lr},
        ])
        tot_steps = math.ceil(
            len(self.pretrainDataloader) / self.args.cumul_batch) * self.args.epochs
        self.pretrainScheduler = get_linear_schedule_with_warmup(
            self.pretrainOptimizer, num_warmup_steps=0,
            num_training_steps=tot_steps)
        self.RHO_tensor = torch.tensor(
            [self.args.rho for _ in range(self.args.embed_dim)],
            dtype=torch.float).unsqueeze(0).to(self.args.device)
        epochs = self.args.epochs
        for epoch in range(1, epochs + 1):
            print()
            print("================ AutoEncoder Training Epoch {:}/{:} ================"
                  .format(epoch, epochs))
            print(" ---- Start training ------>")
            self.epochTrainAutoEncoder(epoch)
            print()
        self.writer.close()

    def epochTrainAutoEncoder(self, epoch):
        self.encoder.train()
        self.decoder.train()
        cumul_loss = 0
        cumul_steps = 0
        cumul_samples = 0
        self.pretrainOptimizer.zero_grad()
        cumulative_batch = 0
        for idx, (images, labels) in enumerate(tqdm(self.pretrainDataloader)):
            batch_size = images.shape[0]
            images, labels = images.to(self.args.device), labels.to(self.args.device)
            embeds = self.encoder(images)
            outputs = self.decoder(embeds)
            loss = torch.nn.functional.mse_loss(outputs, images)
            # Optional sparsity regularizer: KL divergence between the softmax
            # of the batch-mean embedding and the target sparsity vector RHO.
            if self.args.use_sparse:
                rho_hat = torch.mean(embeds, dim=0, keepdim=True)
                sparse_penalty = self.args.regulizer_weight * torch.nn.functional.kl_div(
                    input=torch.nn.functional.log_softmax(rho_hat, dim=-1),
                    target=torch.nn.functional.softmax(self.RHO_tensor, dim=-1))
                loss = loss + sparse_penalty
            # Gradient accumulation: scale so that cumul_batch mini-batches
            # contribute one optimizer step.
            loss_each = loss / self.args.cumul_batch
            loss_each.backward()
            cumulative_batch += 1
            cumul_steps += 1
            cumul_loss += loss.detach().cpu().item() * batch_size
            cumul_samples += batch_size
            if cumulative_batch >= self.args.cumul_batch:
                torch.nn.utils.clip_grad_norm_(self.encoder.parameters(),
                                               max_norm=self.args.max_norm)
                torch.nn.utils.clip_grad_norm_(self.decoder.parameters(),
                                               max_norm=self.args.max_norm)
                self.pretrainOptimizer.step()
                self.pretrainScheduler.step()
                self.pretrainOptimizer.zero_grad()
                cumulative_batch = 0
            if cumul_steps >= self.args.disp_period or idx + 1 == len(self.pretrainDataloader):
                print(" -> cumul_steps={:} loss={:}".format(
                    cumul_steps, cumul_loss / cumul_samples))
                self.pretrain_batch_cnt += 1
                self.writer.add_scalar('batch-loss', cumul_loss / cumul_samples,
                                       global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar(
                    'encoder_lr',
                    self.pretrainOptimizer.state_dict()['param_groups'][0]['lr'],
                    global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar(
                    'decoder_lr',
                    self.pretrainOptimizer.state_dict()['param_groups'][1]['lr'],
                    global_step=self.pretrain_batch_cnt)
                cumul_steps = 0
                cumul_loss = 0
                cumul_samples = 0
        self.saveAutoEncoder(epoch)

    def saveAutoEncoder(self, epoch):
        encoderPath = os.path.join(
            self.args.CKPT_PATH,
            self.model_name + "--Encoder" + "--EPOCH-{:}".format(epoch))
        decoderPath = os.path.join(
            self.args.CKPT_PATH,
            self.model_name + "--Decoder" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Saving AutoEncoder {:} ......".format(self.model_name))
        torch.save(self.encoder.state_dict(), encoderPath)
        torch.save(self.decoder.state_dict(), decoderPath)
        print(" -> Successfully saved AutoEncoder.")
        print("-----------------------------------------------")

    def generateAutoEncoderTestResultSamples(self, sample_cnt):
        self.encoder.eval()
        self.decoder.eval()
        print(' -> Generating samples with AutoEncoder ...')
        save_path = os.path.join(self.args.SAMPLE_PATH, self.model_name)
        os.makedirs(save_path, exist_ok=True)
        with torch.no_grad():
            for dir_path, _, file_list in os.walk(self.args.TEST_PATH,
                                                  topdown=False):
                sample_file_list = random.choices(file_list, k=sample_cnt)
                for file_name in sample_file_list:
                    file_path = os.path.join(dir_path, file_name)
                    img = ToTensor()(imread(file_path).copy())
                    img = img.reshape(1, 1, 48, 48).to(self.args.device)
                    embed = self.encoder(img)
                    out = self.decoder(embed).cpu().reshape(1, 48, 48)
                    ToPILImage()(out).save(os.path.join(save_path, file_name))
        print(' -> Done sampling from AutoEncoder with test pictures.')

    def loadAutoEncoder(self, epoch):
        encoderPath = os.path.join(
            self.args.CKPT_PATH,
            self.model_name + "--Encoder" + "--EPOCH-{:}".format(epoch))
        decoderPath = os.path.join(
            self.args.CKPT_PATH,
            self.model_name + "--Decoder" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Loading AutoEncoder {:} ......".format(self.model_name))
        self.encoder.load_state_dict(torch.load(encoderPath))
        self.decoder.load_state_dict(torch.load(decoderPath))
        print(" -> Successfully loaded AutoEncoder.")
        print("-----------------------------------------------")
    def generateExtractedFeatures(self, data: torch.FloatTensor) -> torch.FloatTensor:
        """
        :param data: (batch, channel, l, w)
        :return: embed: (batch, embed_dim)
        """
        with torch.no_grad():
            data = data.to(self.args.device)
            embed = self.encoder(data)
        return embed.detach().cpu()

    def trainSVM(self, load: bool):
        svm_path = os.path.join(self.args.CKPT_PATH, self.model_name + '--svm')
        self.loadAutoEncoder(self.args.epochs)
        self.encoder.eval()
        self.decoder.eval()
        if load:
            print(' -> Loaded from SVM trained model.')
            self.svm = joblib.load(svm_path)
            return
        print()
        print("================ SVM Training Starting ================")
        all_data, all_labels = self._load_all_data()
        self.svmDataset = FERDataset(all_data, labels=all_labels,
                                     use_da=False, args=self.args)
        self.svmDataloader = DataLoader(dataset=self.svmDataset,
                                        batch_size=self.args.batch_size,
                                        shuffle=False,
                                        num_workers=self.args.num_workers)
        print(" -> Converting to extracted features ...")
        all_embeds = []
        all_labels = []
        for images, labels in self.svmDataloader:
            embeds = self.generateExtractedFeatures(images)
            all_embeds.extend(embeds.tolist())
            all_labels.extend(labels.reshape(-1).tolist())
        print(' -> Start SVM fit ...')
        self.svm.fit(X=all_embeds, y=all_labels)
        joblib.dump(self.svm, svm_path)
        print(" -> Done training for SVM.")

    def genTestResult(self, from_svm=True):
        print()
        print('-------------------------------------------------------')
        print(' -> Generating test result for {:} ...'.format(
            'SVM' if from_svm else 'Resnet'))
        test_data, img_names = self._load_test_data()
        self.testDataset = FERDataset(test_data, filenames=img_names,
                                      use_da=False, args=self.args)
        self.testDataloader = DataLoader(dataset=self.testDataset,
                                         batch_size=self.args.batch_size,
                                         shuffle=False,
                                         num_workers=self.args.num_workers)
        str_preds = []
        for images, filenames in self.testDataloader:
            if from_svm:
                embeds = self.generateExtractedFeatures(images)
                preds = self.svm.predict(X=embeds)
            else:
                self.resnet.eval()
                outs = self.resnet(images.repeat(1, 3, 1, 1).to(self.args.device))
                preds = outs.max(-1)[1].cpu().tolist()
            str_preds.extend([LabelEnum(pred).name for pred in preds])
        # Generate the submission file.
        assert len(str_preds) == len(img_names)
        submission = pd.DataFrame({'file_name': img_names, 'class': str_preds})
        submission.to_csv(os.path.join(self.args.DATA_PATH, 'submission.csv'),
                          index=False, index_label=False)
        print(' -> Done generation of submission.csv with model {:}'.format(
            self.model_name))

    def epochTrainResnet(self, epoch):
        self.resnet.train()
        cumul_loss = 0
        cumul_acc = 0
        cumul_steps = 0
        cumul_samples = 0
        cumulative_batch = 0
        self.resnetOptimizer.zero_grad()
        for idx, (images, labels) in enumerate(tqdm(self.pretrainDataloader)):
            batch_size = images.shape[0]
            images, labels = images.to(self.args.device), labels.to(self.args.device)
            # Add Gaussian noise, then tile grayscale to 3 channels for ResNet.
            images = images + torch.randn(images.shape).to(images.device) * self.args.add_noise
            images = images.repeat(1, 3, 1, 1)
            outs = self.resnet(images)
            preds = outs.max(-1)[1].unsqueeze(dim=1)
            cur_acc = (preds == labels).type(torch.int).sum().item()
            loss = self.resnetLossFn(outs, labels.squeeze(dim=1))
            loss_each = loss / self.args.cumul_batch
            loss_each.backward()
            cumulative_batch += 1
            cumul_steps += 1
            cumul_loss += loss.detach().cpu().item() * batch_size
            cumul_acc += cur_acc
            cumul_samples += batch_size
            if cumulative_batch >= self.args.cumul_batch:
                torch.nn.utils.clip_grad_norm_(self.resnet.parameters(),
                                               max_norm=self.args.max_norm)
                self.resnetOptimizer.step()
                self.resnetScheduler.step()
                self.resnetOptimizer.zero_grad()
                cumulative_batch = 0
            if cumul_steps >= self.args.disp_period or idx + 1 == len(self.pretrainDataloader):
                print(" -> cumul_steps={:} loss={:} acc={:}".format(
                    cumul_steps, cumul_loss / cumul_samples,
                    cumul_acc / cumul_samples))
                self.pretrain_batch_cnt += 1
                self.writer.add_scalar('batch-loss', cumul_loss / cumul_samples,
                                       global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar('batch-acc', cumul_acc / cumul_samples,
                                       global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar(
                    'resnet_lr',
                    self.resnetOptimizer.state_dict()['param_groups'][0]['lr'],
                    global_step=self.pretrain_batch_cnt)
                cumul_steps = 0
                cumul_loss = 0
                cumul_acc = 0
                cumul_samples = 0
        if epoch % 10 == 0:
            self.saveResnet(epoch)

    def saveResnet(self, epoch):
        resnetPath = os.path.join(
            self.args.CKPT_PATH,
            self.model_name + "--Resnet" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Saving Resnet {:} ......".format(self.model_name))
        torch.save(self.resnet.state_dict(), resnetPath)
        print(" -> Successfully saved Resnet.")
        print("-----------------------------------------------")

    def loadResnet(self, epoch):
        resnetPath = os.path.join(
            self.args.CKPT_PATH,
            self.model_name + "--Resnet" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Loading Resnet {:} ......".format(self.model_name))
        self.resnet.load_state_dict(torch.load(resnetPath))
        print(" -> Successfully loaded Resnet.")
        print("-----------------------------------------------")

    def trainResnet(self):
        self.writer = SummaryWriter(
            os.path.join(self.args.LOG_PATH, self.model_name))
        all_data, all_labels = self._load_all_data()
        self.pretrainDataset = FERDataset(all_data, labels=all_labels,
                                          args=self.args)
        self.pretrainDataloader = DataLoader(dataset=self.pretrainDataset,
                                             batch_size=self.args.batch_size,
                                             shuffle=True,
                                             num_workers=self.args.num_workers)
        self.resnetOptimizer = self.getResnetOptimizer()
        tot_steps = math.ceil(
            len(self.pretrainDataloader) / self.args.cumul_batch) * self.args.epochs
        self.resnetScheduler = get_linear_schedule_with_warmup(
            self.resnetOptimizer,
            num_warmup_steps=tot_steps * self.args.warmup_rate,
            num_training_steps=tot_steps)
        self.resnetLossFn = torch.nn.CrossEntropyLoss(
            weight=torch.tensor([
                9.40661861, 1.00104606, 0.56843877, 0.84912748, 1.02660468,
                1.29337298, 0.82603942,
            ], dtype=torch.float, device=self.args.device))
        epochs = self.args.epochs
        for epoch in range(1, epochs + 1):
            print()
            print("================ Resnet Training Epoch {:}/{:} ================"
                  .format(epoch, epochs))
            print(" ---- Start training ------>")
            self.epochTrainResnet(epoch)
            print()
        self.writer.close()

    def getResnetOptimizer(self):
        if self.args.resnet_optim == 'SGD':
            return torch.optim.SGD([{
                'params': self.resnet.baseParameters(),
                'lr': self.args.resnet_base_lr,
                'weight_decay': self.args.weight_decay,
                'momentum': self.args.resnet_momentum
            }, {
                'params': self.resnet.finetuneParameters(),
                'lr': self.args.resnet_ft_lr,
                'weight_decay': self.args.weight_decay,
                'momentum': self.args.resnet_momentum
            }], lr=self.args.resnet_base_lr)
        elif self.args.resnet_optim == 'Adam':
            return torch.optim.Adam([{
                'params': self.resnet.baseParameters(),
                'lr': self.args.resnet_base_lr
            }, {
                'params': self.resnet.finetuneParameters(),
                'lr': self.args.resnet_ft_lr,
                'weight_decay': self.args.weight_decay
            }])
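# A hypothetical end-to-end driver for the Instructor above. The method names
# all exist in the class; `get_args()` and the model name are placeholders for
# the project's actual config loading.
if __name__ == '__main__':
    args = get_args()  # assumed project-level config/argument loader
    instructor = Instructor('sparse-ae-svm', args)
    instructor.trainAutoEncoder()                        # pretrain encoder/decoder
    instructor.generateAutoEncoderTestResultSamples(8)   # eyeball reconstructions
    instructor.trainSVM(load=False)                      # fit SVM on frozen embeddings
    instructor.genTestResult(from_svm=True)              # write submission.csv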
import time

import deap.creator  # Individual is assumed to be registered via creator.create()
import numpy as np
import tensorflow as tf

# Project-local imports (assumed): Encoder, Decoder, Surrogate, Population,
# create_expression_tree.


class NeoOriginal:

    def __init__(  # TODO move parameters to config file
            self,
            pset,
            batch_size=64,
            max_size=100,
            vocab_inp_size=32,
            vocab_tar_size=32,
            embedding_dim=64,
            units=128,
            hidden_size=128,
            alpha=0.1,
            epochs=200,
            epoch_decay=1,
            min_epochs=10,
            verbose=True):
        self.alpha = alpha
        self.batch_size = batch_size
        self.max_size = max_size
        self.epochs = epochs
        self.epoch_decay = epoch_decay
        self.min_epochs = min_epochs
        self.train_steps = 0
        self.verbose = verbose
        self.enc = Encoder(vocab_inp_size, embedding_dim, units, batch_size)
        self.dec = Decoder(vocab_inp_size, vocab_tar_size, embedding_dim,
                           units, batch_size)
        self.surrogate = Surrogate(hidden_size)
        self.population = Population(pset, max_size, batch_size)
        self.prob = 0.5
        self.optimizer = tf.keras.optimizers.Adam()
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=False, reduction='none')

    def save_models(self):
        self.enc.save_weights(
            "model/weights/encoder/enc_{}".format(self.train_steps),
            save_format="tf")
        self.dec.save_weights(
            "model/weights/decoder/dec_{}".format(self.train_steps),
            save_format="tf")
        self.surrogate.save_weights(
            "model/weights/surrogate/surrogate_{}".format(self.train_steps),
            save_format="tf")

    def load_models(self, train_steps):
        self.enc.load_weights(
            "model/weights/encoder/enc_{}".format(train_steps))
        self.dec.load_weights(
            "model/weights/decoder/dec_{}".format(train_steps))
        self.surrogate.load_weights(
            "model/weights/surrogate/surrogate_{}".format(train_steps))

    @tf.function
    def train_step(self, inp, targ, targ_surrogate, enc_hidden, enc_cell):
        autoencoder_loss = 0
        with tf.GradientTape(persistent=True) as tape:
            enc_output, enc_hidden, enc_cell = self.enc(
                inp, [enc_hidden, enc_cell])
            surrogate_output = self.surrogate(enc_hidden)
            surrogate_loss = self.surrogate_loss_function(
                targ_surrogate, surrogate_output)
            dec_hidden = enc_hidden
            dec_cell = enc_cell
            context = tf.zeros(shape=[len(dec_hidden), 1, dec_hidden.shape[1]])
            dec_input = tf.expand_dims([1] * len(inp), 1)
            for t in range(1, self.max_size):
                initial_state = [dec_hidden, dec_cell]
                predictions, context, [dec_hidden, dec_cell], _ = self.dec(
                    dec_input, context, enc_output, initial_state)
                autoencoder_loss += self.autoencoder_loss_function(
                    targ[:, t], predictions)
                # Probabilistic teacher forcing
                # (feeding the target as the next input).
                if tf.random.uniform(shape=[], maxval=1,
                                     dtype=tf.float32) > self.prob:
                    dec_input = tf.expand_dims(targ[:, t], 1)
                else:
                    pred_token = tf.argmax(predictions, axis=1,
                                           output_type=tf.dtypes.int32)
                    dec_input = tf.expand_dims(pred_token, 1)
            loss = autoencoder_loss + self.alpha * surrogate_loss
        # Per-token AE loss for reporting (the original computed this twice).
        batch_ae_loss = autoencoder_loss / int(targ.shape[1])
        batch_loss = batch_ae_loss + self.alpha * surrogate_loss
        batch_surrogate_loss = surrogate_loss
        gradients, variables = self.backward(loss, tape)
        self.optimize(gradients, variables)
        return batch_loss, batch_ae_loss, batch_surrogate_loss

    def backward(self, loss, tape):
        variables = (self.enc.trainable_variables
                     + self.dec.trainable_variables
                     + self.surrogate.trainable_variables)
        gradients = tape.gradient(loss, variables)
        return gradients, variables

    def optimize(self, gradients, variables):
        self.optimizer.apply_gradients(zip(gradients, variables))

    def surrogate_breed(self, output, latent, tape):
        # Gradient of the surrogate's fitness prediction w.r.t. the latent code.
        gradients = tape.gradient(output, latent)
        return gradients

    def update_latent(self, latent, gradients, eta):
        # One gradient-ascent step in latent space.
        latent += eta * gradients
        return latent

    def autoencoder_loss_function(self, real, pred):
        # Mask out padding (token id 0) so it does not contribute to the loss.
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        loss_ = self.loss_object(real, pred)
        mask = tf.cast(mask, dtype=loss_.dtype)
        loss_ *= mask
        return tf.reduce_mean(loss_)

    def surrogate_loss_function(self, real, pred):
        loss_ = tf.keras.losses.mean_squared_error(real, pred)
        return tf.reduce_mean(loss_)

    def __train(self):
        for epoch in range(self.epochs):
            self.epoch = epoch
            start = time.time()
            total_loss = 0
            total_ae_loss = 0
            total_surrogate_loss = 0
            data_generator = self.population()
            for (batch, (inp, targ, targ_surrogate)) in enumerate(data_generator):
                enc_hidden = self.enc.initialize_hidden_state(batch_sz=len(inp))
                enc_cell = self.enc.initialize_cell_state(batch_sz=len(inp))
                batch_loss, batch_ae_loss, batch_surr_loss = self.train_step(
                    inp, targ, targ_surrogate, enc_hidden, enc_cell)
                total_loss += batch_loss
                total_ae_loss += batch_ae_loss
                total_surrogate_loss += batch_surr_loss
            if self.verbose and ((epoch + 1) % 10 == 0 or epoch == 0):
                epoch_loss = total_loss / self.population.steps_per_epoch
                ae_loss = total_ae_loss / self.population.steps_per_epoch
                surrogate_loss = (total_surrogate_loss
                                  / self.population.steps_per_epoch)
                epoch_time = time.time() - start
                print(f'Epoch {epoch + 1} Loss {epoch_loss:.6f} AE_loss '
                      f'{ae_loss:.6f} Surrogate_loss '
                      f'{surrogate_loss:.6f} Time: {epoch_time:.3f}')
        # Decrease the number of epochs, but don't go below self.min_epochs.
        self.epochs = max(self.epochs - self.epoch_decay, self.min_epochs)

    def _gen_children(self, candidates, enc_output, enc_hidden, enc_cell,
                      max_eta=1000):
        children = []
        eta = 0
        enc_mask = enc_output._keras_mask
        last_copy_ind = len(candidates)
        while eta < max_eta:
            eta += 1
            start = time.time()
            new_children = self._gen_decoded(eta, enc_output, enc_hidden,
                                             enc_cell, enc_mask).numpy()
            new_children = self.cut_seq(new_children, end_token=2)
            new_ind, copy_ind = self.find_new(new_children, candidates)
            if len(copy_ind) < last_copy_ind:
                last_copy_ind = len(copy_ind)
                print("Eta {} Not-changed {} Time: {:.3f}".format(
                    eta, len(copy_ind), time.time() - start))
            for i in new_ind:
                children.append(new_children[i])
            if len(copy_ind) < 1:
                break
            # Keep decoding only the unchanged candidates with a larger eta.
            enc_output = tf.gather(enc_output, copy_ind)
            enc_mask = tf.gather(enc_mask, copy_ind)
            enc_hidden = tf.gather(enc_hidden, copy_ind)
            enc_cell = tf.gather(enc_cell, copy_ind)
            candidates = tf.gather(candidates, copy_ind)
        if eta == max_eta:
            print("Maximal value of eta reached - breed stopped")
            for i in copy_ind:
                children.append(new_children[i])
        return children

    def _gen_decoded(self, eta, enc_output, enc_hidden, enc_cell, enc_mask):
        with tf.GradientTape(persistent=True,
                             watch_accessed_variables=False) as tape:
            tape.watch(enc_hidden)
            surrogate_output = self.surrogate(enc_hidden)
        gradients = self.surrogate_breed(surrogate_output, enc_hidden, tape)
        dec_hidden = self.update_latent(enc_hidden, gradients, eta=eta)
        dec_cell = enc_cell
        context = tf.zeros(shape=[len(dec_hidden), 1, dec_hidden.shape[1]])
        dec_input = tf.expand_dims([1] * len(enc_hidden), 1)
        child = dec_input
        for _ in range(1, self.max_size - 1):
            initial_state = [dec_hidden, dec_cell]
            predictions, context, [dec_hidden, dec_cell], _ = self.dec(
                dec_input, context, enc_output, initial_state, enc_mask)
            dec_input = tf.expand_dims(
                tf.argmax(predictions, axis=1, output_type=tf.dtypes.int32), 1)
            child = tf.concat([child, dec_input], axis=1)
        stop_tokens = tf.expand_dims([2] * len(enc_hidden), 1)
        child = tf.concat([child, stop_tokens], axis=1)
        return child

    def cut_seq(self, seq, end_token=2):
        """Trim each decoded sequence at its end token and repair the tree."""
        ind = (seq == end_token).argmax(1)
        res = []
        tree_max = []
        for d, i in zip(seq, ind):
            # Rebuild a valid expression tree from the raw token run
            # (dropping the start/end tokens), then re-serialize it.
            repaired_tree = create_expression_tree(d[:i + 1][1:-1])
            repaired_seq = [node.data for node in repaired_tree.preorder()
                            ][-(self.max_size - 2):]
            tree_max.append(len(repaired_seq) == self.max_size - 2)
            repaired_seq = [1] + repaired_seq + [2]
            res.append(np.pad(repaired_seq, (0, self.max_size - i - 1)))
        return res

    def find_new(self, seq, candidates):
        """Partition indices into changed children and unchanged copies."""
        new_ind = []
        copy_ind = []
        for i, (s, c) in enumerate(zip(seq, candidates)):
            if not np.array_equal(s, c):
                new_ind.append(i)
            else:
                copy_ind.append(i)
        return new_ind, copy_ind

    def _gen_latent(self, candidates):
        enc_hidden = self.enc.initialize_hidden_state(batch_sz=len(candidates))
        enc_cell = self.enc.initialize_cell_state(batch_sz=len(candidates))
        enc_output, enc_hidden, enc_cell = self.enc(candidates,
                                                    [enc_hidden, enc_cell])
        return enc_output, enc_hidden, enc_cell

    def update(self):
        print("Training")
        # train()/eval() are assumed to be mode toggles defined on the custom
        # Encoder/Decoder classes (they are not part of tf.keras.Model).
        self.enc.train()
        self.dec.train()
        self.__train()
        self.save_models()
        self.train_steps += 1

    def breed(self):
        print("Breed")
        self.dec.eval()
        data_generator = self.population(
            batch_size=len(self.population.samples))
        tokenized_pop = []
        for (batch, (inp, _, _)) in enumerate(data_generator):
            enc_output, enc_hidden, enc_cell = self._gen_latent(inp)
            tokenized_pop += self._gen_children(inp, enc_output, enc_hidden,
                                                enc_cell)
        pop_expressions = [
            self.population.tokenizer.reproduce_expression(tp)
            for tp in tokenized_pop
        ]
        offspring = [deap.creator.Individual(pe) for pe in pop_expressions]
        return offspring
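# A hypothetical surrogate-assisted evolution loop around NeoOriginal. The
# update()/breed() methods exist above, but `pset`, `evaluate`,
# NUM_GENERATIONS, and the population-update call are placeholders for the
# surrounding project code (the Population API is assumed, not shown here).
neo = NeoOriginal(pset)
for gen in range(NUM_GENERATIONS):
    neo.update()             # retrain encoder/decoder/surrogate on population
    offspring = neo.breed()  # decode latent-gradient-ascent children
    for ind in offspring:
        ind.fitness.values = evaluate(ind)   # assumed fitness function
    neo.population.update(offspring)         # assumed Population method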