def __init__(self, opt, vocabs):
    super(S2SModel, self).__init__()
    self.opt = opt
    self.vocabs = vocabs
    self.encoder = Encoder(vocabs, opt)
    self.decoder = Decoder(vocabs, opt)
    self.generator = ProdGenerator(self.opt.decoder_rnn_size, vocabs, self.opt)
def __init__(self, args, vocab, n_dim, image_dim, layers, dropout, num_choice=5):
    super().__init__()
    print("Model name: DA, 1 layer, fixed subspaces")
    self.vocab = vocab
    self.encoder = Encoder(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
    self.decoder = Decoder(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
def __init__(self, num_layers, d_model, num_heads, dff,
             input_vocab_size, target_vocab_size, dropout=0.1):
    super(Transformer, self).__init__()
    self.encoder = Encoder(num_layers, d_model, num_heads, dff, input_vocab_size, dropout)
    self.decoder = Decoder(num_layers, d_model, num_heads, dff, target_vocab_size, dropout)
    self.final_layer = tf.keras.layers.Dense(target_vocab_size)
def __init__(self,
             num_encoder_layers: int = 6,
             num_decoder_layers: int = 6,
             dim_embedding: int = 512,
             num_heads: int = 8,  # must evenly divide dim_embedding (the former default of 6 does not divide 512)
             dim_feedforward: int = 512,
             dropout: float = 0.1,
             activation: nn.Module = nn.ReLU()):
    super().__init__()
    self.encoder = Encoder(num_layers=num_encoder_layers,
                           dim_embedding=dim_embedding,
                           num_heads=num_heads,
                           dim_feedforward=dim_feedforward,
                           dropout=dropout)
    self.decoder = Decoder(num_layers=num_decoder_layers,
                           dim_embedding=dim_embedding,
                           num_heads=num_heads,
                           dim_feedforward=dim_feedforward,
                           dropout=dropout)
    self.criterion = nn.CrossEntropyLoss()
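# A minimal sketch of why num_heads must divide dim_embedding: each head
# attends in a subspace of size dim_embedding // num_heads. This uses
# torch.nn.MultiheadAttention purely to illustrate the constraint; the
# Encoder/Decoder classes above are assumed to enforce the same rule.
import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=512, num_heads=8)  # 512 / 8 = 64 dims per head
x = torch.randn(10, 2, 512)                              # (seq, batch, embed)
out, attn_weights = mha(x, x, x)                         # self-attention
print(out.shape)                                         # torch.Size([10, 2, 512])
# nn.MultiheadAttention(embed_dim=512, num_heads=6) raises an AssertionError,
# since 512 is not divisible by 6.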
def __init__(self):
    super(Model, self).__init__()
    self.encoder = Encoder()
    self.decoder = Decoder()
    self.embeds = nn.Embedding(config.vocab_size, config.emb_dim)
    init_wt.init_wt_normal(self.embeds.weight)
    self.encoder = get_cuda(self.encoder)
    self.decoder = get_cuda(self.decoder)
    self.embeds = get_cuda(self.embeds)

# if __name__ == '__main__':
#     my_model = Model()
#     my_model_parameters = my_model.parameters()
#     # print(my_model_parameters)
#     my_model_parameters_group = list(my_model_parameters)
#     print(my_model_parameters_group)
def __init__(self, args, vocab, n_dim, image_dim, layers, dropout, num_choice=5):
    super().__init__()
    print("Model name: DA")
    self.vocab = vocab
    self.encoder = Encoder(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
    # self.encoder = TransformerEncoder(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
    # self.encoder = DAEncoder(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
    # self.encoder = MHEncoder(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
    # self.encoder = HierarchicalDA(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
    # self.decoder = Disc(args, vocab, n_dim, image_dim, layers, dropout, num_choice)
    # self.decoder = SumDisc(args, vocab, n_dim, image_dim, layers, dropout, num_choice)
    self.decoder = Decoder(args, vocab, n_dim, image_dim, layers, dropout, num_choice).cuda()
def make_model(src_vocab, tar_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = GeneralEncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(Embedding(d_model, src_vocab), c(position)),
        nn.Sequential(Embedding(d_model, tar_vocab), c(position)),
        Generator(d_model, tar_vocab))
    # Randomly initialize the parameters (Xavier uniform); this is very important.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
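# A minimal usage sketch, assuming the helper classes above follow the
# "Annotated Transformer" layout. The vocab sizes and N=2 here are
# illustrative only, not values from the repository.
if __name__ == "__main__":
    tmp_model = make_model(src_vocab=11, tar_vocab=11, N=2)
    n_params = sum(p.numel() for p in tmp_model.parameters())
    print(f"model built with {n_params} parameters")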
                                            transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.ImageFolder(root='./data/Test',
                                           transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=200,
                                         shuffle=False, num_workers=2)

# Model
print('==> Building model..')
encoder = Encoder(mask=mask)
decoder = Decoder(mask=mask)
classifier = Classifier()
encoder = encoder.to(device)
decoder = decoder.to(device)
classifier = classifier.to(device)
if device == 'cuda':
    cudnn.benchmark = True

if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt_' + codir + '.t7')
    encoder.load_state_dict(checkpoint['encoder'])
    decoder.load_state_dict(checkpoint['decoder'])
    classifier.load_state_dict(checkpoint['classifier'])
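# A hedged counterpart to the resume block above: saving a checkpoint with
# the same keys it expects. The 'codir' name and the .t7 suffix are taken
# from the load path; the function itself is an assumption for illustration,
# not the repository's actual save routine.
def save_checkpoint(encoder, decoder, classifier, codir):
    state = {
        'encoder': encoder.state_dict(),
        'decoder': decoder.state_dict(),
        'classifier': classifier.state_dict(),
    }
    os.makedirs('checkpoint', exist_ok=True)
    torch.save(state, './checkpoint/ckpt_' + codir + '.t7')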
def main():
    # parameters
    learning_rate = 0.01
    num_epochs = 50
    batch_size = 250
    feature_size = 2048
    test_index = 2
    number_of_domain = 4  # infograph, quickdraw, sketch, real

    # create the save and log directories
    print("Create the directory")
    if not os.path.exists("./save"):
        os.makedirs("./save")
    if not os.path.exists("./logfile"):
        os.makedirs("./logfile")
    if not os.path.exists("./logfile/MTL"):
        os.makedirs("./logfile/MTL")

    # load my Dataset
    type = ["infograph", "quickdraw", "sketch", "real", "test"]
    print("training set : %s ,%s, %s" % (type[0], type[1], type[3]))
    print("testing set : %s" % (type[2]))
    inf_train_dataset = Dataset.Dataset(mode="train", type=type[0])
    inf_train_loader = DataLoader(inf_train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
    qdr_train_dataset = Dataset.Dataset(mode="train", type=type[1])
    qdr_train_loader = DataLoader(qdr_train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
    skt_train_dataset = Dataset.Dataset(mode="train", type=type[2])
    skt_train_loader = DataLoader(skt_train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
    rel_train_dataset = Dataset.Dataset(mode="train", type=type[3])
    rel_train_loader = DataLoader(rel_train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
    test_dataset = Dataset.Dataset(mode="test", type=type[0])
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
    print('the source dataset has %d size.' % (len(inf_train_dataset)))
    print('the target dataset has %d size.' % (len(test_dataset)))
    print('the batch_size is %d' % (batch_size))

    # Pre-train models
    encoder = Encoder()
    classifier = Classifier(feature_size)
    domain_classifier = Domain_classifier(feature_size, number_of_domain)

    # GPU enable
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Device used:', device)
    if torch.cuda.is_available():
        encoder = encoder.to(device)
        domain_classifier = domain_classifier.to(device)
        classifier = classifier.to(device)

    # setup optimizers
    optimizer_encoder = optim.Adam(encoder.parameters(), weight_decay=1e-4, lr=learning_rate)
    optimizer_domain_classifier = optim.Adam(domain_classifier.parameters(), weight_decay=1e-4, lr=learning_rate)
    optimizer_classifier = optim.Adam(classifier.parameters(), weight_decay=1e-4, lr=learning_rate)

    print("Starting training...")
    for epoch in range(num_epochs):
        print("Epoch:", epoch + 1)
        train_loader = [inf_train_loader, qdr_train_loader, skt_train_loader, rel_train_loader]
        domain_labels = torch.LongTensor([[0 for i in range(batch_size)],
                                          [1 for i in range(batch_size)],
                                          [2 for i in range(batch_size)],
                                          [3 for i in range(batch_size)]])
        mtl_criterion = nn.CrossEntropyLoss()
        moe_criterion = nn.CrossEntropyLoss()
        encoder.train()
        domain_classifier.train()
        classifier.train()
        epoch_D_loss = 0.0
        epoch_C_loss = 0.0
        sum_trg_acc = 0.0
        sum_label_acc = 0.0
        sum_test_acc = 0.0
        for index, (inf, qdr, skt, rel, test) in enumerate(
                zip(train_loader[0], train_loader[1], train_loader[2],
                    train_loader[3], test_loader)):
            optimizer_encoder.zero_grad()
            optimizer_classifier.zero_grad()
            optimizer_domain_classifier.zero_grad()

            # calculate lambda_ for the gradient-reversal schedule
            p = (index + len(train_loader[0]) * epoch) / (len(train_loader[0]) * num_epochs)
            lambda_ = 2.0 / (1. + np.exp(-10 * p)) - 1.0

            s1_imgs, s1_labels = inf
            s2_imgs, s2_labels = qdr
            s3_imgs, s3_labels = rel
            t1_imgs, _ = skt
            s1_imgs = Variable(s1_imgs).to(device)
            s1_labels = Variable(s1_labels).to(device)
            s2_imgs = Variable(s2_imgs).to(device)
            s2_labels = Variable(s2_labels).to(device)
            s3_imgs = Variable(s3_imgs).to(device)
            s3_labels = Variable(s3_labels).to(device)
            t1_imgs = Variable(t1_imgs).to(device)
            s1_feature = encoder(s1_imgs)
            # t1_feature = encoder(t1_imgs)

            # Testing
            test_imgs, test_labels = test
            test_imgs = Variable(test_imgs).to(device)
            test_labels = Variable(test_labels).to(device)
            test_feature = encoder(test_imgs)
            test_output = classifier(test_feature)
            test_preds = test_output.argmax(1).cpu()
            test_acc = np.mean((test_preds == test_labels.cpu()).numpy())

            # Classifier network
            s1_output = classifier(s1_feature)
            s1_preds = s1_output.argmax(1).cpu()
            s1_acc = np.mean((s1_preds == s1_labels.cpu()).numpy())
            s1_c_loss = mtl_criterion(s1_output, s1_labels)
            C_loss = s1_c_loss

            # Domain_classifier network with source domain
            # domain_labels = Variable(domain_labels).to(device)
            # s1_domain_output = domain_classifier(s1_feature, lambda_)
            # s1_domain_preds = s1_domain_output.argmax(1).cpu()
            # if index == 10:
            #     print(s1_domain_preds)
            # s1_domain_acc = np.mean((s1_domain_preds == 0).numpy())
            # print(s1_domain_output.shape)
            # print(s1_domain_output[0])
            # s1_d_loss = moe_criterion(s1_domain_output, domain_labels[0])
            # D_loss_src = s1_d_loss
            # print(D_loss_src.item())

            # Domain_classifier network with target domain
            # t1_domain_output = domain_classifier(t1_feature, lambda_)
            # t1_domain_preds = t1_domain_output.argmax(1).cpu()
            # t1_domain_acc = np.mean((t1_domain_preds == 3).numpy())
            # t1_d_loss = moe_criterion(t1_domain_output, domain_labels[3])
            # D_loss = D_loss_src + t1_d_loss

            loss = C_loss
            D_loss = 0
            # epoch_D_loss += D_loss.item()
            epoch_C_loss += C_loss.item()
            # sum_trg_acc += t1_domain_acc
            # D_src_acc = (s1_domain_acc + s2_domain_acc + s3_domain_acc) / 3.

            loss.backward()
            optimizer_encoder.step()
            optimizer_classifier.step()
            optimizer_domain_classifier.step()
            if (index + 1) % 10 == 0:
                print('Iter [%d/%d] loss %.4f , D_loss %.4f ,Acc %.4f ,Test Acc: %.4f'
                      % (index + 1, len(train_loader[0]), loss.item(), D_loss, s1_acc, test_acc))

        test_acc = 0.
        test_loss = 0.
        encoder.eval()
        domain_classifier.eval()
        classifier.eval()
        for index, (imgs, labels) in enumerate(test_loader):
            output_list = []
            loss_mtl = []
            imgs = Variable(imgs).to(device)
            labels = Variable(labels).to(device)
            hidden = encoder(imgs)
            output = classifier(hidden)
            preds = output.argmax(1).cpu()
            s1_acc = np.mean((preds == labels.cpu()).numpy())
            """
            for sthi in classifiers:
                output = sthi(hidden)
                output_list.append(output.cpu())
                loss = mtl_criterion(output, labels)
                loss_mtl.append(loss)
            output = torch.FloatTensor(np.array(output_list).sum(0))
            preds = output.argmax(1).cpu()
            s1_preds = output_list[0].argmax(1).cpu()
            s2_preds = output_list[1].argmax(1).cpu()
            s3_preds = output_list[2].argmax(1).cpu()
            acc = np.mean((preds == labels.cpu()).numpy())
            s1_acc = np.mean((s1_preds == labels.cpu()).numpy())
            s2_acc = np.mean((s2_preds == labels.cpu()).numpy())
            s3_acc = np.mean((s3_preds == labels.cpu()).numpy())
            if index == 0:
                print(acc)
            loss_mtl = sum(loss_mtl)
            loss = loss_mtl
            test_acc += acc
            test_loss += loss.item()
            """
        # print('Testing: loss %.4f,Acc %.4f ,s1 %.4f,s2 %.4f,s3 %.4f'
        #       % (test_loss/len(test_loader), test_acc/len(test_loader), s1_acc, s2_acc, s3_acc))
    return 0
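# The lambda_ schedule above is the gradient-reversal warm-up used in DANN
# (Ganin & Lempitsky, 2015): lambda = 2 / (1 + exp(-10 p)) - 1, where p is
# training progress in [0, 1]. A quick standalone check of its range:
import numpy as np

for p in (0.0, 0.25, 0.5, 1.0):
    lambda_ = 2.0 / (1.0 + np.exp(-10 * p)) - 1.0
    print(f"p={p:.2f} -> lambda={lambda_:.4f}")
# p=0.00 -> lambda=0.0000  (domain loss switched off at the start)
# p=1.00 -> lambda=0.9999  (nearly full strength at the end)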
def loss_function(real, pred):
    # mask out padding tokens (id 0) before averaging
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)


def main():
    pass


if __name__ == "__main__":
    BATCH_SIZE = 64
    vocab_inp_size = 32
    vocab_tar_size = 32
    embedding_dim = 64
    units = 128

    # Encoder
    encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
    # Decoder
    decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)

    optimizer = tf.keras.optimizers.Adam()
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')

    @tf.function
    def train_step(inp, targ, enc_hidden, enc_cell):
        loss = 0
        with tf.GradientTape() as tape:
            enc_output, enc_hidden, enc_cell = encoder(inp, [enc_hidden, enc_cell])
class Instructor:
    def __init__(self, model_name: str, args):
        self.model_name = model_name
        self.args = args
        self.encoder = Encoder(self.args.add_noise).to(self.args.device)
        self.decoder = Decoder(self.args.upsample_mode).to(self.args.device)
        self.pretrainDataset = None
        self.pretrainDataloader = None
        self.pretrainOptimizer = None
        self.pretrainScheduler = None
        self.RHO_tensor = None
        self.pretrain_batch_cnt = 0
        self.writer = None
        self.svmDataset = None
        self.svmDataloader = None
        self.testDataset = None
        self.testDataloader = None
        self.svm = SVC(C=self.args.svm_c,
                       kernel=self.args.svm_ker,
                       verbose=True,
                       max_iter=self.args.svm_max_iter)
        self.resnet = Resnet(use_pretrained=True,
                             num_classes=self.args.classes,
                             resnet_depth=self.args.resnet_depth,
                             dropout=self.args.resnet_dropout).to(self.args.device)
        self.resnetOptimizer = None
        self.resnetScheduler = None
        self.resnetLossFn = None

    def _load_data_by_label(self, label: str) -> list:
        ret = []
        LABEL_PATH = os.path.join(self.args.TRAIN_PATH, label)
        for dir_path, _, file_list in os.walk(LABEL_PATH, topdown=False):
            for file_name in file_list:
                file_path = os.path.join(dir_path, file_name)
                img_np = imread(file_path)
                img = img_np.copy()
                img = img.tolist()
                ret.append(img)
        return ret

    def _load_all_data(self):
        all_data = []
        all_labels = []
        for label_id in range(0, self.args.classes):
            expression = LabelEnum(label_id)
            sub_data = self._load_data_by_label(expression.name)
            sub_labels = [label_id] * len(sub_data)
            all_data.extend(sub_data)
            all_labels.extend(sub_labels)
        return all_data, all_labels

    def _load_test_data(self):
        file_map = pd.read_csv(os.path.join(self.args.RAW_PATH, 'submission.csv'))
        test_data = []
        img_names = []
        for file_name in file_map['file_name']:
            file_path = os.path.join(self.args.TEST_PATH, file_name)
            img_np = imread(file_path)
            img = img_np.copy()
            img = img.tolist()
            test_data.append(img)
            img_names.append(file_name)
        return test_data, img_names

    def trainAutoEncoder(self):
        self.writer = SummaryWriter(os.path.join(self.args.LOG_PATH, self.model_name))
        all_data, all_labels = self._load_all_data()
        self.pretrainDataset = FERDataset(all_data, labels=all_labels, args=self.args)
        self.pretrainDataloader = DataLoader(dataset=self.pretrainDataset,
                                             batch_size=self.args.batch_size,
                                             shuffle=True,
                                             num_workers=self.args.num_workers)
        self.pretrainOptimizer = torch.optim.Adam([
            {'params': self.encoder.parameters(), 'lr': self.args.pretrain_lr},
            {'params': self.decoder.parameters(), 'lr': self.args.pretrain_lr},
        ])
        tot_steps = math.ceil(len(self.pretrainDataloader) / self.args.cumul_batch) * self.args.epochs
        self.pretrainScheduler = get_linear_schedule_with_warmup(
            self.pretrainOptimizer, num_warmup_steps=0, num_training_steps=tot_steps)
        self.RHO_tensor = torch.tensor(
            [self.args.rho for _ in range(self.args.embed_dim)],
            dtype=torch.float).unsqueeze(0).to(self.args.device)
        epochs = self.args.epochs
        for epoch in range(1, epochs + 1):
            print()
            print("================ AutoEncoder Training Epoch {:}/{:} ================"
                  .format(epoch, epochs))
            print(" ---- Start training ------>")
            self.epochTrainAutoEncoder(epoch)
            print()
        self.writer.close()

    def epochTrainAutoEncoder(self, epoch):
        self.encoder.train()
        self.decoder.train()
        cumul_loss = 0
        cumul_steps = 0
        cumul_samples = 0
        self.pretrainOptimizer.zero_grad()
        cumulative_batch = 0
        for idx, (images, labels) in enumerate(tqdm(self.pretrainDataloader)):
            batch_size = images.shape[0]
            images, labels = images.to(self.args.device), labels.to(self.args.device)
            embeds = self.encoder(images)
            outputs = self.decoder(embeds)
            loss = torch.nn.functional.mse_loss(outputs, images)
            if self.args.use_sparse:
                rho_hat = torch.mean(embeds, dim=0, keepdim=True)
                sparse_penalty = self.args.regulizer_weight * torch.nn.functional.kl_div(
                    input=torch.nn.functional.log_softmax(rho_hat, dim=-1),
                    target=torch.nn.functional.softmax(self.RHO_tensor, dim=-1))
                loss = loss + sparse_penalty
            loss_each = loss / self.args.cumul_batch
            loss_each.backward()
            cumulative_batch += 1
            cumul_steps += 1
            cumul_loss += loss.detach().cpu().item() * batch_size
            cumul_samples += batch_size
            if cumulative_batch >= self.args.cumul_batch:
                torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), max_norm=self.args.max_norm)
                torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), max_norm=self.args.max_norm)
                self.pretrainOptimizer.step()
                self.pretrainScheduler.step()
                self.pretrainOptimizer.zero_grad()
                cumulative_batch = 0
            if cumul_steps >= self.args.disp_period or idx + 1 == len(self.pretrainDataloader):
                print(" -> cumul_steps={:} loss={:}".format(cumul_steps, cumul_loss / cumul_samples))
                self.pretrain_batch_cnt += 1
                self.writer.add_scalar('batch-loss', cumul_loss / cumul_samples,
                                       global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar('encoder_lr',
                                       self.pretrainOptimizer.state_dict()['param_groups'][0]['lr'],
                                       global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar('decoder_lr',
                                       self.pretrainOptimizer.state_dict()['param_groups'][1]['lr'],
                                       global_step=self.pretrain_batch_cnt)
                cumul_steps = 0
                cumul_loss = 0
                cumul_samples = 0
        self.saveAutoEncoder(epoch)

    def saveAutoEncoder(self, epoch):
        encoderPath = os.path.join(self.args.CKPT_PATH,
                                   self.model_name + "--Encoder" + "--EPOCH-{:}".format(epoch))
        decoderPath = os.path.join(self.args.CKPT_PATH,
                                   self.model_name + "--Decoder" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Saving AutoEncoder {:} ......".format(self.model_name))
        torch.save(self.encoder.state_dict(), encoderPath)
        torch.save(self.decoder.state_dict(), decoderPath)
        print(" -> Successfully saved AutoEncoder.")
        print("-----------------------------------------------")

    def generateAutoEncoderTestResultSamples(self, sample_cnt):
        self.encoder.eval()
        self.decoder.eval()
        print(' -> Generating samples with AutoEncoder ...')
        save_path = os.path.join(self.args.SAMPLE_PATH, self.model_name)
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        with torch.no_grad():
            for dir_path, _, file_list in os.walk(self.args.TEST_PATH, topdown=False):
                sample_file_list = random.choices(file_list, k=sample_cnt)
                for file_name in sample_file_list:
                    file_path = os.path.join(dir_path, file_name)
                    img_np = imread(file_path)
                    img = img_np.copy()
                    img = ToTensor()(img)
                    img = img.reshape(1, 1, 48, 48)
                    img = img.to(self.args.device)
                    embed = self.encoder(img)
                    out = self.decoder(embed).cpu()
                    out = out.reshape(1, 48, 48)
                    out_img = ToPILImage()(out)
                    out_img.save(os.path.join(save_path, file_name))
        print(' -> Done sampling from AutoEncoder with test pictures.')

    def loadAutoEncoder(self, epoch):
        encoderPath = os.path.join(self.args.CKPT_PATH,
                                   self.model_name + "--Encoder" + "--EPOCH-{:}".format(epoch))
        decoderPath = os.path.join(self.args.CKPT_PATH,
                                   self.model_name + "--Decoder" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Loading AutoEncoder {:} ......".format(self.model_name))
        self.encoder.load_state_dict(torch.load(encoderPath))
        self.decoder.load_state_dict(torch.load(decoderPath))
        print(" -> Successfully loaded AutoEncoder.")
        print("-----------------------------------------------")
    def generateExtractedFeatures(self, data: torch.FloatTensor) -> torch.FloatTensor:
        """
        :param data: (batch, channel, l, w)
        :return: embed: (batch, embed_dim)
        """
        with torch.no_grad():
            data = data.to(self.args.device)
            embed = self.encoder(data)
            embed = embed.detach().cpu()
        return embed

    def trainSVM(self, load: bool):
        svm_path = os.path.join(self.args.CKPT_PATH, self.model_name + '--svm')
        self.loadAutoEncoder(self.args.epochs)
        self.encoder.eval()
        self.decoder.eval()
        if load:
            print(' -> Loaded from SVM trained model.')
            self.svm = joblib.load(svm_path)
            return
        print()
        print("================ SVM Training Starting ================")
        all_data, all_labels = self._load_all_data()
        all_length = len(all_data)
        self.svmDataset = FERDataset(all_data, labels=all_labels, use_da=False, args=self.args)
        self.svmDataloader = DataLoader(dataset=self.svmDataset,
                                        batch_size=self.args.batch_size,
                                        shuffle=False,
                                        num_workers=self.args.num_workers)
        print(" -> Converting to extracted features ...")
        cnt = 0
        all_embeds = []
        all_labels = []
        for images, labels in self.svmDataloader:
            cnt += 1
            embeds = self.generateExtractedFeatures(images)
            all_embeds.extend(embeds.tolist())
            all_labels.extend(labels.reshape(-1).tolist())
        print(' -> Start SVM fit ...')
        self.svm.fit(X=all_embeds, y=all_labels)
        # self.svm.fit(X=all_embeds[0:3], y=[0, 1, 2])
        joblib.dump(self.svm, svm_path)
        print(" -> Done training for SVM.")

    def genTestResult(self, from_svm=True):
        print()
        print('-------------------------------------------------------')
        print(' -> Generating test result for {:} ...'.format('SVM' if from_svm else 'Resnet'))
        test_data, img_names = self._load_test_data()
        test_length = len(test_data)
        self.testDataset = FERDataset(test_data, filenames=img_names, use_da=False, args=self.args)
        self.testDataloader = DataLoader(dataset=self.testDataset,
                                         batch_size=self.args.batch_size,
                                         shuffle=False,
                                         num_workers=self.args.num_workers)
        str_preds = []
        for images, filenames in self.testDataloader:
            if from_svm:
                embeds = self.generateExtractedFeatures(images)
                preds = self.svm.predict(X=embeds)
            else:
                self.resnet.eval()
                outs = self.resnet(images.repeat(1, 3, 1, 1).to(self.args.device))
                preds = outs.max(-1)[1].cpu().tolist()
            str_preds.extend([LabelEnum(pred).name for pred in preds])
        # generate submission
        assert len(str_preds) == len(img_names)
        submission = pd.DataFrame({'file_name': img_names, 'class': str_preds})
        submission.to_csv(os.path.join(self.args.DATA_PATH, 'submission.csv'),
                          index=False, index_label=False)
        print(' -> Done generation of submission.csv with model {:}'.format(self.model_name))

    def epochTrainResnet(self, epoch):
        self.resnet.train()
        cumul_loss = 0
        cumul_acc = 0
        cumul_steps = 0
        cumul_samples = 0
        cumulative_batch = 0
        self.resnetOptimizer.zero_grad()
        for idx, (images, labels) in enumerate(tqdm(self.pretrainDataloader)):
            batch_size = images.shape[0]
            images, labels = images.to(self.args.device), labels.to(self.args.device)
            images += torch.randn(images.shape).to(images.device) * self.args.add_noise
            images = images.repeat(1, 3, 1, 1)
            outs = self.resnet(images)
            preds = outs.max(-1)[1].unsqueeze(dim=1)
            cur_acc = (preds == labels).type(torch.int).sum().item()
            loss = self.resnetLossFn(outs, labels.squeeze(dim=1))
            loss_each = loss / self.args.cumul_batch
            loss_each.backward()
            cumulative_batch += 1
            cumul_steps += 1
            cumul_loss += loss.detach().cpu().item() * batch_size
            cumul_acc += cur_acc
            cumul_samples += batch_size
            if cumulative_batch >= self.args.cumul_batch:
                torch.nn.utils.clip_grad_norm_(self.resnet.parameters(), max_norm=self.args.max_norm)
                self.resnetOptimizer.step()
                self.resnetScheduler.step()
                self.resnetOptimizer.zero_grad()
                cumulative_batch = 0
            if cumul_steps >= self.args.disp_period or idx + 1 == len(self.pretrainDataloader):
                print(" -> cumul_steps={:} loss={:} acc={:}".format(
                    cumul_steps, cumul_loss / cumul_samples, cumul_acc / cumul_samples))
                self.pretrain_batch_cnt += 1
                self.writer.add_scalar('batch-loss', cumul_loss / cumul_samples,
                                       global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar('batch-acc', cumul_acc / cumul_samples,
                                       global_step=self.pretrain_batch_cnt)
                self.writer.add_scalar('resnet_lr',
                                       self.resnetOptimizer.state_dict()['param_groups'][0]['lr'],
                                       global_step=self.pretrain_batch_cnt)
                cumul_steps = 0
                cumul_loss = 0
                cumul_acc = 0
                cumul_samples = 0
        if epoch % 10 == 0:
            self.saveResnet(epoch)

    def saveResnet(self, epoch):
        resnetPath = os.path.join(self.args.CKPT_PATH,
                                  self.model_name + "--Resnet" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Saving Resnet {:} ......".format(self.model_name))
        torch.save(self.resnet.state_dict(), resnetPath)
        print(" -> Successfully saved Resnet.")
        print("-----------------------------------------------")

    def loadResnet(self, epoch):
        resnetPath = os.path.join(self.args.CKPT_PATH,
                                  self.model_name + "--Resnet" + "--EPOCH-{:}".format(epoch))
        print("-----------------------------------------------")
        print(" -> Loading Resnet {:} ......".format(self.model_name))
        self.resnet.load_state_dict(torch.load(resnetPath))
        print(" -> Successfully loaded Resnet.")
        print("-----------------------------------------------")

    def trainResnet(self):
        self.writer = SummaryWriter(os.path.join(self.args.LOG_PATH, self.model_name))
        all_data, all_labels = self._load_all_data()
        self.pretrainDataset = FERDataset(all_data, labels=all_labels, args=self.args)
        self.pretrainDataloader = DataLoader(dataset=self.pretrainDataset,
                                             batch_size=self.args.batch_size,
                                             shuffle=True,
                                             num_workers=self.args.num_workers)
        self.resnetOptimizer = self.getResnetOptimizer()
        tot_steps = math.ceil(len(self.pretrainDataloader) / self.args.cumul_batch) * self.args.epochs
        self.resnetScheduler = get_linear_schedule_with_warmup(
            self.resnetOptimizer,
            num_warmup_steps=tot_steps * self.args.warmup_rate,
            num_training_steps=tot_steps)
        self.resnetLossFn = torch.nn.CrossEntropyLoss(
            weight=torch.tensor([
                9.40661861, 1.00104606, 0.56843877, 0.84912748,
                1.02660468, 1.29337298, 0.82603942,
            ], dtype=torch.float, device=self.args.device))
        epochs = self.args.epochs
        for epoch in range(1, epochs + 1):
            print()
            print("================ Resnet Training Epoch {:}/{:} ================"
                  .format(epoch, epochs))
            print(" ---- Start training ------>")
            self.epochTrainResnet(epoch)
            print()
        self.writer.close()

    def getResnetOptimizer(self):
        if self.args.resnet_optim == 'SGD':
            return torch.optim.SGD([
                {'params': self.resnet.baseParameters(),
                 'lr': self.args.resnet_base_lr,
                 'weight_decay': self.args.weight_decay,
                 'momentum': self.args.resnet_momentum},
                {'params': self.resnet.finetuneParameters(),
                 'lr': self.args.resnet_ft_lr,
                 'weight_decay': self.args.weight_decay,
                 'momentum': self.args.resnet_momentum},
            ], lr=self.args.resnet_base_lr)
        elif self.args.resnet_optim == 'Adam':
            return torch.optim.Adam([
                {'params': self.resnet.baseParameters(),
                 'lr': self.args.resnet_base_lr},
                {'params': self.resnet.finetuneParameters(),
                 'lr': self.args.resnet_ft_lr,
                 'weight_decay': self.args.weight_decay},
            ])
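# A minimal usage sketch of the Instructor pipeline, assuming an `args`
# namespace populated with the fields referenced above (paths, batch_size,
# epochs, svm_c, ...). The model name is illustrative; this shows the
# intended call order rather than a script from the repository.
if __name__ == '__main__':
    instructor = Instructor(model_name='fer-sparse-ae', args=args)
    instructor.trainAutoEncoder()                       # stage 1: reconstruction pretraining
    instructor.generateAutoEncoderTestResultSamples(8)  # visual sanity check
    instructor.trainSVM(load=False)                     # stage 2: SVM on frozen embeddings
    instructor.genTestResult(from_svm=True)             # write submission.csv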
def create_model_code_retrieval(opt, dataset, all_dict):
    def _init_param(opt, model):
        for p in model.parameters():
            p.data.uniform_(-opt.param_init, opt.param_init)

    dict_code, dict_comment = all_dict[0], all_dict[1]
    if opt.dataset_type == "c":
        dict_leaves = all_dict[2]
    else:
        dict_leaves = dict_code

    if opt.modal_type == "seq8tree8cfg9selfattn":
        seq_encoder = RetrievalCodeEncoderWrapper(opt, Encoder(opt, dict_code), "seq9coattn")
        tree_encoder = RetrievalCodeEncoderWrapper(
            opt, TreeEncoder_TreeLSTM_dgl(opt, dict_leaves), "tree9coattn")
        if opt.use_outmlp3:
            cfg_encoder = RetrievalCodeEncoderWrapper(
                opt,
                CFGEncoder_GGNN(opt, dataset.new_annotation_dim,
                                dataset.new_n_edge_types, dataset.new_n_node),
                "cfg")
        else:
            cfg_encoder = RetrievalCodeEncoderWrapper(
                opt,
                CFGEncoder_GGNN(opt, dataset.new_annotation_dim,
                                dataset.new_n_edge_types, dataset.new_n_node),
                "cfg9coattn")
        code_encoder = RetrievalCodeEncoderWrapper(
            opt, (seq_encoder, tree_encoder, cfg_encoder), opt.modal_type)
        comment_encoder = RetrievalCommentEncoderWrapper(opt, Encoder(opt, dict_comment))
        _init_param(opt, seq_encoder)
        _init_param(opt, tree_encoder)
        _init_param(opt, comment_encoder)

    if opt.modal_type == "seq8tree8cfg9selfattn":
        model = ModelCodeRetrieval(code_encoder, comment_encoder, opt)
    print("model.state_dict().keys(): \n ", model.state_dict().keys())

    if opt.model_from:
        if os.path.exists(opt.model_from):
            print("Loading from checkpoint at %s" % opt.model_from)
            checkpoint = torch.load(opt.model_from, map_location=lambda storage, loc: storage)
            model.load_state_dict(checkpoint)
        else:
            print("not load pt file")

    print("create_model_code_retrieval, opt.gpus: ", opt.gpus)
    if opt.gpus:
        model.cuda()
        print("model.cuda() ok")
        gpu_list = [int(k) for k in opt.gpus.split(",")]
        gpu_list = list(range(len(gpu_list)))
        if len(gpu_list) > 1:
            model = torch.nn.DataParallel(model, device_ids=gpu_list)
            print("DataParallel ok , gpu_list: ", gpu_list)
    return model
import tensorflow as tf

from model.Attention import Attention
from model.Decoder import Decoder
from model.Encoder import Encoder

if __name__ == "__main__":
    BATCH_SIZE = 64
    vocab_inp_size = 32
    vocab_tar_size = 32
    embedding_dim = 256
    units = 1024

    # Encoder
    encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)

    example_input_batch = tf.random.uniform(shape=(64, 16), minval=0, maxval=31, dtype=tf.int64)
    example_target_batch = tf.random.uniform(shape=(64, 11), minval=0, maxval=31, dtype=tf.int64)
    print(example_input_batch.shape, example_target_batch.shape)

    # sample input
    sample_hidden = encoder.initialize_hidden_state()
    sample_cell = encoder.initialize_cell_state()
    sample_output, sample_hidden, cell_hidden = encoder(
        example_input_batch, [sample_hidden, sample_cell])
    print(sample_output.shape, sample_hidden.shape, cell_hidden.shape)
class NeoOriginal:
    def __init__(  # TODO move parameters to config file
            self,
            pset,
            batch_size=64,
            max_size=100,
            vocab_inp_size=32,
            vocab_tar_size=32,
            embedding_dim=64,
            units=128,
            hidden_size=128,
            alpha=0.1,
            epochs=200,
            epoch_decay=1,
            min_epochs=10,
            verbose=True):
        self.alpha = alpha
        self.batch_size = batch_size
        self.max_size = max_size
        self.epochs = epochs
        self.epoch_decay = epoch_decay
        self.min_epochs = min_epochs
        self.train_steps = 0
        self.verbose = verbose
        self.enc = Encoder(vocab_inp_size, embedding_dim, units, batch_size)
        self.dec = Decoder(vocab_inp_size, vocab_tar_size, embedding_dim, units, batch_size)
        self.surrogate = Surrogate(hidden_size)
        self.population = Population(pset, max_size, batch_size)
        self.prob = 0.5
        self.optimizer = tf.keras.optimizers.Adam()
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=False, reduction='none')

    def save_models(self):
        self.enc.save_weights("model/weights/encoder/enc_{}".format(self.train_steps),
                              save_format="tf")
        self.dec.save_weights("model/weights/decoder/dec_{}".format(self.train_steps),
                              save_format="tf")
        self.surrogate.save_weights(
            "model/weights/surrogate/surrogate_{}".format(self.train_steps),
            save_format="tf")

    def load_models(self, train_steps):
        self.enc.load_weights("model/weights/encoder/enc_{}".format(train_steps))
        self.dec.load_weights("model/weights/decoder/dec_{}".format(train_steps))
        self.surrogate.load_weights("model/weights/surrogate/surrogate_{}".format(train_steps))

    @tf.function
    def train_step(self, inp, targ, targ_surrogate, enc_hidden, enc_cell):
        autoencoder_loss = 0
        with tf.GradientTape(persistent=True) as tape:
            enc_output, enc_hidden, enc_cell = self.enc(inp, [enc_hidden, enc_cell])
            surrogate_output = self.surrogate(enc_hidden)
            surrogate_loss = self.surrogate_loss_function(targ_surrogate, surrogate_output)
            dec_hidden = enc_hidden
            dec_cell = enc_cell
            context = tf.zeros(shape=[len(dec_hidden), 1, dec_hidden.shape[1]])
            dec_input = tf.expand_dims([1] * len(inp), 1)
            for t in range(1, self.max_size):
                initial_state = [dec_hidden, dec_cell]
                predictions, context, [dec_hidden, dec_cell], _ = self.dec(
                    dec_input, context, enc_output, initial_state)
                autoencoder_loss += self.autoencoder_loss_function(targ[:, t], predictions)
                # Probabilistic teacher forcing
                # (feeding the target as the next input)
                if tf.random.uniform(shape=[], maxval=1, dtype=tf.float32) > self.prob:
                    dec_input = tf.expand_dims(targ[:, t], 1)
                else:
                    pred_token = tf.argmax(predictions, axis=1, output_type=tf.dtypes.int32)
                    dec_input = tf.expand_dims(pred_token, 1)
            loss = autoencoder_loss + self.alpha * surrogate_loss
            ae_loss_per_token = autoencoder_loss / int(targ.shape[1])
            batch_loss = ae_loss_per_token + self.alpha * surrogate_loss
        batch_ae_loss = (autoencoder_loss / int(targ.shape[1]))
        batch_surrogate_loss = surrogate_loss
        gradients, variables = self.backward(loss, tape)
        self.optimize(gradients, variables)
        return batch_loss, batch_ae_loss, batch_surrogate_loss

    def backward(self, loss, tape):
        variables = \
            self.enc.trainable_variables + self.dec.trainable_variables \
            + self.surrogate.trainable_variables
        gradients = tape.gradient(loss, variables)
        return gradients, variables

    def optimize(self, gradients, variables):
        self.optimizer.apply_gradients(zip(gradients, variables))

    def surrogate_breed(self, output, latent, tape):
        gradients = tape.gradient(output, latent)
        return gradients

    def update_latent(self, latent, gradients, eta):
        latent += eta * gradients
        return latent

    def autoencoder_loss_function(self, real, pred):
        # mask out padding tokens (id 0) before averaging
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        loss_ = self.loss_object(real, pred)
        mask = tf.cast(mask, dtype=loss_.dtype)
        loss_ *= mask
        return tf.reduce_mean(loss_)

    def surrogate_loss_function(self, real, pred):
        loss_ = tf.keras.losses.mean_squared_error(real, pred)
        return tf.reduce_mean(loss_)

    def __train(self):
        for epoch in range(self.epochs):
            self.epoch = epoch
            start = time.time()
            total_loss = 0
            total_ae_loss = 0
            total_surrogate_loss = 0
            data_generator = self.population()
            for (batch, (inp, targ, targ_surrogate)) in enumerate(data_generator):
                enc_hidden = self.enc.initialize_hidden_state(batch_sz=len(inp))
                enc_cell = self.enc.initialize_cell_state(batch_sz=len(inp))
                batch_loss, batch_ae_loss, batch_surr_loss = self.train_step(
                    inp, targ, targ_surrogate, enc_hidden, enc_cell)
                total_loss += batch_loss
                total_ae_loss += batch_ae_loss
                total_surrogate_loss += batch_surr_loss
                if False and self.verbose:
                    print(f'Epoch {epoch + 1} Batch {batch} '
                          f'Loss {batch_loss.numpy():.4f}')
            if self.verbose and ((epoch + 1) % 10 == 0 or epoch == 0):
                epoch_loss = total_loss / self.population.steps_per_epoch
                ae_loss = total_ae_loss / self.population.steps_per_epoch
                surrogate_loss = \
                    total_surrogate_loss / self.population.steps_per_epoch
                epoch_time = time.time() - start
                print(f'Epoch {epoch + 1} Loss {epoch_loss:.6f} AE_loss '
                      f'{ae_loss:.6f} Surrogate_loss '
                      f'{surrogate_loss:.6f} Time: {epoch_time:.3f}')
        # decrease the number of epochs, but don't go below self.min_epochs
        self.epochs = max(self.epochs - self.epoch_decay, self.min_epochs)

    def _gen_children(self, candidates, enc_output, enc_hidden, enc_cell, max_eta=1000):
        children = []
        eta = 0
        enc_mask = enc_output._keras_mask
        last_copy_ind = len(candidates)
        while eta < max_eta:
            eta += 1
            start = time.time()
            new_children = self._gen_decoded(eta, enc_output, enc_hidden,
                                             enc_cell, enc_mask).numpy()
            new_children = self.cut_seq(new_children, end_token=2)
            new_ind, copy_ind = self.find_new(new_children, candidates)
            if len(copy_ind) < last_copy_ind:
                last_copy_ind = len(copy_ind)
                print("Eta {} Not-changed {} Time: {:.3f}".format(
                    eta, len(copy_ind), time.time() - start))
            for i in new_ind:
                children.append(new_children[i])
            if len(copy_ind) < 1:
                break
            enc_output = tf.gather(enc_output, copy_ind)
            enc_mask = tf.gather(enc_mask, copy_ind)
            enc_hidden = tf.gather(enc_hidden, copy_ind)
            enc_cell = tf.gather(enc_cell, copy_ind)
            candidates = tf.gather(candidates, copy_ind)
        if eta == max_eta:
            print("Maximal value of eta reached - breed stopped")
            for i in copy_ind:
                children.append(new_children[i])
        return children

    def _gen_decoded(self, eta, enc_output, enc_hidden, enc_cell, enc_mask):
        with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
            tape.watch(enc_hidden)
            surrogate_output = self.surrogate(enc_hidden)
        gradients = self.surrogate_breed(surrogate_output, enc_hidden, tape)
        dec_hidden = self.update_latent(enc_hidden, gradients, eta=eta)
        dec_cell = enc_cell
        context = tf.zeros(shape=[len(dec_hidden), 1, dec_hidden.shape[1]])
        dec_input = tf.expand_dims([1] * len(enc_hidden), 1)
        child = dec_input
        for _ in range(1, self.max_size - 1):
            initial_state = [dec_hidden, dec_cell]
            predictions, context, [dec_hidden, dec_cell], _ = self.dec(
                dec_input, context, enc_output, initial_state, enc_mask)
            dec_input = tf.expand_dims(
                tf.argmax(predictions, axis=1, output_type=tf.dtypes.int32), 1)
            child = tf.concat([child, dec_input], axis=1)
        stop_tokens = tf.expand_dims([2] * len(enc_hidden), 1)
        child = tf.concat([child, stop_tokens], axis=1)
        return child

    def cut_seq(self, seq, end_token=2):
        ind = (seq == end_token).argmax(1)
        res = []
        tree_max = []
        for d, i in zip(seq, ind):
            repaired_tree = create_expression_tree(d[:i + 1][1:-1])
            repaired_seq = [i.data for i in repaired_tree.preorder()][-(self.max_size - 2):]
            tree_max.append(len(repaired_seq) == self.max_size - 2)
            repaired_seq = [1] + repaired_seq + [2]
            res.append(np.pad(repaired_seq, (0, self.max_size - i - 1)))
        return res

    def find_new(self, seq, candidates):
        new_ind = []
        copy_ind = []
        n = False
        cp = False
        for i, (s, c) in enumerate(zip(seq, candidates)):
            if not np.array_equal(s, c):
                if not n:
                    n = True
                new_ind.append(i)
            else:
                if not cp:
                    cp = True
                copy_ind.append(i)
        return new_ind, copy_ind

    def _gen_latent(self, candidates):
        enc_hidden = self.enc.initialize_hidden_state(batch_sz=len(candidates))
        enc_cell = self.enc.initialize_cell_state(batch_sz=len(candidates))
        enc_output, enc_hidden, enc_cell = self.enc(candidates, [enc_hidden, enc_cell])
        return enc_output, enc_hidden, enc_cell

    def update(self):
        print("Training")
        self.enc.train()
        self.dec.train()
        self.__train()
        self.save_models()
        self.train_steps += 1

    def breed(self):
        print("Breed")
        self.dec.eval()
        data_generator = self.population(batch_size=len(self.population.samples))
        tokenized_pop = []
        for (batch, (inp, _, _)) in enumerate(data_generator):
            enc_output, enc_hidden, enc_cell = self._gen_latent(inp)
            tokenized_pop += self._gen_children(inp, enc_output, enc_hidden, enc_cell)
        pop_expressions = [
            self.population.tokenizer.reproduce_expression(tp)
            for tp in tokenized_pop
        ]
        offspring = [deap.creator.Individual(pe) for pe in pop_expressions]
        return offspring
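# A standalone sketch of the surrogate-guided "breeding" step used above:
# watch the latent vector on a gradient tape, score it with the surrogate,
# then step the latent along d(score)/d(latent) scaled by eta. The toy Dense
# layer below is a stand-in for the Surrogate network, purely for illustration.
import tensorflow as tf

surrogate = tf.keras.layers.Dense(1)        # toy fitness predictor
latent = tf.random.normal(shape=(4, 128))   # (batch, hidden_size)

with tf.GradientTape(watch_accessed_variables=False) as tape:
    tape.watch(latent)
    score = surrogate(latent)               # predicted fitness per individual

grads = tape.gradient(score, latent)        # d(score)/d(latent)
eta = 1.0
new_latent = latent + eta * grads           # move latents toward higher predicted fitness
print(new_latent.shape)                     # (4, 128)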
def main():
    # create tensorboard summary writer
    writer = SummaryWriter(args.experiment_id)

    # [TODO] may need to resize input image
    cudnn.enabled = True

    # create model: Encoder
    model_encoder = Encoder()
    model_encoder.train()
    model_encoder.cuda(args.gpu)
    optimizer_encoder = optim.Adam(model_encoder.parameters(),
                                   lr=args.learning_rate, betas=(0.95, 0.99))
    optimizer_encoder.zero_grad()

    # create model: Decoder
    model_decoder = Decoder()
    model_decoder.train()
    model_decoder.cuda(args.gpu)
    optimizer_decoder = optim.Adam(model_decoder.parameters(),
                                   lr=args.learning_rate, betas=(0.95, 0.99))
    optimizer_decoder.zero_grad()

    l2loss = nn.MSELoss()

    # load data
    for i in range(1, 360002, 30000):
        train_data, valid_data = get_data(i)
        for e in range(1, args.epoch + 1):
            train_loss_value = 0
            validation_loss_value = 0
            for j in range(0, int(args.train_size / 4), args.batch_size):
                optimizer_encoder.zero_grad()
                optimizer_decoder.zero_grad()
                image = Variable(torch.tensor(train_data[j: j + args.batch_size, :, :])).cuda(args.gpu)
                latent = model_encoder(image)
                img_recon = model_decoder(latent)
                img_recon = F.interpolate(img_recon, size=image.shape[2:],
                                          mode='bilinear', align_corners=True)
                loss = l2loss(img_recon, image)
                train_loss_value += loss.data.cpu().numpy() / args.batch_size
                loss.backward()
                optimizer_decoder.step()
                optimizer_encoder.step()
            print("data load: {:8d}".format(i))
            print("epoch: {:8d}".format(e))
            print("train_loss: {:08.6f}".format(
                train_loss_value / (args.train_size / args.batch_size)))

            for j in range(0, int(args.validation_size / 4), args.batch_size):
                model_encoder.eval()
                model_decoder.eval()
                image = Variable(torch.tensor(valid_data[j: j + args.batch_size, :, :])).cuda(args.gpu)
                latent = model_encoder(image)
                img_recon = model_decoder(latent)
                img_1 = img_recon[0][0]
                img = image[0][0]
                img_recon = F.interpolate(img_recon, size=image.shape[2:],
                                          mode='bilinear', align_corners=True)
                save_image(img_1, args.image_dir + '/fake' + str(i) + "_" + str(j) + ".png")
                save_image(img, args.image_dir + '/real' + str(i) + "_" + str(j) + ".png")
                # validation loss against the held-out batch
                loss = l2loss(img_recon, image)
                validation_loss_value += loss.data.cpu().numpy() / args.batch_size
                model_encoder.train()
                model_decoder.train()
            print("validation_loss: {:08.6f}".format(
                validation_loss_value / (args.validation_size / args.batch_size)))

    torch.save({'encoder_state_dict': model_encoder.state_dict()},
               osp.join(args.checkpoint_dir, 'AE_encoder.pth'))
    torch.save({'decoder_state_dict': model_decoder.state_dict()},
               osp.join(args.checkpoint_dir, 'AE_decoder.pth'))
class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        self.config = config

        # embedding layer
        self.embedding = Embedding(config.num_vocab,       # vocabulary size
                                   config.embedding_size,  # embedding dimension
                                   config.pad_id,          # pad_id
                                   config.dropout)

        # post encoder
        self.post_encoder = Encoder(config.post_encoder_cell_type,      # rnn cell type
                                    config.embedding_size,              # input size
                                    config.post_encoder_output_size,    # output size
                                    config.post_encoder_num_layers,     # number of rnn layers
                                    config.post_encoder_bidirectional,  # bidirectional or not
                                    config.dropout)                     # dropout probability

        # response encoder
        self.response_encoder = Encoder(config.response_encoder_cell_type,
                                        config.embedding_size,              # input size
                                        config.response_encoder_output_size,    # output size
                                        config.response_encoder_num_layers,     # number of rnn layers
                                        config.response_encoder_bidirectional,  # bidirectional or not
                                        config.dropout)                         # dropout probability

        # prior network
        self.prior_net = PriorNet(config.post_encoder_output_size,  # post input size
                                  config.latent_size,               # latent variable size
                                  config.dims_prior)                # hidden layer sizes

        # recognition network
        self.recognize_net = RecognizeNet(config.post_encoder_output_size,      # post input size
                                          config.response_encoder_output_size,  # response input size
                                          config.latent_size,                   # latent variable size
                                          config.dims_recognize)                # hidden layer sizes

        # initialize the decoder state
        self.prepare_state = PrepareState(config.post_encoder_output_size + config.latent_size,
                                          config.decoder_cell_type,
                                          config.decoder_output_size,
                                          config.decoder_num_layers)

        # decoder
        self.decoder = Decoder(config.decoder_cell_type,   # rnn cell type
                               config.embedding_size,      # input size
                               config.decoder_output_size, # output size
                               config.decoder_num_layers,  # number of rnn layers
                               config.dropout)             # dropout probability

        # output layer
        self.projector = nn.Sequential(
            nn.Linear(config.decoder_output_size, config.num_vocab),
            nn.Softmax(-1)
        )

    def forward(self, inputs, inference=False, max_len=60, gpu=True):
        if not inference:  # training
            id_posts = inputs['posts']                   # [batch, seq]
            len_posts = inputs['len_posts']              # [batch]
            id_responses = inputs['responses']           # [batch, seq]
            len_responses = inputs['len_responses']      # [batch, seq]
            sampled_latents = inputs['sampled_latents']  # [batch, latent_size]
            len_decoder = id_responses.size(1) - 1

            embed_posts = self.embedding(id_posts)          # [batch, seq, embed_size]
            embed_responses = self.embedding(id_responses)  # [batch, seq, embed_size]
            # state: [layers, batch, dim]
            _, state_posts = self.post_encoder(embed_posts.transpose(0, 1), len_posts)
            _, state_responses = self.response_encoder(embed_responses.transpose(0, 1), len_responses)
            if isinstance(state_posts, tuple):
                state_posts = state_posts[0]
            if isinstance(state_responses, tuple):
                state_responses = state_responses[0]
            x = state_posts[-1, :, :]      # [batch, dim]
            y = state_responses[-1, :, :]  # [batch, dim]

            # p(z|x)
            _mu, _logvar = self.prior_net(x)       # [batch, latent]
            # p(z|x,y)
            mu, logvar = self.recognize_net(x, y)  # [batch, latent]
            # reparameterization
            z = mu + (0.5 * logvar).exp() * sampled_latents  # [batch, latent]

            # the decoder input is the response with the end_id removed
            decoder_inputs = embed_responses[:, :-1, :].transpose(0, 1)  # [seq-1, batch, embed_size]
            decoder_inputs = decoder_inputs.split([1] * len_decoder, 0)
            # per-step decoder inputs: seq-1 tensors of [1, batch, embed_size]

            first_state = self.prepare_state(torch.cat([z, x], 1))  # [num_layer, batch, dim_out]
            outputs = []
            for idx in range(len_decoder):
                if idx == 0:
                    state = first_state              # initial decoder state
                decoder_input = decoder_inputs[idx]  # input at this time step [1, batch, embed_size]
                # output: [1, batch, dim_out]
                # state: [num_layer, batch, dim_out]
                output, state = self.decoder(decoder_input, state)
                assert output.squeeze().equal(state[0][-1])
                outputs.append(output)

            outputs = torch.cat(outputs, 0).transpose(0, 1)  # [batch, seq-1, dim_out]
            output_vocab = self.projector(outputs)           # [batch, seq-1, num_vocab]
            return output_vocab, _mu, _logvar, mu, logvar
        else:  # inference
            id_posts = inputs['posts']                   # [batch, seq]
            len_posts = inputs['len_posts']              # [batch]
            sampled_latents = inputs['sampled_latents']  # [batch, latent_size]
            batch_size = id_posts.size(0)

            embed_posts = self.embedding(id_posts)  # [batch, seq, embed_size]
            # state: [layers, batch, dim]
            _, state_posts = self.post_encoder(embed_posts.transpose(0, 1), len_posts)
            if isinstance(state_posts, tuple):  # for an lstm, take h
                state_posts = state_posts[0]    # [layers, batch, dim]
            x = state_posts[-1, :, :]           # take the last layer [batch, dim]

            # p(z|x)
            _mu, _logvar = self.prior_net(x)  # [batch, latent]
            # reparameterization
            z = _mu + (0.5 * _logvar).exp() * sampled_latents  # [batch, latent]

            first_state = self.prepare_state(torch.cat([z, x], 1))  # [num_layer, batch, dim_out]
            done = torch.tensor([0] * batch_size).bool()
            first_input_id = (torch.ones((1, batch_size)) * self.config.start_id).long()
            if gpu:
                done = done.cuda()
                first_input_id = first_input_id.cuda()

            outputs = []
            for idx in range(max_len):
                if idx == 0:  # first time step
                    state = first_state                             # initial decoder state
                    decoder_input = self.embedding(first_input_id)  # initial input [1, batch, embed_size]
                else:
                    decoder_input = self.embedding(next_input_id)   # [1, batch, embed_size]
                # output: [1, batch, dim_out]
                # state: [num_layers, batch, dim_out]
                output, state = self.decoder(decoder_input, state)
                outputs.append(output)

                vocab_prob = self.projector(output)  # [1, batch, num_vocab]
                next_input_id = torch.argmax(vocab_prob, 2)
                # take the most probable token as the next input [1, batch]

                _done = next_input_id.squeeze(0) == self.config.end_id
                # sequences that finished decoding at this step [batch]
                done = done | _done  # all sequences finished so far
                if done.sum() == batch_size:
                    break  # stop early once every sequence has finished

            outputs = torch.cat(outputs, 0).transpose(0, 1)  # [batch, seq, dim_out]
            output_vocab = self.projector(outputs)           # [batch, seq, num_vocab]
            return output_vocab, _mu, _logvar, None, None

    def print_parameters(self):
        r""" count the parameters """
        total_num = 0  # total number of trainable parameters
        for param in self.parameters():
            if param.requires_grad:
                num = 1
                for dim in param.size():
                    num *= dim
                total_num += num
        print(f"total number of parameters: {total_num}")

    def save_model(self, epoch, global_step, path):
        r""" save the model """
        torch.save({'embedding': self.embedding.state_dict(),
                    'post_encoder': self.post_encoder.state_dict(),
                    'response_encoder': self.response_encoder.state_dict(),
                    'prior_net': self.prior_net.state_dict(),
                    'recognize_net': self.recognize_net.state_dict(),
                    'prepare_state': self.prepare_state.state_dict(),
                    'decoder': self.decoder.state_dict(),
                    'projector': self.projector.state_dict(),
                    'epoch': epoch,
                    'global_step': global_step}, path)

    def load_model(self, path):
        r""" load the model """
        checkpoint = torch.load(path, map_location=torch.device('cpu'))
        self.embedding.load_state_dict(checkpoint['embedding'])
        self.post_encoder.load_state_dict(checkpoint['post_encoder'])
        self.response_encoder.load_state_dict(checkpoint['response_encoder'])
        self.prior_net.load_state_dict(checkpoint['prior_net'])
        self.recognize_net.load_state_dict(checkpoint['recognize_net'])
        self.prepare_state.load_state_dict(checkpoint['prepare_state'])
        self.decoder.load_state_dict(checkpoint['decoder'])
        self.projector.load_state_dict(checkpoint['projector'])
        epoch = checkpoint['epoch']
        global_step = checkpoint['global_step']
        return epoch, global_step
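# The two (mu, logvar) pairs returned by forward() are typically trained with
# the CVAE objective: reconstruction loss plus KL(recognition || prior). A
# minimal sketch of that KL term between two diagonal Gaussians, under the
# same parameterization as the model (z = mu + exp(0.5 * logvar) * eps); the
# loss wiring of the actual training script is not shown in this snippet.
import torch

def gaussian_kl(mu_q, logvar_q, mu_p, logvar_p):
    # KL( N(mu_q, var_q) || N(mu_p, var_p) ), summed over the latent dims
    kl = 0.5 * (logvar_p - logvar_q
                + (logvar_q.exp() + (mu_q - mu_p) ** 2) / logvar_p.exp()
                - 1)
    return kl.sum(dim=-1)

mu, logvar = torch.zeros(4, 16), torch.zeros(4, 16)    # recognition net output
_mu, _logvar = torch.zeros(4, 16), torch.zeros(4, 16)  # prior net output
print(gaussian_kl(mu, logvar, _mu, _logvar))           # all zeros for identical Gaussians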