def train(self):
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    learn_rate = self.lr
    for i in range(self.iter_num + 1):
        att_batch, img_batch, lab_batch = data_iterator(
            self.sess, self.train_data, self.selected_num,
            self.ave_img_pl, self.ave_att_pl, self.Weights_encoder)
        dataset = LoadDataset(att_batch, img_batch, lab_batch)
        next_batch = dataset.get_batch
        con_att_batch, con_img_batch, con_lab_batch = next_batch(self.batch_size)
        self.sess.run(self.optimizer,
                      feed_dict={
                          self.att_pl: att_batch,
                          self.img_pl: img_batch,
                          self.con_img_pl: con_img_batch,
                          self.con_att_pl: con_att_batch,
                          self.similarity_float: con_lab_batch,
                          self.lr_pl: learn_rate
                      })
        if i >= 1000:
            learn_rate = 5e-5
        if i % 200 == 0:
            print('the %d-th iter' % i)
            self.test()
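# NOTE: LoadDataset.get_batch used above is not shown in this snippet. The helper
# below is only a sketch of the sampling behaviour it is assumed to have: draw a
# random mini-batch of aligned (attribute, image-feature, label) rows. The
# array-based signature is an assumption, not the original class method.
import numpy as np

def get_batch_sketch(att, img, lab, batch_size):
    idx = np.random.randint(0, len(lab), size=batch_size)  # sample with replacement
    return att[idx], img[idx], lab[idx]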
def validate(opt):
    param = _param()
    dataset = LoadDataset(opt)
    param.X_dim = dataset.feature_dim
    # data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train, opt)

    # initialize model
    netGs = []
    checkpoint = torch.load(opt.resume)
    parts = 7
    for part in range(parts):
        netG = _netG(dataset.text_dim, 512).cuda()
        netG.load_state_dict(checkpoint['state_dict_G' + str(part)])
        netGs.append(netG)
    train_classifier(opt, param, dataset, netGs)
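# NOTE: validate() above expects a single checkpoint holding one generator state
# dict per part, keyed 'state_dict_G0' ... 'state_dict_G6'. The sketch below shows
# a matching save call; the extra 'it' field and the file path argument are
# assumptions, not taken from the original code.
import torch

def save_part_generators_sketch(netGs, it, path):
    state = {'it': it}
    for part, netG in enumerate(netGs):
        state['state_dict_G' + str(part)] = netG.state_dict()  # e.g. 'state_dict_G0'
    torch.save(state, path)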
def train(creative_weight=1000, model_num=1, is_val=True): param = _param() if opt.dataset == 'CUB': dataset = LoadDataset(opt, main_dir, is_val) exp_info = 'CUB_EASY' if opt.splitmode == 'easy' else 'CUB_HARD' elif opt.dataset == 'NAB': dataset = LoadDataset_NAB(opt, main_dir, is_val) exp_info = 'NAB_EASY' if opt.splitmode == 'easy' else 'NAB_HARD' else: print('No Dataset with that name') sys.exit(0) param.X_dim = dataset.feature_dim opt.Creative_weight = creative_weight data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train, opt) result = Result() ones = Variable(torch.Tensor(1, 1)) ones.data.fill_(1.0) netG = _netG(dataset.text_dim, dataset.feature_dim).cuda() netG.apply(weights_init) if model_num == 6: netD = _netD(dataset.train_cls_num + 1, dataset.feature_dim).cuda() else: netD = _netD(dataset.train_cls_num, dataset.feature_dim).cuda() netD.apply(weights_init) if model_num == 2: log_SM_ab = Scale(2) log_SM_ab = nn.DataParallel(log_SM_ab).cuda() elif model_num == 4 or model_num == 5: log_SM_ab = Scale(1) log_SM_ab = nn.DataParallel(log_SM_ab).cuda() exp_params = 'Model_{}_CAN{}_Eu{}_Rls{}_RWz{}_{}'.format( model_num, opt.Creative_weight, opt.CENT_LAMBDA, opt.REG_W_LAMBDA, opt.REG_Wz_LAMBDA, opt.exp_name) out_subdir = main_dir + 'out/{:s}/{:s}'.format(exp_info, exp_params) if not os.path.exists(out_subdir): os.makedirs(out_subdir) log_dir = out_subdir + '/log_{:s}.txt'.format(exp_info) with open(log_dir, 'a') as f: f.write('Training Start:') f.write(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + '\n') start_step = 0 if opt.resume: if os.path.isfile(opt.resume): print("=> loading checkpoint '{}'".format(opt.resume)) checkpoint = torch.load(opt.resume) netG.load_state_dict(checkpoint['state_dict_G']) netD.load_state_dict(checkpoint['state_dict_D']) start_step = checkpoint['it'] print(checkpoint['log']) else: print("=> no checkpoint found at '{}'".format(opt.resume)) if model_num == 2 or model_num == 4 or model_num == 5: nets = [netG, netD, log_SM_ab] else: nets = [netG, netD] tr_cls_centroid = Variable( torch.from_numpy(dataset.tr_cls_centroid.astype('float32'))).cuda() optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9)) optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9)) if model_num == 2 or model_num == 4 or model_num == 5: optimizer_SM_ab = optim.Adam(log_SM_ab.parameters(), lr=opt.lr, betas=(0.5, 0.999)) for it in tqdm(range(start_step, 3000 + 1)): # Creative Loss blobs = data_layer.forward() labels = blobs['labels'].astype(int) new_class_labels = Variable( torch.from_numpy(np.ones_like(labels) * dataset.train_cls_num)).cuda() text_feat_1 = np.array( [dataset.train_text_feature[i, :] for i in labels]) text_feat_2 = np.array( [dataset.train_text_feature[i, :] for i in labels]) np.random.shuffle( text_feat_1 ) # Shuffle both features to guarantee different permutations np.random.shuffle(text_feat_2) alpha = (np.random.random(len(labels)) * (.8 - .2)) + .2 text_feat_mean = np.multiply(alpha, text_feat_1.transpose()) text_feat_mean += np.multiply(1. 
- alpha, text_feat_2.transpose()) text_feat_mean = text_feat_mean.transpose() text_feat_mean = normalize(text_feat_mean, norm='l2', axis=1) text_feat_Creative = Variable( torch.from_numpy(text_feat_mean.astype('float32'))).cuda() z_creative = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() G_creative_sample = netG(z_creative, text_feat_Creative) """ Discriminator """ for _ in range(5): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array( [dataset.train_text_feature[i, :] for i in labels]) text_feat = Variable(torch.from_numpy( text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() # GAN's D loss D_real, C_real = netD(X) D_loss_real = torch.mean(D_real) C_loss_real = F.cross_entropy(C_real, y_true) DC_loss = -D_loss_real + C_loss_real DC_loss.backward() # GAN's D loss G_sample = netG(z, text_feat).detach() D_fake, C_fake = netD(G_sample) D_loss_fake = torch.mean(D_fake) C_loss_fake = F.cross_entropy(C_fake, y_true) DC_loss = D_loss_fake + C_loss_fake DC_loss.backward() # train with gradient penalty (WGAN_GP) grad_penalty = calc_gradient_penalty(netD, X.data, G_sample.data) grad_penalty.backward() Wasserstein_D = D_loss_real - D_loss_fake optimizerD.step() reset_grad(nets) """ Generator """ for _ in range(1): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array( [dataset.train_text_feature[i, :] for i in labels]) text_feat = Variable(torch.from_numpy( text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() G_sample = netG(z, text_feat) D_fake, C_fake = netD(G_sample) _, C_real = netD(X) # GAN's G loss G_loss = torch.mean(D_fake) # Auxiliary classification loss C_loss = (F.cross_entropy(C_real, y_true) + F.cross_entropy(C_fake, y_true)) / 2 GC_loss = -G_loss + C_loss # Centroid loss Euclidean_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for i in range(dataset.train_cls_num): sample_idx = (y_true == i).data.nonzero().squeeze() if sample_idx.numel() == 0: Euclidean_loss += 0.0 else: G_sample_cls = G_sample[sample_idx, :] Euclidean_loss += ( G_sample_cls.mean(dim=0) - tr_cls_centroid[i]).pow(2).sum().sqrt() Euclidean_loss *= 1.0 / dataset.train_cls_num * opt.CENT_LAMBDA # ||W||_2 regularization reg_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for name, p in netG.named_parameters(): if 'weight' in name: reg_loss += p.pow(2).sum() reg_loss.mul_(opt.REG_W_LAMBDA) # ||W_z||21 regularization, make W_z sparse reg_Wz_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_Wz_LAMBDA != 0: Wz = netG.rdc_text.weight reg_Wz_loss = Wz.pow(2).sum(dim=0).sqrt().sum().mul( opt.REG_Wz_LAMBDA) # D(C| GX_fake)) + Classify GX_fake as real D_creative_fake, C_creative_fake = netD(G_creative_sample) if model_num == 1: # KL Divergence G_fake_C = F.log_softmax(C_creative_fake) else: G_fake_C = F.softmax(C_creative_fake) if model_num == 1: # KL Divergence entropy_GX_fake = (G_fake_C / G_fake_C.data.size(1)).mean() elif model_num == 2: # SM Divergence q_shape = Variable( torch.FloatTensor(G_fake_C.data.size(0), G_fake_C.data.size(1))).cuda() q_shape.data.fill_(1.0 / 
G_fake_C.data.size(1)) SM_ab = F.sigmoid(log_SM_ab(ones)) SM_a = 0.2 + torch.div(SM_ab[0][0], 1.6666666666666667).cuda() SM_b = 0.2 + torch.div(SM_ab[0][1], 1.6666666666666667).cuda() pow_a_b = torch.div(1 - SM_a, 1 - SM_b) alpha_term = (torch.pow(G_fake_C + 1e-5, SM_a) * torch.pow(q_shape, 1 - SM_a)).sum(1) entropy_GX_fake_vec = torch.div( torch.pow(alpha_term, pow_a_b) - 1, SM_b - 1) elif model_num == 3: # Bachatera Divergence q_shape = Variable( torch.FloatTensor(G_fake_C.data.size(0), G_fake_C.data.size(1))).cuda() q_shape.data.fill_(1.0 / G_fake_C.data.size(1)) SM_a = Variable(torch.FloatTensor(1, 1)).cuda() SM_a.data.fill_(opt.SM_Alpha) SM_b = Variable(torch.FloatTensor(1, 1)).cuda() SM_b.data.fill_(opt.SM_Alpha) pow_a_b = torch.div(1 - SM_a, 1 - SM_b) alpha_term = (torch.pow(G_fake_C + 1e-5, SM_a) * torch.pow(q_shape, 1 - SM_a)).sum(1) entropy_GX_fake_vec = -torch.div( torch.pow(alpha_term, pow_a_b) - 1, SM_b - 1) elif model_num == 4: # Tsallis Divergence q_shape = Variable( torch.FloatTensor(G_fake_C.data.size(0), G_fake_C.data.size(1))).cuda() q_shape.data.fill_(1.0 / G_fake_C.data.size(1)) SM_ab = F.sigmoid(log_SM_ab(ones)) SM_a = 0.2 + torch.div(SM_ab[0][0], 1.6666666666666667).cuda() SM_b = SM_a pow_a_b = torch.div(1 - SM_a, 1 - SM_b) alpha_term = (torch.pow(G_fake_C + 1e-5, SM_a) * torch.pow(q_shape, 1 - SM_a)).sum(1) entropy_GX_fake_vec = -torch.div( torch.pow(alpha_term, pow_a_b) - 1, SM_b - 1) elif model_num == 5: # Renyi Divergence q_shape = Variable( torch.FloatTensor(G_fake_C.data.size(0), G_fake_C.data.size(1))).cuda() q_shape.data.fill_(1.0 / G_fake_C.data.size(1)) SM_ab = F.sigmoid(log_SM_ab(ones)) SM_a = 0.2 + torch.div(SM_ab[0][0], 1.6666666666666667).cuda() SM_b = Variable(torch.FloatTensor(1, 1)).cuda() SM_b.data.fill_(opt.SM_Beta) pow_a_b = torch.div(1 - SM_a, 1 - SM_b) alpha_term = (torch.pow(G_fake_C + 1e-5, SM_a) * torch.pow(q_shape, 1 - SM_a)).sum(1) entropy_GX_fake_vec = -torch.div( torch.pow(alpha_term, pow_a_b) - 1, SM_b - 1) if model_num == 6: loss_creative = F.cross_entropy(C_creative_fake, new_class_labels) else: if model_num != 1: # Normalize SM-Divergence & Report mean min_e, max_e = torch.min(entropy_GX_fake_vec), torch.max( entropy_GX_fake_vec) entropy_GX_fake_vec = (entropy_GX_fake_vec - min_e) / (max_e - min_e) entropy_GX_fake = -entropy_GX_fake_vec.mean() loss_creative = -opt.Creative_weight * entropy_GX_fake disc_GX_fake_real = -torch.mean(D_creative_fake) total_loss_creative = loss_creative + disc_GX_fake_real all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss + total_loss_creative all_loss.backward() if model_num == 2 or model_num == 4 or model_num == 5: optimizer_SM_ab.step() optimizerG.step() reset_grad(nets) if it % opt.disp_interval == 0 and it: acc_real = (np.argmax(C_real.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) acc_fake = (np.argmax(C_fake.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) log_text = 'Iter-{}; rl: {:.4}%; fk: {:.4}%'.format( it, acc_real * 100, acc_fake * 100) with open(log_dir, 'a') as f: f.write(log_text + '\n') if it % opt.evl_interval == 0 and it > opt.disp_interval: netG.eval() cur_acc = eval_fakefeat_test(it, netG, dataset, param, result) cur_auc = eval_fakefeat_GZSL(netG, dataset, param, out_subdir, result) if cur_acc > result.best_acc: result.best_acc = cur_acc if cur_auc > result.best_auc: result.best_auc = cur_auc if it % opt.save_interval: files2remove = glob.glob(out_subdir + '/Best_model*') for _i 
in files2remove: os.remove(_i) torch.save( { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, }, out_subdir + '/Best_model_AUC_{:.2f}.tar'.format(cur_auc)) netG.train() return result
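# NOTE: Scale(n), used above for the learnable SM/Tsallis/Renyi divergence
# parameters, is not defined in these snippets. The module below is a sketch of
# the assumed behaviour: it exposes n learnable logits that are returned when the
# module is called on the constant `ones` tensor, so that
# F.sigmoid(Scale(2)(ones)) yields the two values later squashed into (0.2, 0.8).
import torch
import torch.nn as nn

class ScaleSketch(nn.Module):
    def __init__(self, size):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, size))  # one logit per divergence parameter

    def forward(self, ones):
        # `ones` is a (1, 1) tensor filled with 1.0, so this just returns the
        # logits as a differentiable (1, size) output.
        return ones * self.weight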
def train():
    param = _param()
    dataset = LoadDataset(opt)
    param.X_dim = dataset.feature_dim

    data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train, opt)
    result = Result()

    netG = _netG(dataset.text_dim, dataset.feature_dim).cuda()
    netG.apply(weights_init)
    print(netG)
    netD = _netD(dataset.train_cls_num, dataset.feature_dim).cuda()
    netD.apply(weights_init)
    print(netD)

    exp_info = 'CUB_EASY' if opt.splitmode == 'easy' else 'CUB_HARD'
    exp_params = 'Eu{}_Rls{}_RWz{}'.format(opt.CENT_LAMBDA, opt.REG_W_LAMBDA, opt.REG_Wz_LAMBDA)

    out_dir = 'out/{:s}'.format(exp_info)
    out_subdir = 'out/{:s}/{:s}'.format(exp_info, exp_params)
    if not os.path.exists('out'):
        os.mkdir('out')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    if not os.path.exists(out_subdir):
        os.mkdir(out_subdir)
    cprint(" The output dictionary is {}".format(out_subdir), 'red')
    log_dir = out_subdir + '/log_{:s}.txt'.format(exp_info)
    with open(log_dir, 'a') as f:
        f.write('Training Start:')
        f.write(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + '\n')

    start_step = 0
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            netG.load_state_dict(checkpoint['state_dict_G'])
            netD.load_state_dict(checkpoint['state_dict_D'])
            start_step = checkpoint['it']
            print(checkpoint['log'])
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    nets = [netG, netD]

    tr_cls_centroid = Variable(torch.from_numpy(dataset.tr_cls_centroid.astype('float32'))).cuda()
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9))

    for it in range(start_step, 3000 + 1):
        """ Discriminator """
        for _ in range(5):
            blobs = data_layer.forward()
            feat_data = blobs['data']             # image data
            labels = blobs['labels'].astype(int)  # class labels
            text_feat = np.array([dataset.train_text_feature[i, :] for i in labels])
            text_feat = Variable(torch.from_numpy(text_feat.astype('float32'))).cuda()
            X = Variable(torch.from_numpy(feat_data)).cuda()
            y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda()
            z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda()

            # GAN's D loss
            D_real, C_real = netD(X)
            D_loss_real = torch.mean(D_real)
            C_loss_real = F.cross_entropy(C_real, y_true)
            DC_loss = -D_loss_real + C_loss_real
            DC_loss.backward()

            # GAN's D loss
            G_sample = netG(z, text_feat).detach()
            D_fake, C_fake = netD(G_sample)
            D_loss_fake = torch.mean(D_fake)
            C_loss_fake = F.cross_entropy(C_fake, y_true)
            DC_loss = D_loss_fake + C_loss_fake
            DC_loss.backward()

            # train with gradient penalty (WGAN_GP)
            grad_penalty = calc_gradient_penalty(netD, X.data, G_sample.data)
            grad_penalty.backward()

            Wasserstein_D = D_loss_real - D_loss_fake
            optimizerD.step()
            reset_grad(nets)

        """ Generator """
        for _ in range(1):
            blobs = data_layer.forward()
            feat_data = blobs['data']             # image data
            labels = blobs['labels'].astype(int)  # class labels
            text_feat = np.array([dataset.train_text_feature[i, :] for i in labels])
            text_feat = Variable(torch.from_numpy(text_feat.astype('float32'))).cuda()
            X = Variable(torch.from_numpy(feat_data)).cuda()
            y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda()
            z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda()

            G_sample = netG(z, text_feat)
            D_fake, C_fake = netD(G_sample)
            _, C_real = netD(X)

            # GAN's G loss
            G_loss = torch.mean(D_fake)
            # Auxiliary classification loss
            C_loss = (F.cross_entropy(C_real, y_true) + F.cross_entropy(C_fake, y_true)) / 2
            GC_loss = -G_loss + C_loss

            # Centroid loss
            Euclidean_loss = Variable(torch.Tensor([0.0])).cuda()
            if opt.REG_W_LAMBDA != 0:
                for i in range(dataset.train_cls_num):
                    sample_idx = (y_true == i).data.nonzero().squeeze()
                    if sample_idx.numel() == 0:
                        Euclidean_loss += 0.0
                    else:
                        G_sample_cls = G_sample[sample_idx, :]
                        Euclidean_loss += (G_sample_cls.mean(dim=0) - tr_cls_centroid[i]).pow(2).sum().sqrt()
                Euclidean_loss *= 1.0 / dataset.train_cls_num * opt.CENT_LAMBDA

            # ||W||_2 regularization
            reg_loss = Variable(torch.Tensor([0.0])).cuda()
            if opt.REG_W_LAMBDA != 0:
                for name, p in netG.named_parameters():
                    if 'weight' in name:
                        reg_loss += p.pow(2).sum()
                reg_loss.mul_(opt.REG_W_LAMBDA)

            # ||W_z||21 regularization, make W_z sparse
            reg_Wz_loss = Variable(torch.Tensor([0.0])).cuda()
            if opt.REG_Wz_LAMBDA != 0:
                Wz = netG.rdc_text.weight
                reg_Wz_loss = Wz.pow(2).sum(dim=0).sqrt().sum().mul(opt.REG_Wz_LAMBDA)

            all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss
            all_loss.backward()
            optimizerG.step()
            reset_grad(nets)

        if it % opt.disp_interval == 0 and it:
            acc_real = (np.argmax(C_real.data.cpu().numpy(), axis=1) ==
                        y_true.data.cpu().numpy()).sum() / float(y_true.data.size()[0])
            acc_fake = (np.argmax(C_fake.data.cpu().numpy(), axis=1) ==
                        y_true.data.cpu().numpy()).sum() / float(y_true.data.size()[0])
            log_text = 'Iter-{}; Was_D: {:.4}; Euc_ls: {:.4}; reg_ls: {:.4}; Wz_ls: {:.4}; G_loss: {:.4}; D_loss_real: {:.4};' \
                       ' D_loss_fake: {:.4}; rl: {:.4}%; fk: {:.4}%' \
                .format(it, Wasserstein_D.data[0], Euclidean_loss.data[0], reg_loss.data[0], reg_Wz_loss.data[0],
                        G_loss.data[0], D_loss_real.data[0], D_loss_fake.data[0], acc_real * 100, acc_fake * 100)
            print(log_text)
            with open(log_dir, 'a') as f:
                f.write(log_text + '\n')

        if it % opt.evl_interval == 0 and it >= 100:
            netG.eval()
            eval_fakefeat_test(it, netG, dataset, param, result)
            if result.save_model:
                files2remove = glob.glob(out_subdir + '/Best_model*')
                for _i in files2remove:
                    os.remove(_i)
                torch.save({
                    'it': it + 1,
                    'state_dict_G': netG.state_dict(),
                    'state_dict_D': netD.state_dict(),
                    'random_seed': opt.manualSeed,
                    'log': log_text,
                }, out_subdir + '/Best_model_Acc_{:.2f}.tar'.format(result.acc_list[-1]))
            netG.train()

        if it % opt.save_interval == 0 and it:
            torch.save({
                'it': it + 1,
                'state_dict_G': netG.state_dict(),
                'state_dict_D': netD.state_dict(),
                'random_seed': opt.manualSeed,
                'log': log_text,
            }, out_subdir + '/Iter_{:d}.tar'.format(it))
            cprint('Save model to ' + out_subdir + '/Iter_{:d}.tar'.format(it), 'red')
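# NOTE: calc_gradient_penalty, called in the discriminator loops above, is not
# defined in these snippets. Below is a sketch of the standard WGAN-GP penalty
# (Gulrajani et al., 2017) it presumably implements; the coefficient
# lambda_gp = 10 and the uniform interpolation are assumptions rather than values
# taken from this codebase.
import torch
from torch import autograd

def calc_gradient_penalty_sketch(netD, real_data, fake_data, lambda_gp=10.0):
    batch_size = real_data.size(0)
    alpha = torch.rand(batch_size, 1, device=real_data.device).expand_as(real_data)
    # Random points on the lines between real and generated features.
    interpolates = (alpha * real_data + (1.0 - alpha) * fake_data).requires_grad_(True)
    disc_interpolates, _ = netD(interpolates)  # netD returns (critic score, class logits)
    gradients = autograd.grad(outputs=disc_interpolates.sum(),
                              inputs=interpolates,
                              create_graph=True,
                              retain_graph=True)[0]
    # Penalize the critic's gradient norm for deviating from 1.
    return ((gradients.norm(2, dim=1) - 1.0) ** 2).mean() * lambda_gp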
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from datetime import datetime

from dataset import LoadDataset
from evalution_segmentaion import eval_semantic_segmentation
from Models import FCN
import cfg

device = t.device('cuda') if t.cuda.is_available() else t.device('cpu')

num_class = cfg.DATASET[1]

Load_train = LoadDataset([cfg.TRAIN_ROOT, cfg.TRAIN_LABEL], cfg.crop_size)
Load_val = LoadDataset([cfg.VAL_ROOT, cfg.VAL_LABEL], cfg.crop_size)

train_data = DataLoader(Load_train, batch_size=cfg.BATCH_SIZE, shuffle=True, num_workers=1)
val_data = DataLoader(Load_val, batch_size=cfg.BATCH_SIZE, shuffle=True, num_workers=1)

fcn = FCN.FCN(num_class)
fcn = fcn.to(device)
criterion = nn.NLLLoss().to(device)
optimizer = optim.Adam(fcn.parameters(), lr=1e-4)


def train(model):
    best = [0]
    net = model.train()
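# NOTE: train(model) above is cut off after its first two statements. The helper
# below is only a sketch of one plausible epoch under the objects already defined
# in this snippet (train_data, criterion = NLLLoss, optimizer, device); it assumes
# the loader yields (image, label) batches. Because NLLLoss expects
# log-probabilities, the raw FCN scores are passed through F.log_softmax first.
def train_one_epoch_sketch(net, epoch):
    net.train()
    running_loss = 0.0
    for img, label in train_data:
        img = img.to(device)
        label = label.to(device).long()
        out = F.log_softmax(net(img), dim=1)  # (N, num_class, H, W)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('epoch {}: mean train loss {:.5f}'.format(epoch, running_loss / len(train_data)))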
def train(): param = _param() dataset = LoadDataset(opt) param.X_dim = dataset.feature_dim data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train, opt) # initialize model netGs = [] netDs = [] parts = 6 #if opt.dataset == "CUB2011" else 6 for part in range(parts): netGs.append(_netG(dataset.text_dim, 512).cuda().apply(weights_init)) netDs.append( _netD(dataset.train_cls_num, 512).cuda().apply(weights_init)) exp_info = 'CUB_EASY' if opt.splitmode == 'easy' else 'CUB_HARD' exp_params = 'Eu{}_Rls{}_RWz{}'.format(opt.CENT_LAMBDA, opt.REG_W_LAMBDA, opt.REG_Wz_LAMBDA) out_dir = 'out/{:s}'.format(exp_info) out_subdir = 'out/{:s}/{:s}'.format(exp_info, exp_params) if not os.path.exists('out'): os.mkdir('out') if not os.path.exists(out_dir): os.mkdir(out_dir) if not os.path.exists(out_subdir): os.mkdir(out_subdir) cprint(" The output dictionary is {}".format(out_subdir), 'red') log_dir = out_subdir + '/log_{:s}.txt'.format(exp_info) with open(log_dir, 'a') as f: f.write('Training Start:') f.write(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + '\n') start_step = 0 part_cls_centrild = torch.from_numpy( dataset.part_cls_centrild.astype('float32')).cuda() # initialize optimizers optimizerGs = [] optimizerDs = [] for netG in netGs: optimizerGs.append( optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9))) for netD in netDs: optimizerDs.append( optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9))) for it in range(start_step, 3000 + 1): """ Discriminator """ for _ in range(5): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array( [dataset.train_text_feature[i, :] for i in labels]) text_feat = torch.from_numpy(text_feat.astype('float32')).cuda() X = torch.from_numpy(feat_data).cuda() y_true = torch.from_numpy(labels.astype('int')).cuda() z = torch.randn(opt.batchsize, param.z_dim).cuda() for part in range(parts): z = torch.randn(opt.batchsize, param.z_dim).cuda() D_real, C_real = netDs[part](X[:, part * 512:(part + 1) * 512]) D_loss_real = torch.mean(D_real) C_loss_real = F.cross_entropy(C_real, y_true) DC_loss = -D_loss_real + C_loss_real DC_loss.backward() G_sample = netGs[part](z, text_feat) D_fake, C_fake = netDs[part](G_sample) D_loss_fake = torch.mean(D_fake) C_loss_fake = F.cross_entropy(C_fake, y_true) DC_loss = D_loss_fake + C_loss_fake DC_loss.backward() grad_penalty = calc_gradient_penalty( netDs[part], X.data[:, part * 512:(part + 1) * 512], G_sample.data) grad_penalty.backward() Wasserstein_D = D_loss_real - D_loss_fake # writer.add_scalar("Wasserstein_D"+str(part), Wasserstein_D.item(), it) optimizerDs[part].step() netGs[part].zero_grad() netDs[part].zero_grad() """ Generator """ for _ in range(1): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array( [dataset.train_text_feature[i, :] for i in labels]) text_feat = torch.from_numpy(text_feat.astype('float32')).cuda() X = torch.from_numpy(feat_data).cuda() y_true = torch.from_numpy(labels.astype('int')).cuda() for part in range(parts): z = torch.randn(opt.batchsize, param.z_dim).cuda() G_sample = netGs[part](z, text_feat) # G_sample_all[:, part*512:(part+1)*512] = G_sample D_fake, C_fake = netDs[part](G_sample) _, C_real = netDs[part](X[:, part * 512:(part + 1) * 512]) G_loss = torch.mean(D_fake) C_loss = (F.cross_entropy(C_real, y_true) + F.cross_entropy(C_fake, y_true)) / 2 GC_loss = -G_loss + C_loss # 
writer.add_scalar("GC_loss"+str(part), GC_loss.item(), it) Euclidean_loss = torch.tensor([0.0]).cuda() if opt.REG_W_LAMBDA != 0: for i in range(dataset.train_cls_num): sample_idx = (y_true == i).data.nonzero().squeeze() if sample_idx.numel() == 0: Euclidean_loss += 0.0 else: G_sample_cls = G_sample[sample_idx, :] Euclidean_loss += (G_sample_cls.mean(dim=0) - part_cls_centrild[i][part] ).pow(2).sum().sqrt() Euclidean_loss *= 1.0 / dataset.train_cls_num * opt.CENT_LAMBDA # ||W||_2 regularization reg_loss = torch.Tensor([0.0]).cuda() if opt.REG_W_LAMBDA != 0: for name, p in netGs[part].named_parameters(): if 'weight' in name: reg_loss += p.pow(2).sum() reg_loss.mul_(opt.REG_W_LAMBDA) # writer.add_scalar("reg_loss"+str(part), reg_loss.item(), it) # ||W_z||21 regularization, make W_z sparse reg_Wz_loss = torch.Tensor([0.0]).cuda() if opt.REG_Wz_LAMBDA != 0: Wz = netGs[part].rdc_text.weight reg_Wz_loss = reg_Wz_loss + Wz.pow(2).sum( dim=0).sqrt().sum().mul(opt.REG_Wz_LAMBDA) # writer.add_scalar("reg_Wz_loss"+str(part), reg_Wz_loss.item(), it) all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss all_loss.backward() optimizerGs[part].step() if it % opt.evl_interval == 0 and it >= 1000: print(it) for part in range(parts): netGs[part].eval() train_classifier(opt, param, dataset, netGs) for part in range(parts): netGs[part].train()
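# NOTE: reset_grad and weights_init are used by every GAN snippet here but never
# shown. The two sketches below state the assumed behaviour: reset_grad clears
# accumulated gradients on all networks between the D and G updates, and
# weights_init (applied via net.apply) performs a DCGAN-style initialization of
# Linear layers. Both are assumptions, not the original helpers.
import torch.nn as nn

def reset_grad_sketch(nets):
    for net in nets:
        net.zero_grad()

def weights_init_sketch(m):
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, mean=0.0, std=0.02)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)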
def train(self):
    # Create the folder that checkpoints are saved to
    if not os.path.exists(self.checkpoint_save):
        os.makedirs(self.checkpoint_save)

    for epoch in range(epochs):
        print("=====This is Epoch =====", epoch)
        '''
        if epoch < 2:
            for idx, p in enumerate(self.model.parameters()):
                if self.flag[idx] == 0:
                    p.requires_grad = False
        else:
            for idx, p in enumerate(self.model.parameters()):
                p.requires_grad = True
            fixed = 1
        '''
        if epoch % decay == 0:
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = self.lr * 0.1
                self.lr = param_group['lr']
        print("learning rate : {}".format(
            self.optimizer.state_dict()['param_groups'][0]['lr']))
        learning_rate = str(self.optimizer.state_dict()['param_groups'][0]['lr'])

        dir_num = 0
        for dir in self.train_list:
            i = 0
            train_set = LoadDataset(dir)
            train_loader = DataLoader(train_set, batch_size=batch_size,
                                      num_workers=0, shuffle=True)
            for idx, train_data in enumerate(train_loader):
                audio, gt = train_data
                # print('Train : ', audio.shape, gt.shape)
                # exit(0)
                # torch.Size([bs, 3, f_num, 256, 320])
                # torch.Size([bs, 3, f_num, 256, 320])
                # torch.Size([bs, 1, 256, 320])
                # image = Variable(image).cuda()
                audio = Variable(audio).cuda()
                gt = Variable(gt).cuda()

                det = self.model(audio)
                # print('Result : ', det.shape, gt.shape)
                # exit(0)
                # loss1 = obj_func(att, gt)
                loss2 = obj_func(det, gt)
                loss = loss2

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                self.loss_dict.append(loss.item())
                i += 1
                print('Epoch: [{}], Root_folder: {}, Frames: {}, Enumerate: {}\n=====Loss: {:.6f}=====\n'
                      .format(epoch, dir, len(train_set), i, loss.item()))
            '''
            dir_num += 1
            if dir_num == 500:
                break
            '''

        if epoch % save_hop == 0:
            torch.save(
                self.model.state_dict(),
                os.path.join(
                    self.checkpoint_save,
                    'model_%s_%s_%s_%d_%s_%d.pth' %
                    (checkpoint_name, self.optimizer_name, learning_rate,
                     batch_size, self.func_name, epoch)))

    end_time = str(datetime.now())[11:13] + str(datetime.now())[14:16]
    save_name = ('./charts/' + self.optimizer_name + '_' + str(lr) + '_' +
                 self.func_name + '_' + end_time + '.png')
    plt.title(self.optimizer_name + '_' + str(lr) + '_' + str(epochs) + '_' +
              str(decay) + '_' + str(fixed) + end_time)
    plt.xlabel('batch')
    plt.ylabel('loss')
    plt.plot(self.loss_dict)
    plt.savefig(save_name)
    plt.show()
def train(): param = _param() dataset = LoadDataset(opt) param.X_dim = dataset.feature_dim data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train, opt) result = Result() result_gzsl = Result() netG = _netG(dataset.text_dim, dataset.feature_dim).cuda() netG.apply(weights_init) print(netG) netD = _netD(dataset.train_cls_num, dataset.feature_dim).cuda() netD.apply(weights_init) print(netD) exp_info = 'CUB_EASY' if opt.splitmode == 'easy' else 'CUB_HARD' exp_params = 'Eu{}_Rls{}_RWz{}'.format(opt.CENT_LAMBDA, opt.REG_W_LAMBDA, opt.REG_Wz_LAMBDA) train_dic = {} for i in range(len(dataset.labels_train)): try: train_dic[dataset.labels_train[i]].append( dataset.pfc_feat_data_train[i]) except: train_dic[dataset.labels_train[i]] = [ dataset.pfc_feat_data_train[i] ] out_dir = 'out/{:s}'.format(exp_info) out_subdir = 'out/{:s}/{:s}'.format(exp_info, exp_params) if not os.path.exists('out'): os.mkdir('out') if not os.path.exists(out_dir): os.mkdir(out_dir) if not os.path.exists(out_subdir): os.mkdir(out_subdir) cprint(" The output dictionary is {}".format(out_subdir), 'red') log_dir = out_subdir + '/log_{:s}.txt'.format(exp_info) with open(log_dir, 'a') as f: f.write('Training Start:') f.write(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + '\n') if opt.resume: if os.path.isfile(opt.resume): print("=> loading checkpoint '{}'".format(opt.resume)) checkpoint = torch.load(opt.resume) netG.load_state_dict(checkpoint['state_dict_G']) netD.load_state_dict(checkpoint['state_dict_D']) start_step = checkpoint['it'] print(checkpoint['log']) else: print("=> no checkpoint found at '{}'".format(opt.resume)) nets = [netG, netD] tr_cls_centroid = Variable( torch.from_numpy(dataset.tr_cls_centroid.astype('float32'))).cuda() optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9)) optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9)) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") unsupervisedData = UnsupervisedData(dataset.test_text_feature, dataset.labels_test, dataset.pfc_feat_data_test, dataset.train_cls_num) first = True if opt.resume != None else False class_increment = False while True: if not first: start_step = 0 class_increment = False for it in range(start_step, 3000 + 1): """ Discriminator """ for _ in range(5): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array( [dataset.train_text_feature[i, :] for i in labels]) text_feat = Variable( torch.from_numpy(text_feat.astype('float32'))).cuda() np.unique(labels) X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy( labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() y_true = y_true.to(device=device, dtype=torch.long) # GAN's D loss D_real, C_real = netD(X) D_loss_real = torch.mean(D_real) # print(C_real) # print(y_true) C_loss_real = F.cross_entropy(C_real, y_true) DC_loss = -D_loss_real + C_loss_real DC_loss.backward() # GAN's D loss G_sample = netG(z, text_feat).detach() D_fake, C_fake = netD(G_sample) D_loss_fake = torch.mean(D_fake) C_loss_fake = F.cross_entropy(C_fake, y_true) DC_loss = D_loss_fake + C_loss_fake DC_loss.backward() # train with gradient penalty (WGAN_GP) grad_penalty = calc_gradient_penalty( netD, X.data, G_sample.data) grad_penalty.backward() Wasserstein_D = D_loss_real - D_loss_fake optimizerD.step() reset_grad(nets) """ Generator """ for _ in range(1): blobs = data_layer.forward() feat_data = 
blobs['data'] # image data, 最小批的图片数据 labels = blobs['labels'].astype( int) # class labels, 图片对应的标签 text_feat = np.array( [dataset.train_text_feature[i, :] for i in labels]) text_feat = Variable( torch.from_numpy( text_feat.astype('float32'))).cuda() # 获取对应的文本 anchor_text_feat = Variable( torch.from_numpy( dataset.train_text_feature.astype( 'float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy( labels.astype('int'))).cuda() y_true = y_true.to(device=device, dtype=torch.long) z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() anchor_z = Variable( torch.randn(len(dataset.train_text_feature), param.z_dim)).cuda() G_sample = netG(z, text_feat) D_fake, C_fake = netD(G_sample) _, C_real = netD(X) # GAN's G loss G_loss = torch.mean(D_fake) # Auxiliary classification loss C_loss = (F.cross_entropy(C_real, y_true) + F.cross_entropy(C_fake, y_true)) / 2 GC_loss = -G_loss + C_loss # Centroid loss Euclidean_loss_1 = Variable(torch.Tensor([0.0])).cuda() Euclidean_loss_2 = Variable(torch.Tensor([0.0])).cuda() if opt.CENT_LAMBDA != 0: for i in range(dataset.train_cls_num): sample_idx = (y_true == i).data.nonzero().squeeze() try: eq_idx_len = sample_idx.shape[0] except: eq_idx_len = 0 if sample_idx.numel() == 0: Euclidean_loss_1 += 0.0 else: G_sample_cls = G_sample[sample_idx, :] Euclidean_loss_1 += ( G_sample_cls.mean(dim=0) - tr_cls_centroid[i]).pow(2).sum().sqrt() sample_idx = (y_true != i).data.nonzero().squeeze() try: sample_idx = random.sample( sample_idx, eq_idx_len) except: pass if eq_idx_len == 0: Euclidean_loss_2 += 0.0 else: G_sample_cls = G_sample[sample_idx, :] Euclidean_loss_2 += ( G_sample_cls.mean(dim=0) - tr_cls_centroid[i]).pow(2).sum().sqrt() Euclidean_loss_1 *= 1.0 / dataset.train_cls_num * opt.CENT_LAMBDA Euclidean_loss_2 *= 1.0 / dataset.train_cls_num * opt.CENT_LAMBDA Euclidean_loss = Euclidean_loss_1 - Euclidean_loss_2 # ||W||_2 regularization reg_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for name, p in netG.named_parameters(): if 'weight' in name: reg_loss += p.pow(2).sum() reg_loss.mul_(opt.REG_W_LAMBDA) # ||W_z||21 regularization, make W_z sparse reg_Wz_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_Wz_LAMBDA != 0: Wz = netG.rdc_text.weight reg_Wz_loss = Wz.pow(2).sum(dim=0).sqrt().sum().mul( opt.REG_Wz_LAMBDA) anchor = netG(anchor_z, anchor_text_feat) triplet_loss = cal_triplets_loss(anchor, train_dic, opt.margin) all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss + triplet_loss all_loss.backward() optimizerG.step() reset_grad(nets) if it % opt.disp_interval == 0 and it: acc_real = (np.argmax(C_real.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) acc_fake = (np.argmax(C_fake.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) log_text = 'Iter-{}; Was_D: {:.4}; Euc_triplet_ls: {:.4}; reg_ls: {:.4}; Wz_ls: {:.4}; G_loss: {:.4}; D_loss_real: {:.4};' \ ' D_loss_fake: {:.4}; rl: {:.4}%; fk: {:.4}%'\ .format(it, Wasserstein_D.item(), Euclidean_loss.item()+triplet_loss.item(), reg_loss.item(), reg_Wz_loss.item(), G_loss.item(), D_loss_real.item(), D_loss_fake.item(), acc_real * 100, acc_fake * 100) print(log_text) with open(log_dir, 'a') as f: f.write(log_text + '\n') if it % opt.evl_interval == 0 and it >= 100: netG.eval() eval_fakefeat_test(it, netG, dataset, param, result) eval_fakefeat_GZSL(it, netG, dataset, param, result_gzsl) if result.save_model: files2remove = glob.glob(out_subdir + 
'/Best_model*') for _i in files2remove: os.remove(_i) torch.save( { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, }, out_subdir + '/Best_model_Acc_{:.2f}.tar'.format( result.acc_list[-1])) netG.train() if it % opt.save_interval == 0 and it: torch.save( { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, }, out_subdir + '/Iter_{:d}.tar'.format(it)) cprint( 'Save model to ' + out_subdir + '/Iter_{:d}.tar'.format(it), 'red') first = False # semi-supervised text_feat = Variable( torch.from_numpy( unsupervisedData.text_feature.astype('float32'))).cuda() z = Variable(torch.randn(text_feat.shape[0], param.z_dim)).cuda() text_feat = netG(z, text_feat).data.cpu().numpy() model = KNeighborsClassifier(50) model.fit(text_feat, unsupervisedData.labels) y_pro = model.predict_proba(unsupervisedData.image_feature) y = model.predict(unsupervisedData.image_feature) probabilities = y_pro[:, np.argsort(y_pro)[::, -1][0]] selectedHighConvinceIndex = list( np.where(probabilities >= opt.confidence)[0]) selectedHighConvinceIndex_y = y[selectedHighConvinceIndex] print("select high confidence number : " + str(len(selectedHighConvinceIndex))) for i, label in enumerate(selectedHighConvinceIndex_y): if label in unsupervisedData.unsupervised_label_mapping: label = unsupervisedData.unsupervised_label_mapping[label] insertIndex = np.where(dataset.labels_train == label)[0][0] dataset.labels_train = np.insert(dataset.labels_train, insertIndex, values=label, axis=0) dataset.pfc_feat_data_train = np.insert( dataset.pfc_feat_data_train, insertIndex, values=unsupervisedData.image_feature[ selectedHighConvinceIndex[i]], axis=0) train_dic[label].append(unsupervisedData.image_feature[ selectedHighConvinceIndex[i]]) else: unsupervisedData.unsupervised_label_mapping[ label] = unsupervisedData.label_index unsupervisedData.label_index += 1 label = unsupervisedData.unsupervised_label_mapping[label] dataset.labels_train = np.hstack( [dataset.labels_train, [label]]) dataset.pfc_feat_data_train = np.vstack([ dataset.pfc_feat_data_train, [ unsupervisedData.image_feature[ selectedHighConvinceIndex[i]] ] ]) dataset.train_text_feature = np.vstack([ dataset.train_text_feature, [ unsupervisedData.text_feature[ selectedHighConvinceIndex[i]] ] ]) train_dic[label] = [ unsupervisedData.image_feature[ selectedHighConvinceIndex[i]] ] dataset.train_cls_num += 1 class_increment = True unsupervisedData.text_feature = np.delete( unsupervisedData.text_feature, selectedHighConvinceIndex, axis=0) unsupervisedData.image_feature = np.delete( unsupervisedData.image_feature, selectedHighConvinceIndex, axis=0) unsupervisedData.labels = np.delete(unsupervisedData.labels, selectedHighConvinceIndex, axis=0) dataset.tr_cls_centroid = np.zeros( [dataset.train_cls_num, dataset.pfc_feat_data_train.shape[1]]).astype(np.float32) for i in range(dataset.train_cls_num): dataset.tr_cls_centroid[i] = np.mean( dataset.pfc_feat_data_train[dataset.labels_train == i], axis=0) tr_cls_centroid = Variable( torch.from_numpy( dataset.tr_cls_centroid.astype('float32'))).cuda() if class_increment: del netD netD = _netD(dataset.train_cls_num, dataset.feature_dim).cuda() netD.apply(weights_init) netG = _netG(dataset.text_dim, dataset.feature_dim).cuda() netG.apply(weights_init) optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9)) optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9)) 
nets = [netG, netD] print(netG) print(netD) data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train, opt)
# model = UNet()
# serializers.load_npz(C.PATH_MODEL / "unet.model", model)

run_num = args.runnum
data_len = args.datalength
epoch = args.epoch
lr = args.learningrate
shuffle = True if args.shuffle == 1 else False
gpu_id = args.gpuid

for i in range(1, 1 + run_num):
    print("{} / {} loop".format(i, run_num))

    # Load the dataset
    X, Y = LoadDataset(length=data_len, offset=(i - 1) * data_len, shuffle=shuffle)
    print("Dataset loaded.")

    # Train
    model, _ = TrainUNet(X=X, Y=Y, model_=model, optimizer_=optimizer,
                         epoch=epoch, earlystop=False, alpha=lr,
                         loop=i, gpu_id=gpu_id)
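# NOTE: The loop above reads its settings from an `args` namespace that is not
# built in this snippet. The argparse setup below is a sketch of what it might
# look like; the parser name and every default value here are assumptions.
import argparse

unet_parser = argparse.ArgumentParser()
unet_parser.add_argument('--runnum', type=int, default=1)
unet_parser.add_argument('--datalength', type=int, default=1000)
unet_parser.add_argument('--epoch', type=int, default=10)
unet_parser.add_argument('--learningrate', type=float, default=1e-3)
unet_parser.add_argument('--shuffle', type=int, default=1)  # 1 enables shuffling
unet_parser.add_argument('--gpuid', type=int, default=0)
args = unet_parser.parse_args()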
parser.add_argument('--pretrained', action='store_false', default=True)
parser.add_argument('--load', default='', type=str)
parser.add_argument('--name', default='', type=str)
parser.add_argument('--start', type=int, default=0)
params = parser.parse_args()

data_dir = params.data_path
if params.name == '':
    params.name = data_dir.split('/')[-1]
if params.name == '':
    params.name = data_dir.split('/')[-2]

dataloaders = {
    x: LoadDataset(x, data_dir, batch_size=10, n_jobs=6)
    for x in ['train', 'val']
}

os.makedirs(os.path.join('log', params.name), exist_ok=True)
logger = SummaryWriter(os.path.join('log', params.name))
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
os.makedirs(os.path.join('ckpt', params.name), exist_ok=True)
ckpt_dir = os.path.join('ckpt', params.name)


def train_model(model, criterion, optimizer,
def train(): start_time = time.time() param = _param() dataset = LoadDataset(opt) param.X_dim = dataset.feature_dim data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train, dataset.seen_label_mapping, opt) result = Result() result_gzsl = Result() netG = _netG(dataset.text_dim, dataset.feature_dim).cuda() netG.apply(weights_init) print(netG) netD = _netD(dataset.train_cls_num + dataset.test_cls_num, dataset.feature_dim).cuda() netD.apply(weights_init) print(netD) exp_info = 'CUB_EASY' if opt.splitmode == 'easy' else 'CUB_HARD' exp_params = 'Eu{}_Rls{}_RWz{}'.format(opt.CENT_LAMBDA , opt.REG_W_LAMBDA, opt.REG_Wz_LAMBDA) out_dir = 'out_' + str(opt.epsilon) + '/{:s}'.format(exp_info) out_subdir = 'out_' + str(opt.epsilon) + '/{:s}/{:s}'.format(exp_info, exp_params) if not os.path.exists('out_' + str(opt.epsilon) ): os.mkdir('out_' + str(opt.epsilon)) if not os.path.exists(out_dir): os.mkdir(out_dir) if not os.path.exists(out_subdir): os.mkdir(out_subdir) cprint(" The output dictionary is {}".format(out_subdir), 'red') log_dir = out_subdir + '/log_{:s}.txt'.format(exp_info) with open(log_dir, 'a') as f: f.write('Training Start:') f.write(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + '\n') f.write("Running Parameter Logs") f.write(runing_parameters_logs) start_step = 0 if opt.resume: if os.path.isfile(opt.resume): print("=> loading checkpoint '{}'".format(opt.resume)) checkpoint = torch.load(opt.resume) netG.load_state_dict(checkpoint['state_dict_G']) netD.load_state_dict(checkpoint['state_dict_D']) start_step = checkpoint['it'] print(checkpoint['log']) log_text = checkpoint['log'] else: print("=> no checkpoint found at '{}'".format(opt.resume)) nets = [netG, netD] tr_cls_centroid = Variable(torch.from_numpy(dataset.tr_cls_centroid.astype('float32'))).cuda() optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9)) optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9)) for it in range(start_step, 5000+1): if it > opt.mode_change: train_text = Variable(torch.from_numpy(dataset.train_text_feature.astype('float32'))).cuda() test_text = Variable(torch.from_numpy(dataset.test_text_feature.astype('float32'))).cuda() z_train = Variable(torch.randn(dataset.train_cls_num, param.z_dim)).cuda() z_test = Variable(torch.randn(dataset.test_cls_num, param.z_dim)).cuda() _, train_text_feature = netG(z_train, train_text) _, test_text_feature = netG(z_test, test_text) dataset.semantic_similarity_check(opt.Knn, train_text_feature.data.cpu().numpy(), test_text_feature.data.cpu().numpy()) """ Discriminator """ for _ in range(5): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels true_labels = blobs['true_labels'].astype(int) text_feat = np.array([dataset.train_text_feature[i,:] for i in labels]) text_feat = Variable(torch.from_numpy(text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(true_labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() # GAN's D loss D_real, C_real = netD(X) D_loss_real = torch.mean(D_real) C_loss_real = F.cross_entropy(C_real, y_true) DC_loss = -D_loss_real + C_loss_real DC_loss.backward() # GAN's D loss G_sample, _ = netG(z, text_feat) D_fake, C_fake = netD(G_sample) D_loss_fake = torch.mean(D_fake) C_loss_fake = F.cross_entropy(C_fake, y_true) DC_loss = D_loss_fake + C_loss_fake DC_loss.backward() # train with gradient penalty (WGAN_GP) grad_penalty = 
calc_gradient_penalty(netD, X.data, G_sample.data) grad_penalty.backward() Wasserstein_D = D_loss_real - D_loss_fake optimizerD.step() reset_grad(nets) """ Generator """ for _ in range(1): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels true_labels = blobs['true_labels'].astype(int) #True seen label class text_feat = np.array([dataset.train_text_feature[i, :] for i in labels]) text_feat = Variable(torch.from_numpy(text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(true_labels.astype('int'))).cuda() y_dummy = Variable(torch.from_numpy(labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() G_sample, _ = netG(z, text_feat) D_fake, C_fake = netD(G_sample) _, C_real = netD(X) # GAN's G loss G_loss = torch.mean(D_fake) # Auxiliary classification loss C_loss = (F.cross_entropy(C_real, y_true) + F.cross_entropy(C_fake, y_true))/2 GC_loss = -G_loss + C_loss # Centroid loss Euclidean_loss = Variable(torch.Tensor([0.0])).cuda() Correlation_loss = Variable(torch.Tensor([0.0])).cuda() if opt.CENT_LAMBDA != 0: for i in range(dataset.train_cls_num): sample_idx = (y_dummy == i).data.nonzero().squeeze() if sample_idx.numel() == 0: Euclidean_loss += 0.0 else: G_sample_cls = G_sample[sample_idx, :] if sample_idx.numel() != 1: generated_mean = G_sample_cls.mean(dim=0) else: generated_mean = G_sample_cls Euclidean_loss += (generated_mean - tr_cls_centroid[i]).pow(2).sum().sqrt() for n in range(dataset.Neighbours): Neighbor_correlation = cosine_similarity(generated_mean.data.cpu().numpy().reshape((1, dataset.feature_dim)), tr_cls_centroid[dataset.idx_mat[i,n]].data.cpu().numpy().reshape((1, dataset.feature_dim))) lower_limit = dataset.semantic_similarity_seen [i,n] - opt.epsilon upper_limit = dataset.semantic_similarity_seen [i,n] + opt.epsilon lower_limit = torch.as_tensor(lower_limit.astype('float')) upper_limit = torch.as_tensor(upper_limit.astype('float')) corr = torch.as_tensor(Neighbor_correlation[0][0].astype('float')) margin = (torch.max(corr- corr, corr - upper_limit))**2 + (torch.max(corr- corr, lower_limit - corr ))**2 Correlation_loss += margin Euclidean_loss *= 1.0/dataset.train_cls_num * opt.CENT_LAMBDA Correlation_loss = Correlation_loss * opt.correlation_penalty # ||W||_2 regularization reg_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for name, p in netG.named_parameters(): if 'weight' in name: reg_loss += p.pow(2).sum() reg_loss.mul_(opt.REG_W_LAMBDA) # ||W_z||21 regularization, make W_z sparse reg_Wz_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_Wz_LAMBDA != 0: Wz = netG.rdc_text.weight reg_Wz_loss = Wz.pow(2).sum(dim=0).sqrt().sum().mul(opt.REG_Wz_LAMBDA) all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss + Correlation_loss all_loss.backward() optimizerG.step() reset_grad(nets) if (it > opt.unseen_start): for _ in range(1): # Zero shot Discriminator is training zero_shot_labels = np.random.randint(dataset.test_cls_num, size = opt.zeroshotbatchsize).astype(int) zero_shot_true_labels = np.array([dataset.unseen_label_mapping[i] for i in zero_shot_labels]) zero_text_feat = np.array([dataset.test_text_feature[i,:] for i in zero_shot_labels]) zero_text_feat = Variable(torch.from_numpy(zero_text_feat.astype('float32'))).cuda() zero_y_true = Variable(torch.from_numpy(zero_shot_true_labels.astype('int'))).cuda() z = Variable(torch.randn(opt.zeroshotbatchsize, param.z_dim)).cuda() # 
GAN's D loss G_sample_zero, _ = netG(z, zero_text_feat) _, C_fake_zero = netD(G_sample_zero) C_loss_fake_zero = F.cross_entropy(C_fake_zero, zero_y_true) C_loss_fake_zero.backward() optimizerD.step() reset_grad(nets) for _ in range(1): # Zero shot Generator is training zero_shot_labels = np.random.randint(dataset.test_cls_num, size = opt.zeroshotbatchsize).astype(int) zero_shot_true_labels = np.array([dataset.unseen_label_mapping[i] for i in zero_shot_labels]) zero_text_feat = np.array([dataset.test_text_feature[i,:] for i in zero_shot_labels]) zero_text_feat = Variable(torch.from_numpy(zero_text_feat.astype('float32'))).cuda() zero_y_true = Variable(torch.from_numpy(zero_shot_true_labels.astype('int'))).cuda() y_dummy_zero = Variable(torch.from_numpy(zero_shot_labels.astype('int'))).cuda() z = Variable(torch.randn(opt.zeroshotbatchsize, param.z_dim)).cuda() # GAN's D loss G_sample_zero, _ = netG(z, zero_text_feat) _, C_fake_zero = netD(G_sample_zero) C_loss_fake_zero = F.cross_entropy(C_fake_zero, zero_y_true) Correlation_loss_zero = Variable(torch.Tensor([0.0])).cuda() if opt.CENT_LAMBDA != 0: for i in range(dataset.test_cls_num): sample_idx = (y_dummy_zero == i).data.nonzero().squeeze() if sample_idx.numel() != 0: G_sample_cls = G_sample_zero[sample_idx, :] if sample_idx.numel() != 1: generated_mean = G_sample_cls.mean(dim=0) else: generated_mean = G_sample_cls for n in range(dataset.Neighbours): Neighbor_correlation = cosine_similarity(generated_mean.data.cpu().numpy().reshape((1, dataset.feature_dim)), tr_cls_centroid[dataset.unseen_idx_mat[i,n]].data.cpu().numpy().reshape((1, dataset.feature_dim))) lower_limit = dataset.semantic_similarity_unseen [i,n] - opt.epsilon upper_limit = dataset.semantic_similarity_unseen [i,n] + opt.epsilon lower_limit = torch.as_tensor(lower_limit.astype('float')) upper_limit = torch.as_tensor(upper_limit.astype('float')) corr = torch.as_tensor(Neighbor_correlation[0][0].astype('float')) margin = (torch.max(corr- corr, corr - upper_limit))**2 + (torch.max(corr- corr, lower_limit - corr ))**2 Correlation_loss_zero += margin Correlation_loss_zero = Correlation_loss_zero *opt.correlation_penalty # ||W||_2 regularization reg_loss_zero = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for name, p in netG.named_parameters(): if 'weight' in name: reg_loss_zero += p.pow(2).sum() reg_loss_zero.mul_(opt.REG_W_LAMBDA) # ||W_z||21 regularization, make W_z sparse reg_Wz_loss_zero = Variable(torch.Tensor([0.0])).cuda() if opt.REG_Wz_LAMBDA != 0: Wz = netG.rdc_text.weight reg_Wz_loss_zero = Wz.pow(2).sum(dim=0).sqrt().sum().mul(opt.REG_Wz_LAMBDA) all_loss = C_loss_fake_zero + reg_loss_zero + reg_Wz_loss_zero + Correlation_loss_zero all_loss.backward() optimizerG.step() reset_grad(nets) if it % opt.disp_interval == 0 and it: acc_real = (np.argmax(C_real.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float(y_true.data.size()[0]) acc_fake = (np.argmax(C_fake.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float(y_true.data.size()[0]) log_text = 'Iter-{}; Was_D: {:.4}; Euc_ls: {:.4};reg_ls: {:.4}; Wz_ls: {:.4}; G_loss: {:.4}; Correlation_loss : {:.4} ; D_loss_real: {:.4};' \ ' D_loss_fake: {:.4}; rl: {:.4}%; fk: {:.4}%'.format(it, Wasserstein_D.item(), Euclidean_loss.item(), reg_loss.item(), reg_Wz_loss.item(), G_loss.item(), Correlation_loss.item() , D_loss_real.item(), D_loss_fake.item(), acc_real * 100, acc_fake * 100) log_text1 = "" if it > opt.unseen_start : acc_fake_zero = (np.argmax(C_fake_zero.data.cpu().numpy(), 
axis=1) == zero_y_true.data.cpu().numpy()).sum() / float(zero_y_true.data.size()[0]) log_text1 = 'Zero_Shot_Iter-{}; Correlation_loss : {:.4}; fk: {:.4}%'.format(it, Correlation_loss_zero.item(), acc_fake_zero * 100) ''' Here I have added .item instead of the .data[0] - Maunil ''' print(log_text) print (log_text1) with open(log_dir, 'a') as f: f.write(log_text+'\n') f.write(log_text1+'\n') if it % opt.evl_interval == 0 and it >=80 and log_text != None: netG.eval() # This will start the testing process, no batch norm and drop out - It will disable them eval_fakefeat_test(it, netG, netD, dataset, param, result) eval_fakefeat_GZSL(it, netG, dataset, param, result_gzsl) if result.save_model: files2remove = glob.glob(out_subdir + '/Best_model*') for _i in files2remove: os.remove(_i) torch.save({ 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, 'Zero Shot Acc' : result.acc_list[-1], 'Generalized Zero Shot Acc' : result_gzsl.acc_list[-1] }, out_subdir + '/Best_model_Acc_' + str(result.acc_list[-1]) + '_AUC_' + str(result_gzsl.acc_list[-1]) + '_' +'.tar') netG.train() if it % opt.save_interval == 0 and it: torch.save({ 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, 'Zero Shot Acc' : result.acc_list[-1], 'Generalized Zero Shot Acc' : result_gzsl.acc_list[-1] }, out_subdir + '/Iter_{:d}.tar'.format(it)) cprint('Save model to ' + out_subdir + '/Iter_{:d}.tar'.format(it), 'red') print ("########################################################") print("--- %s Time took seconds ---" % (time.time() - start_time)) print ("########################################################")
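# NOTE: The correlation terms in the seen-class and zero-shot loops above amount
# to a two-sided hinge that keeps the cosine similarity between a generated class
# mean and a neighbouring centroid inside the band
# [semantic_similarity - epsilon, semantic_similarity + epsilon]
# (torch.max(corr - corr, x) is simply max(0, x)). The helper below restates that
# margin in isolation; it is a reading of the code above, not an extra loss term.
import torch

def correlation_margin_sketch(corr, semantic_similarity, epsilon):
    lower = semantic_similarity - epsilon
    upper = semantic_similarity + epsilon
    over = torch.clamp(corr - upper, min=0.0)   # similarity above the band
    under = torch.clamp(lower - corr, min=0.0)  # similarity below the band
    return over ** 2 + under ** 2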
import torch as t
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader

from eval_semantic_segmentation import eval_semantic_segmentation
from dataset import LoadDataset
from Models import FCN
import cfg

device = t.device('cuda') if t.cuda.is_available() else t.device('cpu')

num_class = cfg.DATASET[1]
BATCH_SIZE = 4
miou_list = [0]

Load_test = LoadDataset([cfg.TEST_ROOT, cfg.TEST_LABEL], cfg.crop_size)
test_data = DataLoader(Load_test, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)

net = FCN.FCN(num_class)
net.eval()
net.to(device)
net.load_state_dict(t.load("./Results/weights/xxx.pth"))

train_acc = 0
train_miou = 0
train_class_acc = 0
train_mpa = 0
error = 0
def train(model_num=3, is_val=True, sim_func_number=None, creative_weight=None): param = _param(opt.z_dim) best_model_acc_path = best_model_auc_path = best_model_hm_path = '' if opt.dataset == 'CUB': dataset = LoadDataset(opt, main_dir, is_val) exp_info = 'CUB_EASY' if opt.splitmode == 'easy' else 'CUB_HARD' opt.is_gbu = False elif opt.dataset == 'NAB': dataset = LoadDataset_NAB(opt, main_dir, is_val) exp_info = 'NAB_EASY' if opt.splitmode == 'easy' else 'NAB_HARD' opt.is_gbu = False elif "GBU" in opt.dataset: opt.dataset = opt.dataset.split('_')[1] opt.is_gbu = True exp_info = opt.dataset dataset = LoadDataset_GBU(opt, main_dir, is_val) else: print('No Dataset with that name') sys.exit(0) param.X_dim = dataset.feature_dim data_layer = FeatDataLayer(np.array(dataset.train_label), np.array(dataset.train_feature), opt) result = Result() ones = Variable(torch.Tensor(1, 1)) ones.data.fill_(1.0) if opt.is_gbu: netG = _netG_att(param, dataset.text_dim, dataset.feature_dim).cuda() else: netG = _netG(dataset.text_dim, dataset.feature_dim).cuda() netG.apply(weights_init) netD = _netD(dataset.train_cls_num, dataset.feature_dim).cuda() netD.apply(weights_init) if model_num == 2 or model_num == 4: log_SM_ab = Scale(2) log_SM_ab = nn.DataParallel(log_SM_ab).cuda() if model_num == 3 or model_num == 4: netT = _netT(dataset.train_cls_num, dataset.feature_dim, dataset.text_dim).cuda() netT.apply(weights_init) similarity_func = None if sim_func_number == 1: similarity_func = F.cosine_similarity elif sim_func_number == 2: similarity_func = F.mse_loss exp_params = 'Model_{}_is_val_{}_sim_func_number_{}_creative_weight_{}_lr_{}_zdim_{}_{}'.format( model_num, is_val, sim_func_number, creative_weight, opt.lr, param.z_dim, opt.exp_name) out_subdir = main_dir + 'out/{:s}/{:s}'.format(exp_info, exp_params) if not os.path.exists(out_subdir): os.makedirs(out_subdir) log_dir = out_subdir + '/log_{:s}.txt'.format(exp_info) log_dir_2 = out_subdir + '/log_{:s}_iterations.txt'.format(exp_info) with open(log_dir, 'a') as f: f.write('Training Start:') f.write(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + '\n') start_step = 0 if opt.resume: if os.path.isfile(opt.resume): print("=> loading checkpoint '{}'".format(opt.resume)) checkpoint = torch.load(opt.resume) netG.load_state_dict(checkpoint['state_dict_G']) netD.load_state_dict(checkpoint['state_dict_D']) if model_num == 3 or model_num == 4: netT.load_state_dict(checkpoint['state_dict_T']) start_step = checkpoint['it'] print(checkpoint['log']) else: print("=> no checkpoint found at '{}'".format(opt.resume)) if model_num == 1: nets = [netG, netD] elif model_num == 2: nets = [netG, netD, log_SM_ab] elif model_num == 3: nets = [netG, netD, netT] elif model_num == 4: nets = [netG, netD, netT, log_SM_ab] tr_cls_centroid = Variable( torch.from_numpy(dataset.tr_cls_centroid.astype('float32'))).cuda() optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9)) optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9)) if model_num == 2 or model_num == 4: optimizer_SM_ab = optim.Adam(log_SM_ab.parameters(), lr=opt.lr, betas=(0.5, 0.999)) if model_num == 3 or model_num == 4: optimizerT = optim.Adam(netT.parameters(), lr=opt.lr, betas=(0.5, 0.9)) for it in tqdm(range(start_step, 5000 + 1)): blobs = data_layer.forward() labels = blobs['labels'].astype(int) new_class_labels = Variable( torch.from_numpy(np.ones_like(labels) * dataset.train_cls_num)).cuda() text_feat_1 = np.array([dataset.train_att[i, :] for i in labels]) text_feat_2 = 
np.array([dataset.train_att[i, :] for i in labels]) np.random.shuffle( text_feat_1 ) # Shuffle both features to guarantee different permutations np.random.shuffle(text_feat_2) alpha = (np.random.random(len(labels)) * (.8 - .2)) + .2 text_feat_mean = np.multiply(alpha, text_feat_1.transpose()) text_feat_mean += np.multiply(1. - alpha, text_feat_2.transpose()) text_feat_mean = text_feat_mean.transpose() text_feat_mean = normalize(text_feat_mean, norm='l2', axis=1) text_feat_Creative = Variable( torch.from_numpy(text_feat_mean.astype('float32'))).cuda() # z_creative = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() # G_creative_sample = netG(z_creative, text_feat_Creative) if model_num == 3 or model_num == 4: """ Text Feat Generator """ for _ in range(5): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array([dataset.train_att[i, :] for i in labels]) text_feat_TG = Variable( torch.from_numpy(text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy( labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() # GAN's T loss T_real = netT(X) T_loss_real = torch.mean(similarity_func(text_feat_TG, T_real)) # GAN's T loss G_sample = netG(z, text_feat_TG).detach() T_fake_TG = netT(G_sample) T_loss_fake = torch.mean( similarity_func(text_feat_TG, T_fake_TG)) # GAN's T loss G_sample_creative = netG(z, text_feat_Creative).detach() T_fake_creative_TG = netT(G_sample_creative) T_loss_fake_creative = torch.mean( similarity_func(text_feat_Creative, T_fake_creative_TG)) T_loss = -1 * T_loss_real - T_loss_fake - T_loss_fake_creative T_loss.backward() optimizerT.step() optimizerG.step() reset_grad(nets) """ Discriminator """ for _ in range(5): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array([dataset.train_att[i, :] for i in labels]) text_feat = Variable(torch.from_numpy( text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() # GAN's D loss D_real, C_real = netD(X) D_loss_real = torch.mean(D_real) C_loss_real = F.cross_entropy(C_real, y_true) DC_loss = -D_loss_real + C_loss_real DC_loss.backward() # GAN's D loss G_sample = netG(z, text_feat).detach() D_fake, C_fake = netD(G_sample) D_loss_fake = torch.mean(D_fake) C_loss_fake = F.cross_entropy(C_fake, y_true) DC_loss = D_loss_fake + C_loss_fake DC_loss.backward() # train with gradient penalty (WGAN_GP) grad_penalty = calc_gradient_penalty(netD, X.data, G_sample.data) grad_penalty.backward() Wasserstein_D = D_loss_real - D_loss_fake optimizerD.step() reset_grad(nets) """ Generator """ for _ in range(1): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array([dataset.train_att[i, :] for i in labels]) text_feat = Variable(torch.from_numpy( text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() G_sample = netG(z, text_feat) D_fake, C_fake = netD(G_sample) _, C_real = netD(X) # GAN's G loss G_loss = torch.mean(D_fake) # Auxiliary classification loss C_loss = (F.cross_entropy(C_real, y_true) + 
F.cross_entropy(C_fake, y_true)) / 2 # GAN's G loss creative G_sample_creative = netG(z, text_feat_Creative).detach() if model_num == 3 or model_num == 4: D_creative_fake, _ = netD(G_sample_creative) G_loss_fake_creative = torch.mean(D_creative_fake) T_fake = netT(G_sample) T_loss_fake = torch.mean(similarity_func(text_feat, T_fake)) T_fake_creative = netT(G_sample_creative) T_loss_fake_creative = torch.mean( similarity_func(text_feat_Creative, T_fake_creative)) GC_loss = -G_loss - G_loss_fake_creative + C_loss - T_loss_fake - T_loss_fake_creative else: GC_loss = -G_loss + C_loss # Centroid loss Euclidean_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for i in range(dataset.train_cls_num): sample_idx = (y_true == i).data.nonzero().squeeze() if sample_idx.numel() == 0: Euclidean_loss += 0.0 else: G_sample_cls = G_sample[sample_idx, :] Euclidean_loss += ( G_sample_cls.mean(dim=0) - tr_cls_centroid[i]).pow(2).sum().sqrt() Euclidean_loss *= 1.0 / dataset.train_cls_num * opt.CENT_LAMBDA # ||W||_2 regularization reg_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for name, p in netG.named_parameters(): if 'weight' in name: reg_loss += p.pow(2).sum() reg_loss.mul_(opt.REG_W_LAMBDA) # ||W_z||21 regularization, make W_z sparse reg_Wz_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_Wz_LAMBDA != 0 and not opt.is_gbu: Wz = netG.rdc_text.weight reg_Wz_loss = Wz.pow(2).sum(dim=0).sqrt().sum().mul( opt.REG_Wz_LAMBDA) if model_num == 2 or model_num == 4: # D(C| GX_fake)) + Classify GX_fake as real D_creative_fake, C_creative_fake = netD(G_sample_creative) G_fake_C = F.softmax(C_creative_fake) # SM Divergence q_shape = Variable( torch.FloatTensor(G_fake_C.data.size(0), G_fake_C.data.size(1))).cuda() q_shape.data.fill_(1.0 / G_fake_C.data.size(1)) SM_ab = F.sigmoid(log_SM_ab(ones)) SM_a = 0.2 + torch.div(SM_ab[0][0], 1.6666666666666667).cuda() SM_b = 0.2 + torch.div(SM_ab[0][1], 1.6666666666666667).cuda() pow_a_b = torch.div(1 - SM_a, 1 - SM_b) alpha_term = (torch.pow(G_fake_C + 1e-5, SM_a) * torch.pow(q_shape, 1 - SM_a)).sum(1) entropy_GX_fake_vec = torch.div( torch.pow(alpha_term, pow_a_b) - 1, SM_b - 1) min_e, max_e = torch.min(entropy_GX_fake_vec), torch.max( entropy_GX_fake_vec) entropy_GX_fake_vec = (entropy_GX_fake_vec - min_e) / (max_e - min_e) entropy_GX_fake = -entropy_GX_fake_vec.mean() loss_creative = -creative_weight * entropy_GX_fake disc_GX_fake_real = -torch.mean(D_creative_fake) total_loss_creative = loss_creative + disc_GX_fake_real all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss + total_loss_creative else: all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss all_loss.backward() if model_num == 2 or model_num == 4: optimizer_SM_ab.step() optimizerG.step() reset_grad(nets) if it % opt.disp_interval == 0 and it: acc_real = (np.argmax(C_real.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) acc_fake = (np.argmax(C_fake.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) log_text = 'Iter-{}; rl: {:.4}%; fk: {:.4}%'.format( it, acc_real * 100, acc_fake * 100) with open(log_dir, 'a') as f: f.write(log_text + '\n') if it % opt.evl_interval == 0 and it > opt.disp_interval: cur_acc = 0 cur_auc = 0 cur_hm = 0 netG.eval() if is_val: cur_acc = eval_fakefeat_test(netG, dataset.val_cls_num, dataset.val_att, dataset.val_unseen_feature, dataset.val_unseen_label, param, result) if opt.is_gbu: cur_hm, acc_S_T, acc_U_T = eval_fakefeat_test_gzsl( 
netG, dataset, dataset.val_cls_num, dataset.val_att, dataset.val_unseen_feature, dataset.val_unseen_label, param, result) else: cur_auc = eval_fakefeat_GZSL(netG, dataset, dataset.val_cls_num, dataset.val_att, dataset.val_unseen_feature, dataset.val_unseen_label, param, out_subdir, result) else: cur_acc = eval_fakefeat_test(netG, dataset.test_cls_num, dataset.test_att, dataset.test_unseen_feature, dataset.test_unseen_label, param, result) if opt.is_gbu: cur_hm, acc_S_T, acc_U_T = eval_fakefeat_test_gzsl( netG, dataset, dataset.test_cls_num, dataset.test_att, dataset.test_unseen_feature, dataset.test_unseen_label, param, result) else: cur_auc = eval_fakefeat_GZSL(netG, dataset, dataset.test_cls_num, dataset.test_att, dataset.test_unseen_feature, dataset.test_unseen_label, param, out_subdir, result) if cur_acc > result.best_acc: result.best_acc = cur_acc files2remove = glob.glob(out_subdir + '/Best_model_ACC*') for _i in files2remove: os.remove(_i) save_dict = { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, } if model_num == 3 or model_num == 4: save_dict.update({'state_dict_T': netT.state_dict()}) best_model_acc_path = '/Best_model_ACC_{:.2f}.tar'.format( cur_acc) torch.save(save_dict, out_subdir + best_model_acc_path) if cur_auc > result.best_auc: result.best_auc = cur_auc files2remove = glob.glob(out_subdir + '/Best_model_AUC*') for _i in files2remove: os.remove(_i) save_dict = { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, } if model_num == 3 or model_num == 4: save_dict.update({'state_dict_T': netT.state_dict()}) best_model_auc_path = '/Best_model_AUC_{:.2f}.tar'.format( cur_auc) torch.save(save_dict, out_subdir + best_model_auc_path) if cur_hm > result.best_hm: result.best_hm = cur_hm result.best_acc_S_T = acc_S_T result.best_acc_U_T = acc_U_T files2remove = glob.glob(out_subdir + '/Best_model_HM*') for _i in files2remove: os.remove(_i) save_dict = { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, } if model_num == 3 or model_num == 4: save_dict.update({'state_dict_T': netT.state_dict()}) best_model_hm_path = '/Best_model_HM_{:.2f}.tar'.format(cur_hm) torch.save(save_dict, out_subdir + best_model_hm_path) log_text_2 = 'iteration: %f, best_acc: %f, best_auc: %f, best_hm: %f' % ( it, result.best_acc, result.best_auc, result.best_hm) with open(log_dir_2, 'a') as f: f.write(log_text_2 + '\n') netG.train() if is_val: if os.path.isfile(out_subdir + best_model_acc_path): print("=> loading checkpoint '{}'".format(best_model_acc_path)) checkpoint = torch.load(out_subdir + best_model_acc_path) netG.load_state_dict(checkpoint['state_dict_G']) netD.load_state_dict(checkpoint['state_dict_D']) if model_num == 3 or model_num == 4: netT.load_state_dict(checkpoint['state_dict_T']) it = checkpoint['it'] print("iteration: {}".format(it)) netG.eval() test_acc = eval_fakefeat_test(netG, dataset.test_cls_num, dataset.test_att, dataset.test_unseen_feature, dataset.test_unseen_label, param, result) result.test_acc = test_acc else: print("=> no checkpoint found at '{}'".format(out_subdir + best_model_acc_path)) if os.path.isfile(out_subdir + best_model_auc_path): print("=> loading checkpoint '{}'".format(best_model_auc_path)) checkpoint = torch.load(out_subdir + best_model_auc_path) netG.load_state_dict(checkpoint['state_dict_G']) 
netD.load_state_dict(checkpoint['state_dict_D']) if model_num == 3 or model_num == 4: netT.load_state_dict(checkpoint['state_dict_T']) it = checkpoint['it'] print("iteration: {}".format(it)) netG.eval() test_auc = eval_fakefeat_GZSL(netG, dataset, dataset.test_cls_num, dataset.test_att, dataset.test_unseen_feature, dataset.test_unseen_label, param, out_subdir, result) result.test_auc = test_auc else: print("=> no checkpoint found at '{}'".format(out_subdir + best_model_auc_path)) if os.path.isfile(out_subdir + best_model_hm_path): print("=> loading checkpoint '{}'".format(best_model_hm_path)) checkpoint = torch.load(out_subdir + best_model_hm_path) netG.load_state_dict(checkpoint['state_dict_G']) netD.load_state_dict(checkpoint['state_dict_D']) if model_num == 3 or model_num == 4: netT.load_state_dict(checkpoint['state_dict_T']) it = checkpoint['it'] print("iteration: {}".format(it)) netG.eval() test_hm, test_acc_S_T, test_acc_U_T = eval_fakefeat_test_gzsl( netG, dataset, dataset.test_cls_num, dataset.test_att, dataset.test_unseen_feature, dataset.test_unseen_label, param, result) result.test_hm = test_hm result.test_acc_S_T = test_acc_S_T result.test_acc_U_T = test_acc_U_T else: print("=> no checkpoint found at '{}'".format(out_subdir + best_model_hm_path)) log_text_2 = 'test_acc: %f, test_auc: %f, test_hm: %f, test_acc_S_T: %f, test_acc_U_T: %f' % ( result.test_acc, result.test_auc, result.test_hm, result.test_acc_S_T, result.test_acc_U_T) with open(log_dir_2, 'a') as f: f.write(log_text_2 + '\n') return result
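# --- Illustrative sketch (not part of the original training code) -------------
# The loop above hallucinates "creative" class descriptions by mixing two
# independently shuffled copies of the batch attribute matrix with a per-sample
# mixing weight drawn from [0.2, 0.8] and re-normalizing rows to unit L2 norm.
# A minimal stand-alone version of that step, assuming only NumPy and
# scikit-learn's normalize; the helper name is hypothetical.
import numpy as np
from sklearn.preprocessing import normalize


def interpolate_attributes(att, low=0.2, high=0.8, rng=np.random):
    """Return one convex mixture per row of att (shape [batch, att_dim])."""
    a = att.copy()
    b = att.copy()
    rng.shuffle(a)  # shuffle rows of each copy independently
    rng.shuffle(b)
    alpha = rng.uniform(low, high, size=(att.shape[0], 1))
    mixed = alpha * a + (1.0 - alpha) * b
    return normalize(mixed, norm='l2', axis=1)


# Example (hypothetical shapes): 64 samples with 312-dimensional CUB attributes.
# creative_att = interpolate_attributes(np.random.rand(64, 312).astype('float32'))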
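# --- Illustrative sketch (not part of the original training code) -------------
# For model_num 3/4 a text regressor netT maps visual features back to attribute
# space and is scored against the conditioning attributes with similarity_func
# (cosine similarity or MSE above). A compact version of that alignment term for
# the cosine case; the function name is illustrative.
import torch
import torch.nn.functional as F


def text_alignment_score(netT, visual_feat, att,
                         similarity_func=F.cosine_similarity):
    """Mean similarity between netT(visual_feat) and the attributes behind it."""
    return torch.mean(similarity_func(att, netT(visual_feat)))


# In the T step above this score is maximized for real features, generated
# features, and creative (interpolated-attribute) features alike; in the G step
# the same score on generated features is subtracted inside GC_loss, so the
# generator is also rewarded when its samples regress back to the right text.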
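# --- Illustrative sketch (not part of the original training code) -------------
# calc_gradient_penalty is called above but not defined in this file. A standard
# WGAN-GP penalty (Gulrajani et al.) evaluated at random interpolates between
# real and fake features would look roughly like this; the signature, gp_lambda,
# and the assumption that the discriminator returns (critic_score, class_logits)
# are taken from context and may not match the original helper exactly.
import torch
from torch import autograd


def gradient_penalty_sketch(netD, real_data, fake_data, gp_lambda=10.0):
    batch = real_data.size(0)
    eps = torch.rand(batch, 1, device=real_data.device)
    interp = (eps * real_data + (1.0 - eps) * fake_data).requires_grad_(True)
    critic_out, _ = netD(interp)  # first output = critic score
    grads = autograd.grad(outputs=critic_out.sum(), inputs=interp,
                          create_graph=True, retain_graph=True)[0]
    return gp_lambda * ((grads.norm(2, dim=1) - 1.0) ** 2).mean()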
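# --- Illustrative sketch (not part of the original training code) -------------
# For model_num 2/4 the creative loss uses a Sharma-Mittal-style divergence
# between the classifier posterior over seen classes and the uniform
# distribution, with learnable (a, b) squashed into (0.2, 0.8) by the Scale
# module above. This mirrors the exact exponent/denominator used in the code;
# the function name and the fixed (a, b) defaults are assumptions.
import torch


def sm_divergence_to_uniform(probs, a=0.5, b=0.5, eps=1e-5):
    """probs: [batch, K] rows summing to 1; returns a per-sample divergence."""
    k = probs.size(1)
    q = torch.full_like(probs, 1.0 / k)  # uniform reference distribution
    alpha_term = ((probs + eps).pow(a) * q.pow(1.0 - a)).sum(dim=1)
    return (alpha_term.pow((1.0 - a) / (1.0 - b)) - 1.0) / (b - 1.0)


# In the training loop the per-sample values are min-max normalized over the
# batch and their mean enters the total loss with weight creative_weight, so a
# posterior that is far from uniform on hallucinated samples is penalized, e.g.:
# d = sm_divergence_to_uniform(torch.softmax(C_creative_fake, dim=1))
# d = (d - d.min()) / (d.max() - d.min() + 1e-12)
# total_loss_creative = creative_weight * d.mean() - torch.mean(D_creative_fake)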
def train(): param = _param() print("load dataset origin") dataset_origin = LoadDataset_origin(opt) print("load dataset") dataset = LoadDataset(opt) param.X_dim = dataset.feature_dim data_layer_origin = FeatDataLayer_origin( dataset_origin.labels_train, dataset_origin.pfc_feat_data_train, opt) data_layer = FeatDataLayer_add_FG( dataset.labels_train, dataset.pfc_feat_data_train, opt, dataset.train_text_feature, dataset.familyToText, dataset.genusToText, dataset.familyLabelToBirdLabel, dataset.genusLabelToBirdLabel, dataset.labels_origin_train) result = Result() result_gzsl = Result() netG = _netG(dataset.text_dim, dataset.feature_dim).cuda() netG.apply(weights_init) print(netG) netD = _netD(dataset.train_cls_num, dataset.feature_dim).cuda() netD.apply(weights_init) print(netD) exp_info = 'CUB_EASY' if opt.splitmode == 'easy' else 'CUB_HARD' exp_params = 'Eu{}_Rls{}_RWz{}'.format(opt.CENT_LAMBDA, opt.REG_W_LAMBDA, opt.REG_Wz_LAMBDA) out_dir = 'out/{:s}'.format(exp_info) out_subdir = 'out/{:s}/{:s}'.format(exp_info, exp_params) opt.out_subdir = out_subdir if not os.path.exists('out'): os.mkdir('out') if not os.path.exists(out_dir): os.mkdir(out_dir) if not os.path.exists(out_subdir): os.mkdir(out_subdir) cprint(" The output dictionary is {}".format(out_subdir), 'red') log_dir = out_subdir + '/log_{:s}'.format(exp_info) if opt.exp_no != "": log_dir += "_" + opt.exp_no log_dir += ".txt" opt.log_dir = log_dir opt.auc_plot_dir = out_subdir + '/best_auc_plot{:s}_{:s}'.format( opt.exp_no, exp_info) opt.auc_solid_plot_dir = out_subdir + '/solid_auc_plot{:s}_{:s}'.format( opt.exp_no, exp_info) opt.history_D_loss_dir = out_subdir + '/D_loss_plot{:s}_{:s}'.format( opt.exp_no, exp_info) opt.history_G_loss_dir = out_subdir + '/G_loss_plot{:s}_{:s}'.format( opt.exp_no, exp_info) start_step = 0 if opt.resume: if os.path.isfile(opt.resume): print("=> loading checkpoint '{}'".format(opt.resume)) checkpoint = torch.load(opt.resume) netG.load_state_dict(checkpoint['state_dict_G']) netD.load_state_dict(checkpoint['state_dict_D']) start_step = checkpoint['it'] print(checkpoint['log']) else: print("=> no checkpoint found at '{}'".format(opt.resume)) nets = [netG, netD] # tr_cls_centroid = Variable(torch.from_numpy(dataset.tr_cls_centroid.astype('float32'))).cuda() optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9)) optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9)) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") history_D_loss = [] history_G_loss = [] for it in range(start_step, 10000 + 1): cur_D_loss = 0 cur_G_loss = 0 """ Discriminator """ for _ in range(5): blobs = data_layer_origin.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) # class labels text_feat = np.array( [dataset_origin.train_text_feature[i, :] for i in labels]) text_feat = Variable(torch.from_numpy( text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda() z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() y_true = y_true.to(device=device, dtype=torch.long) # GAN's D loss D_real, C_real = netD(X) D_loss_real = torch.mean(D_real) C_loss_real = F.cross_entropy(C_real, y_true) DC_loss = -D_loss_real + C_loss_real DC_loss.backward() cur_D_loss += DC_loss.item() # GAN's D loss G_sample = netG(z, text_feat).detach() D_fake, C_fake = netD(G_sample) D_loss_fake = torch.mean(D_fake) C_loss_fake = F.cross_entropy(C_fake, y_true) DC_loss = D_loss_fake + 
C_loss_fake DC_loss.backward() cur_D_loss += DC_loss.item() # train with gradient penalty (WGAN_GP) grad_penalty = calc_gradient_penalty(netD, X.data, G_sample.data) grad_penalty.backward() Wasserstein_D = D_loss_real - D_loss_fake optimizerD.step() reset_grad(nets) cur_D_loss += Wasserstein_D.item() """ Generator """ for _ in range(1): blobs = data_layer.forward() feat_data = blobs['data'] # image data labels = blobs['labels'].astype(int) origin_labels = blobs['minibatch_origin_label'].astype(int) text_feat = blobs['text_feat'] # text_feat # text_feat = np.array([dataset.train_text_feature[i, :] for i in labels]) text_feat = Variable(torch.from_numpy( text_feat.astype('float32'))).cuda() X = Variable(torch.from_numpy(feat_data)).cuda() y_true = Variable(torch.from_numpy(labels.astype('int'))).cuda() y_origin_true = Variable( torch.from_numpy(origin_labels.astype('int'))).cuda() y_true = y_true.to(device=device, dtype=torch.long) z = Variable(torch.randn(opt.batchsize, param.z_dim)).cuda() G_sample = netG(z, text_feat) D_fake, C_fake = netD(G_sample) _, C_real = netD(X) # GAN's G loss G_loss = torch.mean(D_fake) # Auxiliary classification loss C_loss = (F.cross_entropy(C_real, y_true) + F.cross_entropy(C_fake, y_true)) / 2 GC_loss = -G_loss + C_loss cur_G_loss += -G_loss.item() + F.cross_entropy(C_fake, y_true).item() Bird_Euclidean_loss = Variable(torch.Tensor([0.0])).cuda() if opt.CENT_LAMBDA != 0 and opt.BIRD_CENT_LAMBDA != 0: for i in range(dataset.train_cls_num): sample_idx = (y_origin_true == i).data.nonzero().squeeze() if sample_idx.numel() == 0: Bird_Euclidean_loss += 0.0 else: G_sample_cls = G_sample[sample_idx, :] center = Variable( torch.from_numpy(dataset.tr_cls_centroid[i].astype( 'float32'))).cuda() Bird_Euclidean_loss += (G_sample_cls.mean(dim=0) - center).pow(2).sum().sqrt() Bird_Euclidean_loss *= 1.0 / dataset.train_cls_num Family_Euclidean_loss = Variable(torch.Tensor([0.0])).cuda() if opt.CENT_LAMBDA != 0 and opt.FAMILY_CENT_LAMBDA != 0: for i in range(dataset.familyLabelStart, dataset.familyLabelEnd): sample_idx = (y_origin_true == i).data.nonzero().squeeze() if sample_idx.numel() == 0: Family_Euclidean_loss += 0.0 else: G_sample_cls = G_sample[sample_idx, :] center = Variable( torch.from_numpy(dataset.tr_cls_centroid[i].astype( 'float32'))).cuda() Family_Euclidean_loss += (G_sample_cls.mean(dim=0) - center).pow(2).sum().sqrt() Family_Euclidean_loss *= 1.0 / (dataset.familyLabelEnd - dataset.familyLabelStart) Genus_Euclidean_loss = Variable(torch.Tensor([0.0])).cuda() if opt.CENT_LAMBDA != 0 and opt.GENUS_CENT_LAMBDA != 0: for i in range(dataset.genusLabelStart, dataset.genusLabelEnd): sample_idx = (y_origin_true == i).data.nonzero().squeeze() if sample_idx.numel() == 0: Genus_Euclidean_loss += 0.0 else: G_sample_cls = G_sample[sample_idx, :] center = Variable( torch.from_numpy(dataset.tr_cls_centroid[i].astype( 'float32'))).cuda() Genus_Euclidean_loss += (G_sample_cls.mean(dim=0) - center).pow(2).sum().sqrt() Genus_Euclidean_loss *= 1.0 / (dataset.genusLabelEnd - dataset.genusLabelStart) Euclidean_loss = opt.CENT_LAMBDA * ( opt.BIRD_CENT_LAMBDA * Bird_Euclidean_loss + opt.FAMILY_CENT_LAMBDA * Family_Euclidean_loss + opt.GENUS_CENT_LAMBDA * Genus_Euclidean_loss) # ||W||_2 regularization reg_loss = Variable(torch.Tensor([0.0])).cuda() if opt.REG_W_LAMBDA != 0: for name, p in netG.named_parameters(): if 'weight' in name: reg_loss += p.pow(2).sum() reg_loss.mul_(opt.REG_W_LAMBDA) # ||W_z||21 regularization, make W_z sparse reg_Wz_loss = Variable(torch.Tensor([0.0])).cuda() 
if opt.REG_Wz_LAMBDA != 0: Wz = netG.rdc_text.weight reg_Wz_loss = Wz.pow(2).sum(dim=0).sqrt().sum().mul( opt.REG_Wz_LAMBDA) all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss all_loss.backward() optimizerG.step() reset_grad(nets) cur_G_loss += Euclidean_loss.item() history_D_loss.append(cur_D_loss) history_G_loss.append(cur_G_loss) print("Iter-" + str(it + 1) + "; G-loss: " + str(cur_G_loss) + "; D-loss: " + str(cur_D_loss)) if it % opt.disp_interval == 0 and it: acc_real = (np.argmax(C_real.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) acc_fake = (np.argmax(C_fake.data.cpu().numpy(), axis=1) == y_true.data.cpu().numpy()).sum() / float( y_true.data.size()[0]) log_text = 'Iter-{}; Was_D: {:.4}; Euc_ls: {:.4}; Bird_Euc_ls: {:.4}; family_Euc_ls: {:.4}; ' \ 'Genus_Euc_ls: {:.4}; reg_ls: {:.4}; Wz_ls: {:.4}; G_loss: {:.4}; D_loss_real: {:.4};' \ ' D_loss_fake: {:.4}; rl: {:.4}%; fk: {:.4}%' \ .format(it, Wasserstein_D.item(), Euclidean_loss.item(), Bird_Euclidean_loss.item(), Family_Euclidean_loss.item(), Genus_Euclidean_loss.item(), reg_loss.item(), reg_Wz_loss.item(), G_loss.item(), D_loss_real.item(), D_loss_fake.item(), acc_real * 100, acc_fake * 100) print(log_text) if it % opt.evl_interval == 0 and it >= 100: netG.eval() eval_fakefeat_test(it, netG, dataset_origin, param, result) eval_fakefeat_GZSL(it, netG, dataset_origin, param, result_gzsl) if result.save_model: files2remove = glob.glob( out_subdir + '/Best_model{}_Acc*'.format(opt.exp_no)) for _i in files2remove: os.remove(_i) torch.save( { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, }, out_subdir + '/Best_model{}_Acc_{:.2f}.tar'.format( opt.exp_no, result.acc_list[-1])) if result_gzsl.save_model: files2remove = glob.glob( out_subdir + '/Best_model{}_Auc*'.format(opt.exp_no)) for _i in files2remove: os.remove(_i) torch.save( { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, }, out_subdir + '/Best_model{}_Auc_{:.2f}.tar'.format( opt.exp_no, result_gzsl.best_auc * 100)) netG.train() if it % opt.save_interval == 0 and it: torch.save( { 'it': it + 1, 'state_dict_G': netG.state_dict(), 'state_dict_D': netD.state_dict(), 'random_seed': opt.manualSeed, 'log': log_text, }, out_subdir + '/Iter_{:d}.tar'.format(it)) cprint('Save model to ' + out_subdir + '/Iter_{:d}.tar'.format(it), 'red') print("Reproduce CUB {}".format(opt.splitmode)) print("Accuracy is {:.4}%, and Generalized AUC is {:.4}%".format( result.best_acc, result_gzsl.best_auc * 100)) np.savetxt(opt.history_D_loss_dir + '.txt', history_D_loss, fmt='%.015f') np.savetxt(opt.history_G_loss_dir + '.txt', history_G_loss, fmt='%.015f')
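# --- Illustrative sketch (not part of the original training code) -------------
# The three Euclidean terms above (bird / family / genus) share one pattern: for
# every class index in a given range, pull the mean of the generated features of
# that class toward the precomputed real-feature centroid. A compact stand-alone
# version of that pattern; the helper name and plain-tensor interface are
# assumptions.
import torch


def centroid_loss(g_sample, labels, centroids, cls_range):
    """Average L2 distance between per-class means of g_sample and centroids.

    g_sample:  [batch, feat_dim] generated features
    labels:    [batch] integer class indices (bird / family / genus label space)
    centroids: [num_cls, feat_dim] precomputed real-feature class centroids
    cls_range: (start, end) half-open range of class indices to include
    """
    start, end = cls_range
    loss = g_sample.new_zeros(())
    for c in range(start, end):
        mask = labels == c
        if mask.any():
            diff = g_sample[mask].mean(dim=0) - centroids[c]
            loss = loss + diff.pow(2).sum().sqrt()
    return loss / max(end - start, 1)


# The combined term above is then a weighted sum (range names are illustrative):
# CENT_LAMBDA * (BIRD_CENT_LAMBDA   * centroid_loss(G, y, C, (0, n_bird))
#              + FAMILY_CENT_LAMBDA * centroid_loss(G, y, C, (family_start, family_end))
#              + GENUS_CENT_LAMBDA  * centroid_loss(G, y, C, (genus_start, genus_end)))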
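# --- Illustrative sketch (not part of the original training code) -------------
# The "||W_z||21" regularizer used in both training loops is a group-sparsity
# (L2,1) norm on the weight matrix of the generator's text-reduction layer
# (rdc_text above): the L2 norm of each column, summed over columns, which
# drives unhelpful text dimensions toward zero. Minimal form:
import torch


def l21_norm(w):
    """Sum of column-wise L2 norms of a 2-D weight matrix w."""
    return w.pow(2).sum(dim=0).sqrt().sum()


# e.g. reg_Wz_loss = l21_norm(netG.rdc_text.weight).mul(opt.REG_Wz_LAMBDA)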
def train(opt):
    param = _param()
    dataset = LoadDataset(opt)
    param.X_dim = dataset.feature_dim
    data_layer = FeatDataLayer(dataset.labels_train, dataset.pfc_feat_data_train,
                               opt)

    # initialize model
    netGs = []
    netDs = []
    parts = 7 if opt.dataset == "CUB2011" else 6
    for part in range(parts):
        netGs.append(_netG(dataset.text_dim, 512).cuda().apply(weights_init))
        netDs.append(_netD(dataset.train_cls_num, 512).cuda().apply(weights_init))

    start_step = 0
    part_cls_centrild = torch.from_numpy(
        dataset.part_cls_centrild.astype('float32')).cuda()

    # initialize optimizers
    optimizerGs = []
    optimizerDs = []
    for netG in netGs:
        optimizerGs.append(optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9)))
    for netD in netDs:
        optimizerDs.append(optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9)))

    for it in range(start_step, 3000 + 1):
        """ Discriminator """
        for _ in range(5):
            blobs = data_layer.forward()
            feat_data = blobs['data']  # image data
            labels = blobs['labels'].astype(int)  # class labels
            text_feat = np.array(
                [dataset.train_text_feature[i, :] for i in labels])
            text_feat = torch.from_numpy(text_feat.astype('float32')).cuda()
            X = torch.from_numpy(feat_data).cuda()
            y_true = torch.from_numpy(labels.astype('int')).cuda()

            for part in range(parts):
                z = torch.randn(opt.batchsize, param.z_dim).cuda()

                D_real, C_real = netDs[part](X[:, part * 512:(part + 1) * 512])
                D_loss_real = torch.mean(D_real)
                C_loss_real = F.cross_entropy(C_real, y_true)
                DC_loss = -D_loss_real + C_loss_real
                DC_loss.backward()

                G_sample = netGs[part](z, text_feat)
                D_fake, C_fake = netDs[part](G_sample)
                D_loss_fake = torch.mean(D_fake)
                C_loss_fake = F.cross_entropy(C_fake, y_true)
                DC_loss = D_loss_fake + C_loss_fake
                DC_loss.backward()

                grad_penalty = calc_gradient_penalty(
                    opt.batchsize, netDs[part],
                    X.data[:, part * 512:(part + 1) * 512], G_sample.data,
                    opt.GP_LAMBDA)
                grad_penalty.backward()

                Wasserstein_D = D_loss_real - D_loss_fake
                # writer.add_scalar("Wasserstein_D"+str(part), Wasserstein_D.item(), it)
                optimizerDs[part].step()
                netGs[part].zero_grad()
                netDs[part].zero_grad()

        """ Generator """
        for _ in range(1):
            blobs = data_layer.forward()
            feat_data = blobs['data']  # image data
            labels = blobs['labels'].astype(int)  # class labels
            text_feat = np.array(
                [dataset.train_text_feature[i, :] for i in labels])
            text_feat = torch.from_numpy(text_feat.astype('float32')).cuda()
            X = torch.from_numpy(feat_data).cuda()
            y_true = torch.from_numpy(labels.astype('int')).cuda()

            for part in range(parts):
                z = torch.randn(opt.batchsize, param.z_dim).cuda()
                G_sample = netGs[part](z, text_feat)
                # G_sample_all[:, part*512:(part+1)*512] = G_sample
                D_fake, C_fake = netDs[part](G_sample)
                _, C_real = netDs[part](X[:, part * 512:(part + 1) * 512])

                G_loss = torch.mean(D_fake)
                C_loss = (F.cross_entropy(C_real, y_true) +
                          F.cross_entropy(C_fake, y_true)) / 2
                GC_loss = -G_loss + C_loss
                # writer.add_scalar("GC_loss"+str(part), GC_loss.item(), it)

                Euclidean_loss = torch.tensor([0.0]).cuda()
                if opt.REG_W_LAMBDA != 0:
                    for i in range(dataset.train_cls_num):
                        sample_idx = (y_true == i).data.nonzero().squeeze()
                        if sample_idx.numel() == 0:
                            Euclidean_loss += 0.0
                        else:
                            G_sample_cls = G_sample[sample_idx, :]
                            Euclidean_loss += (
                                G_sample_cls.mean(dim=0) -
                                part_cls_centrild[i][part]).pow(2).sum().sqrt()
                    Euclidean_loss *= 1.0 / dataset.train_cls_num * opt.CENT_LAMBDA

                # ||W||_2 regularization
                reg_loss = torch.Tensor([0.0]).cuda()
                if opt.REG_W_LAMBDA != 0:
                    for name, p in netGs[part].named_parameters():
                        if 'weight' in name:
                            reg_loss += p.pow(2).sum()
                    reg_loss.mul_(opt.REG_W_LAMBDA)
                # writer.add_scalar("reg_loss"+str(part), reg_loss.item(), it)

                # ||W_z||21 regularization, make W_z sparse
                reg_Wz_loss = torch.Tensor([0.0]).cuda()
                if opt.REG_Wz_LAMBDA != 0:
                    Wz = netGs[part].rdc_text.weight
                    reg_Wz_loss = reg_Wz_loss + Wz.pow(2).sum(
                        dim=0).sqrt().sum().mul(opt.REG_Wz_LAMBDA)
                # writer.add_scalar("reg_Wz_loss"+str(part), reg_Wz_loss.item(), it)

                all_loss = GC_loss + Euclidean_loss + reg_loss + reg_Wz_loss
                all_loss.backward()
                optimizerGs[part].step()

        if it % opt.evl_interval == 0 and it > 500:
            print(it)
            for part in range(parts):
                netGs[part].eval()
            train_classifier(opt, param, dataset, netGs)
            for part in range(parts):
                netGs[part].train()
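# --- Illustrative sketch (not part of the original training code) -------------
# In the part-based variant above, the 7 (CUB2011) or 6 (NAB) semantic parts each
# own a 512-dimensional slice of the visual feature and a dedicated (netG, netD)
# pair. At synthesis time a full fake feature could be assembled by concatenating
# the per-part generator outputs; this helper is an assumption about how the
# generators are combined downstream (e.g. inside train_classifier), not code
# taken from the original.
import torch


def synthesize_full_feature(netGs, text_feat, z_dim):
    """Concatenate per-part fake features into one [batch, parts * 512] tensor."""
    batch = text_feat.size(0)
    pieces = []
    for netG in netGs:
        z = torch.randn(batch, z_dim, device=text_feat.device)
        pieces.append(netG(z, text_feat))  # each piece: [batch, 512]
    return torch.cat(pieces, dim=1)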