Code example #1
    def load_model(self,
                   vgg_path=None,
                   decoder_path=None,
                   message_encoder_path=None,
                   message_decoder_path=None):
        self.vgg = model.VGG()
        if vgg_path:
            self.vgg.load_state_dict(torch.load(vgg_path))
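            # keep the first 31 child modules, i.e. features up to relu4_1
            # (whose channel dimension appears below as vgg_relu_4_1_dim)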
            self.vgg = nn.Sequential(*list(self.vgg.children())[:31])

        self.decoder = model.Decoder()
        if decoder_path:
            self.decoder.load_state_dict(torch.load(decoder_path))

        self.message_encoder = model.Message_Encoder()
        if message_encoder_path:
            self.message_encoder.load_state_dict(
                torch.load(message_encoder_path))

        self.message_decoder = model.Message_Decoder(
            input_width=self.image_size,
            content_feat_shape=(self.vgg_relu_4_1_dim,
                                int(self.image_size / self.down_scale),
                                int(self.image_size / self.down_scale)))
        if message_decoder_path:
            self.message_decoder.load_state_dict(
                torch.load(message_decoder_path))
Code example #2
File: main.py Project: aul12/WuppDiKlickDi
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_device = torch.device("cpu")
    print("Device type: %s" % device.type)

    encoder = model.Encoder(depth)
    decoder = model.Decoder(depth)
    net = torch.nn.Sequential(encoder, decoder).to(device)
    optimizer = optim.Adadelta(net.parameters(), lr=0.01)

    dataset = load.WavDataSet("data/wav/", model.downsample_factor**depth,
                              data_device)
    dataloader = torch.utils.data.dataloader.DataLoader(dataset,
                                                        batch_size=batch_size,
                                                        shuffle=True)

    print("Starting training")
    for e in range(epochs):
        net.train()
        loss_sum = 0
        for batch_idx, (data_noise, data) in enumerate(dataloader):
            data = data.to(device)
            data_noise = data_noise.to(device)
            optimizer.zero_grad()
            output = net(data_noise)
            loss = F.mse_loss(output, data)
            loss.backward()
            optimizer.step()
            loss_sum += loss.item()  # .item() keeps the running sum from retaining the graph
        print("Epoch: %d\tLoss: %f" % (e, loss_sum))
        if e % 50 == 0:
            torch.save(net.state_dict(), "checkpoint/model_%d.pth" % e)
Code example #3
def main():
    ### Create the torch datasets and get the size of the 'on-the-fly' created vocabulary and the length of the longest caption
    trainDataset = loadData.FlickrTrainDataset(images_folder, captions_folder,
                                               trans, 'TRAIN')
    valDataset = loadData.FlickrValDataset(images_folder, captions_folder,
                                           trans, 'VAL')
    voc_size = trainDataset.getVocabSize()
    max_capt = trainDataset.getMaxCaptionsLength()

    ### Create the models
    Encoder = model.Encoder()
    Decoder = model.Decoder(encoder_dim=2048,
                            decoder_dim=512,
                            attention_dim=256,
                            vocab_size=voc_size)
    Embedding = model.Embedding(vocab_size=voc_size, embedding_dim=128)

    ### Set the optimizer for the decoder (the only component that is actually trained) and the device for the model tensors
    decoder_optimizer = torch.optim.Adam(params=filter(
        lambda p: p.requires_grad, Decoder.parameters()),
                                         lr=1e-3)
    Encoder.to(device)
    Decoder.to(device)
    Embedding.to(device)

    ### Create the data loaders for training and evaluation
    loader_train = DataLoader(trainDataset,
                              32,
                              sampler=sampler.SubsetRandomSampler(
                                  range(30000)))
    val_loader = DataLoader(valDataset,
                            32,
                            sampler=sampler.SubsetRandomSampler(range(30000)))

    best_bleu = 0  # The best BLEU score so far
    for i in range(epochs):
        ## One epoch's training
        train.train(data_loader=loader_train,
                    encoder=Encoder,
                    decoder=Decoder,
                    embedding=Embedding,
                    max_caption_length=max_capt,
                    optim=decoder_optimizer)
        ## One epoch's validation
        new_bleu = train.validate(data_loader=val_loader,
                                  encoder=Encoder,
                                  decoder=Decoder,
                                  embedding=Embedding,
                                  max_caption_length=max_capt)

        if new_bleu > best_bleu:
            best_bleu = new_bleu
        else:
            ## No improvement since the last epoch, so we stop training
            break

    ## Save the model for deploying
    torch.save(Encoder, 'Encoder')
    torch.save(Decoder, 'Decoder')
    torch.save(Embedding, 'Embedding')
Code example #4
 def __init__(self, model_file1=model_path):
     # You should
     #       1. create the model object
     #       2. load your state_dict
     #       3. call cuda()
     # self.model = ...
     #
     self.models_detector = []
     for i in range(6):
         models = {}
         encoder_path = os.path.join(model_file1[i], "encoder.pth")
         encoder_dict = torch.load(encoder_path, map_location=device)
         feed_height = encoder_dict["height"]
         feed_width = encoder_dict["width"]
         models["encoder"] = model.Encoder(feed_width, feed_height, False)
         filtered_dict_enc = {
             k: v
             for k, v in encoder_dict.items()
             if k in models["encoder"].state_dict()
         }
         models["encoder"].load_state_dict(filtered_dict_enc)
         decoder_path = os.path.join(model_file1[i], "decoder.pth")
         models["decoder"] = model.Decoder(models["encoder"].num_ch_enc)
         models["decoder"].load_state_dict(
             torch.load(decoder_path, map_location=device))
         for key in models.keys():
             models[key].to(device)
             models[key].eval()
         self.models_detector.append(models)
Code example #5
    def load_model(self, vgg_path, decoder_path=None):
        self.vgg = model.VGG()
        self.vgg.load_state_dict(torch.load(vgg_path))
        self.vgg = nn.Sequential(*list(self.vgg.children())[:31])
        self.vgg_encoder = model.VGG_Encoder(self.vgg)

        self.decoder = model.Decoder()
        if decoder_path:
            self.decoder.load_state_dict(torch.load(decoder_path))
Code example #6
File: model_test.py Project: KevinFang97/ano
def decoder_test(h, score, l_score):
    decoder = model.Decoder(3, 50, 6, 10, 20)
    target = Variable(torch.from_numpy(np.random.rand(3, 6, 20)).float())
    first_word = Variable(torch.from_numpy(np.random.rand(3, 1, 20) / 10).float())
    training_out = decoder(h, score, training=True, target=target)
    pred_out = decoder(h, score, training=False, first_word=first_word)
    print("training_out:")
    print(training_out)
    print("pred_out:")
    print(pred_out)
Code example #7
File: eval.py Project: aul12/WuppDiKlickDi
def main():
    device = torch.device("cpu")
    print("Device type: %s" % device.type)

    encoder = model.Encoder(depth)
    decoder = model.Decoder(depth)
    net = torch.nn.Sequential(encoder, decoder).to(device)
    net.load_state_dict(torch.load("checkpoint/model_3150.pth"))
    net.eval()

    dataset = load.WavDataSet("data/wav/", model.downsample_factor**depth,
                              device)
    dataloader = torch.utils.data.dataloader.DataLoader(dataset,
                                                        batch_size=batch_size,
                                                        shuffle=True)

    for batch_idx, data in enumerate(dataloader):
        data = data.to(device)
        output = net(data)
        scipy.io.wavfile.write("out/%d.wav" % batch_idx, load.sample_rate,
                               output.data.numpy())
        print("Finished %d" % batch_idx)
Code example #8
File: utils.py Project: Iliaavilov/image_captioning
def load_models(checkpoint_name=None,
                encoded_image_size=None,
                word_embeddings_dim=None,
                attention_dim=None,
                decoder_hidden_size=None,
                vocab_size=None,
                device=None):
    '''
    :param checkpoint_name: name of the checkpoint file
    :param encoded_image_size: used to initialize the model if there is no checkpoint
    :param word_embeddings_dim: used to initialize the model if there is no checkpoint
    :param attention_dim: used to initialize the model if there is no checkpoint
    :param decoder_hidden_size: used to initialize the model if there is no checkpoint
    :param vocab_size: used to initialize the model if there is no checkpoint
    :param device: device on which to store the model
    :return: start_epoch, end_epoch, loss_fn, enc, dec, optimizer_encoder, optimizer_decoder
    '''
    loss_fn = nn.CrossEntropyLoss().to(device)
    end_epoch = 10_000
    if checkpoint_name is None:
        start_epoch = 0
        enc = model.Encoder(encoded_image_size=encoded_image_size).to(device)
        dec = model.Decoder(vocab_size=vocab_size,
                            word_embeddings_dim=word_embeddings_dim,
                            attention_dim=attention_dim,
                            decoder_hidden_size=decoder_hidden_size,
                            encoded_image_size=encoded_image_size).to(device)

        optimizer_decoder = torch.optim.Adam(dec.parameters(), lr=4e-4)
        optimizer_encoder = torch.optim.Adam(enc.parameters(), lr=1e-4)
    else:
        checkpoint = torch.load(checkpoint_name)
        start_epoch = checkpoint['epoch']
        dec = checkpoint['decoder'].to(device)
        optimizer_decoder = checkpoint['decoder_optimizer']
        enc = checkpoint['encoder'].to(device)
        optimizer_encoder = checkpoint['encoder_optimizer']

    return start_epoch, end_epoch, loss_fn, enc, dec, optimizer_encoder, optimizer_decoder
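
For reference, a minimal cold-start call might look like this; the hyperparameter values are illustrative assumptions, not taken from the project:

start_epoch, end_epoch, loss_fn, enc, dec, optimizer_encoder, optimizer_decoder = \
    load_models(checkpoint_name=None,
                encoded_image_size=14,
                word_embeddings_dim=512,
                attention_dim=512,
                decoder_hidden_size=512,
                vocab_size=10000,
                device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))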
Code example #9
File: train.py Project: menDDang/tf-end2end-asr
def build_model(hp):

    encoder = model.Encoder(num_layers=hp["encoder_num_layers"],
                            num_units=hp["encoder_num_units"],
                            dropout=hp["encoder_dropout"],
                            dropout_prob=hp["encoder_dropout_prob"],
                            layer_norm=hp["encoder_layer_norm"],
                            dtype=tf.float32)

    decoder = model.Decoder(
        attention_unit_num=hp["decoder_attention_unit_num"],
        vocab_size=hp["decoder_vocab_size"],
        gru_unit_num=hp["decoder_gru_unit_num"],
        fc_layer_num=hp["decoder_fc_layer_num"],
        fc_unit_num=hp["decoder_fc_unit_num"],
        attention_type=hp["decoder_attention_type"],
        gru_layer_norm=hp["decoder_gru_layer_norm"],
        gru_dropout=hp["decoder_gru_dropout"],
        gru_dropout_prob=hp["decoder_gru_dropout_prob"],
        fc_activation=hp["decoder_fc_activation"],
        dtype=tf.float32)

    return encoder, decoder
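
build_model expects a flat hyperparameter dict covering both sub-models. A minimal sketch with assumed values (only the key names come from the code above; "bahdanau" and "relu" are guesses at valid option strings):

hp = {
    "encoder_num_layers": 3,          # illustrative values throughout
    "encoder_num_units": 512,
    "encoder_dropout": True,
    "encoder_dropout_prob": 0.1,
    "encoder_layer_norm": True,
    "decoder_attention_unit_num": 256,
    "decoder_vocab_size": 30,
    "decoder_gru_unit_num": 512,
    "decoder_fc_layer_num": 2,
    "decoder_fc_unit_num": 512,
    "decoder_attention_type": "bahdanau",
    "decoder_gru_layer_norm": True,
    "decoder_gru_dropout": True,
    "decoder_gru_dropout_prob": 0.1,
    "decoder_fc_activation": "relu",
}
encoder, decoder = build_model(hp)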
Code example #10
def make_model(src_vocab,
               tgt_vocab,
               emb_size=256,
               hidden_size=512,
               num_layers=1,
               dropout=0.1):
    "Helper: Construct a model from hyperparameters."

    attention = model.BahdanauAttention(hidden_size)

    mdl = model.EncoderDecoder(
        model.Encoder(emb_size,
                      hidden_size,
                      num_layers=num_layers,
                      dropout=dropout),
        model.Decoder(emb_size,
                      hidden_size,
                      attention,
                      num_layers=num_layers,
                      dropout=dropout), nn.Embedding(src_vocab, emb_size),
        nn.Embedding(tgt_vocab, emb_size),
        model.Generator(hidden_size, tgt_vocab))

    return mdl.cuda() if USE_CUDA else mdl
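
A usage sketch, assuming the two vocabularies are already built (the sizes below are placeholders):

mdl = make_model(src_vocab=8000, tgt_vocab=6000)  # defaults: emb_size=256, hidden_size=512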
Code example #11
File: convert.py Project: rkwlgh/AI_Homework_VC
Enc = model.Encoder(style_dim=4,
                    latent_dim=latent_dim,
                    vae_type=args.model_type)
if args.epoch == 0:
    Enc.load_state_dict(torch.load(model_dir + "/final_enc.pt"))
else:
    Enc.load_state_dict(
        torch.load(model_dir + "/parm/" + str(args.epoch) + "_enc.pt"))
Enc.cuda()
Enc.eval()
if args.model_type == "MD":
    Dec_dict = dict()
    for spk_id in spk_list:
        cur_Dec = model.Decoder(style_dim=4,
                                latent_dim=latent_dim,
                                vae_type=args.model_type)
        cur_Dec.cuda()
        cur_Dec.eval()
        if args.epoch == 0:
            cur_Dec.load_state_dict(
                torch.load(model_dir + "/final_VCC2" + spk_id + "_dec.pt"))
        else:
            cur_Dec.load_state_dict(
                torch.load(model_dir + "/parm/" + str(args.epoch) + "_VCC2" +
                           spk_id + "_dec.pt"))
        Dec_dict[spk_id] = cur_Dec
else:
    Dec = model.Decoder(style_dim=4,
                        latent_dim=latent_dim,
                        vae_type=args.model_type)
Code example #12
print("Config:", config)

train_loader = dataloader.train_loader(args.dataset, args.data_directory,
                                       args.batch_size, args.input_h,
                                       args.input_w, args.cpu_num)
test_loader = dataloader.test_loader(args.dataset, args.data_directory,
                                     args.batch_size, args.input_h,
                                     args.input_w, args.cpu_num)

# encoder = model.Encoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(args.device)
# decoder = model.Decoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(args.device)
encoder = model.Encoder(args.channel_size, args.filter_size, args.kernel_size,
                        args.stride_size, args.layer_size,
                        args.latent_size).to(args.device)
decoder = model.Decoder(args.channel_size, args.filter_size, args.kernel_size,
                        args.stride_size, args.layer_size,
                        args.latent_size).to(args.device)

if args.load_model != '000000000000':
    encoder.load_state_dict(
        torch.load(args.log_directory + args.name + '/' + args.load_model +
                   '/bvae_encoder.pt'))
    decoder.load_state_dict(
        torch.load(args.log_directory + args.name + '/' + args.load_model +
                   '/bvae_decoder.pt'))
    args.time_stamp = args.load_model[:12]
    print('Model {} loaded.'.format(args.load_model))

log = args.log_directory + args.name + '/' + args.time_stamp + config + '/'
writer = SummaryWriter(log)
Code example #13

dial_act_dict, dial_acts_data = get_dial_acts('data/dialogue_act_feats.json')

# Create models
encoder = model.Encoder(vocab_size=len(input_w2i), 
                                    emb_size=args.emb_size, 
                                    hid_size=args.hid_size)

policy = model.Policy(hidden_size=args.hid_size,
                      db_size=args.db_size,
                      bs_size=args.bs_size,
                      da_size=args.da_size)

decoder = model.Decoder(emb_size=args.emb_size,
                        hid_size=args.hid_size,
                        vocab_size=len(output_w2i),
                        use_attn=args.use_attn)

if args.shallow_fusion or args.deep_fusion:
  s2s = model.Model(encoder=encoder,
                    policy=policy,
                    decoder=decoder,
                    input_w2i=input_w2i,
                    output_w2i=output_w2i,
                    args=args)
  lm_decoder = model.Decoder(emb_size=args.emb_size,
                             hid_size=args.hid_size,
                             vocab_size=len(output_w2i),
                             use_attn=False)
  lm = model.LanguageModel(decoder=lm_decoder,
                           input_w2i=input_w2i,
Code example #14
def train():
    # Enable TensorFlow eager execution (dynamic graph mode)
    tf.enable_eager_execution()
    # generate dataset
    num_examples = 3000
    input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar = utils.load_dataset(
        num_examples)
    input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(
        input_tensor, target_tensor, test_size=0.2)
    logging.info('Train/validation set sizes: ' + str(len(input_tensor_train)) +
                 ' ' + str(len(input_tensor_val)))

    # Create the tf.data.Dataset and set some of its parameters
    BUFFER_SIZE = len(input_tensor_train)
    BATCH_SIZE = config.BATCH_SIZE
    N_BATCH = BUFFER_SIZE / BATCH_SIZE
    embedding_dim = config.EMBEDDING_DIM
    units = config.UNITS
    vocab_inp_size = config.VOCAB_INP_SIZE
    vocab_tar_size = config.VOCAB_TAR_SIZE

    dataset = tf.data.Dataset.from_tensor_slices(
        (input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)

    # Encoder and decoder
    encoder = model.Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
    decoder = model.Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)

    # Optimizer
    optimizer = tf.train.AdamOptimizer()

    # Loss function
    def loss_function(real, pred):
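        # zero out the loss at padded positions (token id 0 marks padding)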
        mask = 1 - np.equal(real, 0)
        loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=real, logits=pred) * mask
        return tf.reduce_mean(loss_)

    # checkpoints(object-based saving)
    checkpoint_dir = config.CHECK_POINT_DIR
    checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     encoder=encoder,
                                     decoder=decoder)

    # Training
    EPOCHS = config.EPOCHS
    for epoch in range(EPOCHS):
        start = time.time()
        hidden = encoder.initialize_hidden_state()
        total_loss = 0
        for (batch, (inp, targ)) in enumerate(dataset):
            loss = 0
            with tf.GradientTape() as tape:
                enc_output, enc_hidden = encoder(inp, hidden)
                dec_hidden = enc_hidden
                dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] *
                                           BATCH_SIZE, 1)
                # Teacher forcing - feeding the target as the next input
                for t in range(1, targ.shape[1]):
                    # passing enc_output to the decoder
                    predictions, dec_hidden, _ = decoder(
                        dec_input, dec_hidden, enc_output)
                    loss += loss_function(targ[:, t], predictions)
                    # using teacher forcing
                    dec_input = tf.expand_dims(targ[:, t], 1)

            batch_loss = (loss / int(targ.shape[1]))
            total_loss += batch_loss
            variables = encoder.variables + decoder.variables
            gradients = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(gradients, variables))
            if batch % 100 == 0:
                print('Epoch {} Batch {} Loss {:.4f}'.format(
                    epoch + 1, batch, batch_loss.numpy()))
        if (epoch + 1) % 2 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / N_BATCH))
        print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
Code example #15
dataset = torch.utils.data.TensorDataset(xs, ys, yphi)

dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         drop_last=True)
device = torch.device("cuda" if args.cuda else "cpu")

###############################################################################
# Build the model
###############################################################################

ntokens = 850
extractor = model.Extractor(args.model, ntokens, args.emsize, args.nhid,
                            args.nlayers, args.dropout).to(device)
decoder = model.Decoder(args.nhid, args.nview, args.dropout).to(device)
generator = model.Generator(args.emsize, args.noiseDim,
                            args.dropout).to(device)
discriminator = model.Discriminator(args.emsize, args.dropout).to(device)

criterionD1 = nn.CrossEntropyLoss()  # for D_S
criterionD2 = nn.BCELoss()  # for D_N
LocalityConstrains = nn.CosineEmbeddingLoss()

noiseDim = int(args.noiseDim)

real_label = 1
fake_label = 0

lr = args.lr
optimizerExtractor = optim.Adam(extractor.parameters(),
Code example #16
]
if args.sample:
    config_list.append('sample')
config = ""
for i in map(str, config_list):
    config = config + '_' + i
print("Config:", config)

train_loader = dataloader.train_loader('mnist', args.data_directory,
                                       args.batch_size)
test_loader = dataloader.test_loader('mnist', args.data_directory,
                                     args.batch_size)

encoder = model.Encoder(args.channel_size, args.input_h, args.input_w,
                        args.filter_num, args.latent_size).to(device)
decoder = model.Decoder(args.channel_size, args.input_h, args.input_w,
                        args.filter_num, args.latent_size).to(device)
if args.load_model != '000000000000':
    encoder.load_state_dict(
        torch.load(args.log_directory + args.name + '/' + args.load_model +
                   '/{}_encoder.pt'.format(args.name)))
    decoder.load_state_dict(
        torch.load(args.log_directory + args.name + '/' + args.load_model +
                   '/{}_decoder.pt'.format(args.name)))
    args.time_stamp = args.load_model[:12]

log = args.log_directory + args.name + '/' + args.time_stamp + config + '/'
writer = SummaryWriter(log)

optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()),
                       lr=args.lr)
Code example #17
def main():
    print('Loading word embedding')
    emb = KeyedVectors.load_word2vec_format(hp.word_embedding,
                                            binary=hp.emb_binary)

    print("Loading data")
    stereotype_words = {}
    gender_words = {}
    no_gender_words = make_no_gender_words(open(hp.no_gender_words), emb)
    stereotype_words['female'], stereotype_words['male'] = \
              make_pair_words(hp.stereotype_words, emb)
    gender_words['female'], gender_words['male'] = \
              make_pair_words(hp.gender_words, emb)
    all_words = no_gender_words \
              + stereotype_words['female'] \
              + stereotype_words['male'] \
              + gender_words['female'] \
              + gender_words['male']

    train_words, dev_words = create_train_dev(gender_words, no_gender_words,
                                              stereotype_words)

    word2emb = {}
    for word in all_words:
        word2emb[word] = emb[word]

    if hp.pre_train_autoencoder:
        print('Pre-training autoencoder')
        encoder = model.Encoder(hp.emb_size, hp.hidden_size,
                                hp.pta_dropout_rate)
        decoder = model.Decoder(hp.hidden_size, hp.emb_size,
                                hp.pta_dropout_rate)
        if hp.gpu >= 0:
            encoder.cuda()
            decoder.cuda()
        encoder_optim = make_optim(encoder, hp.pta_optimizer,
                                   hp.pta_learning_rate, hp.pta_lr_decay,
                                   hp.pta_max_grad_norm)
        decoder_optim = make_optim(decoder, hp.pta_optimizer,
                                   hp.pta_learning_rate, hp.pta_lr_decay,
                                   hp.pta_max_grad_norm)
        if hp.pre_data == 'random':
            checkpoint = pre_train_autoencoder(hp, encoder, encoder_optim,
                                               decoder, decoder_optim, emb)
        elif hp.pre_data == 'common':
            checkpoint = pre_train_autoencoder(hp,
                                               encoder,
                                               encoder_optim,
                                               decoder,
                                               decoder_optim,
                                               emb,
                                               dev_words=dev_words)

    encoder = model.Encoder(hp.emb_size, hp.hidden_size, hp.dropout_rate)
    decoder = model.Decoder(hp.hidden_size, hp.emb_size, hp.dropout_rate)
    if hp.gpu >= 0:
        encoder.cuda()
        decoder.cuda()
    if hp.pre_train_autoencoder:
        encoder.load_state_dict(checkpoint['encoder'])
        decoder.load_state_dict(checkpoint['decoder'])

    if hp.pre_train_classifier:
        female_classifier = model.Classifier(hp.hidden_size)
        male_classifier = model.Classifier(hp.hidden_size)
        if hp.gpu >= 0:
            female_classifier.cuda()
            male_classifier.cuda()
        female_classifier_optim = make_optim(female_classifier,
                                             hp.cls_optimizer,
                                             hp.cls_learning_rate,
                                             hp.cls_lr_decay,
                                             hp.cls_max_grad_norm)
        male_classifier_optim = make_optim(male_classifier, hp.cls_optimizer,
                                           hp.cls_learning_rate,
                                           hp.cls_lr_decay,
                                           hp.cls_max_grad_norm)

        encoder.eval()
        encoder.zero_grad()

        train_females = []
        train_males = []
        dev_females = []
        dev_males = []

        train_female_embs = [
            encoder(torch.FloatTensor(emb[word[0]]).cuda()).data
            if hp.gpu >= 0 else encoder(torch.FloatTensor(emb[word[0]])).data
            for word in train_words['female & male']
        ]
        encoder.zero_grad()
        train_male_embs = [
            encoder(torch.FloatTensor(emb[word[1]]).cuda()).data
            if hp.gpu >= 0 else encoder(torch.FloatTensor(emb[word[1]])).data
            for word in train_words['female & male']
        ]
        encoder.zero_grad()
        train_stereotype_embs = [
            encoder(torch.FloatTensor(emb[word]).cuda()).data
            if hp.gpu >= 0 else encoder(torch.FloatTensor(emb[word])).data
            for word in train_words['no gender']
        ]
        encoder.zero_grad()

        dev_female_embs = [
            encoder(torch.FloatTensor(emb[word[0]]).cuda()).data
            if hp.gpu >= 0 else encoder(torch.FloatTensor(emb[word[0]])).data
            for word in dev_words['female & male']
        ]
        encoder.zero_grad()
        dev_male_embs = [
            encoder(torch.FloatTensor(emb[word[1]]).cuda()).data
            if hp.gpu >= 0 else encoder(torch.FloatTensor(emb[word[1]])).data
            for word in dev_words['female & male']
        ]
        encoder.zero_grad()
        dev_stereotype_embs = [
            encoder(torch.FloatTensor(emb[word]).cuda()).data
            if hp.gpu >= 0 else encoder(torch.FloatTensor(emb[word])).data
            for word in dev_words['no gender']
        ]
        encoder.zero_grad()

        print('Pre-training classifier')
        female_checkpoint, male_checkpoint = pre_train_classifier(
            hp, female_classifier, male_classifier, female_classifier_optim,
            male_classifier_optim, train_female_embs, train_male_embs,
            train_stereotype_embs, dev_female_embs, dev_male_embs,
            dev_stereotype_embs)

    print('Building female & male classifiers')
    female_classifier = model.Classifier(hp.hidden_size)
    male_classifier = model.Classifier(hp.hidden_size)
    if hp.gpu >= 0:
        female_classifier.cuda()
        male_classifier.cuda()
    if hp.pre_train_classifier:
        female_classifier.load_state_dict(female_checkpoint['female'])
        male_classifier.load_state_dict(male_checkpoint['male'])

    print('Setting optimizer')
    encoder_optim = make_optim(encoder, hp.optimizer, hp.learning_rate,
                               hp.lr_decay, hp.max_grad_norm)
    female_classifier_optim = make_optim(female_classifier, hp.optimizer,
                                         hp.learning_rate, hp.lr_decay,
                                         hp.max_grad_norm)
    male_classifier_optim = make_optim(male_classifier, hp.optimizer,
                                       hp.learning_rate, hp.lr_decay,
                                       hp.max_grad_norm)
    decoder_optim = make_optim(decoder, hp.optimizer, hp.learning_rate,
                               hp.lr_decay, hp.max_grad_norm)

    trainModel(encoder, encoder_optim, female_classifier,
               female_classifier_optim, male_classifier, male_classifier_optim,
               decoder, decoder_optim, train_words, dev_words, word2emb)
Code example #18
config_list = [args.name, args.epochs, args.batch_size, args.lr, 
				args.input_h, args.input_w, 
				args.hidden_size, args.latent_size,
				args.L, args.binarize, args.mc]
if args.sample:
	config_list.append('sample')
config = ""
for i in map(str, config_list):
	config = config + '_' + i
print("Config:", config)

train_loader = dataloader.train_loader('mnist', args.data_directory, args.batch_size)
test_loader = dataloader.test_loader('mnist', args.data_directory, args.batch_size)

encoder = model.Encoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(device)
decoder = model.Decoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(device)
if args.load_model != '000000000000':
	encoder.load_state_dict(torch.load(args.log_directory + args.name + '/' + args.load_model+ '/{}_encoder.pt'.format(args.name)))
	decoder.load_state_dict(torch.load(args.log_directory + args.name + '/' + args.load_model + '/{}_decoder.pt'.format(args.name)))
	args.time_stamp = args.load_model[:12]

log = args.log_directory + args.name + '/' + args.time_stamp + config + '/'
writer = SummaryWriter(log)

optimizer = optim.Adam(list(encoder.parameters())+list(decoder.parameters()), lr = args.lr)

def binarize(data):
	data = data > 0.5
	return data.float()

def train(epoch):
Code example #19
def run():
    Seed = 1234
    random.seed(Seed)
    np.random.seed(Seed)
    torch.manual_seed(Seed)
    torch.cuda.manual_seed(Seed)
    torch.backends.cudnn.deterministic = True
    train, valid, test, SRC, TRG = dataset.create_dataset()
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train, valid, test),
        sort_key=lambda x: len(x.source),
        batch_size=config.BATCH_SIZE,
        device=config.device)

    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)

    ENC_EMB_DIM = config.ENCODER_EMBEDDING_DIMENSION
    DEC_EMB_DIM = config.DECODER_EMBEDDING_DIMENSION
    HID_DIM = config.LSTM_HIDDEN_DIMENSION
    N_LAYERS = config.LSTM_LAYERS
    ENC_DROPOUT = config.ENCODER_DROPOUT
    DEC_DROPOUT = config.DECODER_DROPOUT

    attn = model.Attention(HID_DIM, HID_DIM)
    enc = model.Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, HID_DIM, ENC_DROPOUT)
    dec = model.Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, HID_DIM, DEC_DROPOUT,
                        attn)

    model_rnn = model.Seq2Seq(enc, dec, config.device).to(config.device)

    optimizer = optim.Adam(model_rnn.parameters(), lr=config.LEARNING_RATE)

    TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]

    criterion = nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)

    if (args.action == 'train'):
        model_rnn.apply(utils.init_weights)

        best_valid_loss = float('inf')

        for epoch in range(config.N_EPOCHS):
            start_time = time.time()

            train_loss = engine.train_fn(model_rnn, train_iterator, optimizer,
                                         criterion, config.CLIP)
            valid_loss = engine.evaluate_fn(model_rnn, valid_iterator,
                                            criterion)

            end_time = time.time()

            epoch_mins, epoch_secs = utils.epoch_time(start_time, end_time)

            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                torch.save(model_rnn.state_dict(), config.MODEL_SAVE_FILE)

            with open(config.RESULTS_SAVE_FILE, 'a') as f:
                print(
                    f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s',
                    file=f)
                print(
                    f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}',
                    file=f)
                print(
                    f'\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {math.exp(valid_loss):7.3f}',
                    file=f)

    elif (args.action == 'test'):
        model_rnn.load_state_dict(torch.load(config.TEST_MODEL))
        loss, target, output = engine.test_fn(model_rnn, test_iterator,
                                              criterion, SRC, TRG)
        bl = bleu_score(output, target, max_n=1, weights=[1])
        met = 0

        for z in range(len(output)):
            out = ' '.join(output[z][y]
                           for y in range(min(10, len(output[z]))))
            tar = ' '.join(y for y in target[z])

            met = met + metric_utils.compute_metric(out, 1.0, tar)

        with open(config.TEST_RESULTS_FILE, 'w') as f:
            print(f'Test BLEU: {bl*100}, Test PPL: {math.exp(loss):7.3f}',
                  'Metric:',
                  met / len(output),
                  file=f)

    elif (args.action == 'save_vocab'):
        print('Source Vocab Length', len(SRC.vocab))
        print('Target vocab length', len(TRG.vocab))
        s1 = '\n'.join(k for k in SRC.vocab.itos)
        s2 = '\n'.join(k for k in TRG.vocab.itos)
        with open('NL_vocabulary.txt', 'w') as f:
            f.write(s1)
        with open('Bash_vocabulary.txt', 'w') as f:
            f.write(s2)
Code example #20
 def __init__(self):
     self.e1 = model.Encoder()
     self.e_shared = model.Eshared(0.5)
     self.d_shared = model.Dshared()
     self.d2 = model.Decoder()
     self.denoiser = model.Denoiser()
Code example #21
if args.load_model != '000000000000':
	ce1 = torch.load(args.log_directory + args.load_model + '/content_encoder1.pt')
	ce2 = torch.load(args.log_directory + args.load_model + '/content_encoder2.pt')
	se1 = torch.load(args.log_directory + args.load_model + '/style_encoder1.pt')
	se2 = torch.load(args.log_directory + args.load_model + '/style_encoder2.pt')
	de1 = torch.load(args.log_directory + args.load_model + '/decoder1.pt')
	de2 = torch.load(args.log_directory + args.load_model + '/decoder2.pt')
	dis1 = torch.load(args.log_directory + args.load_model + '/discriminator1.pt')
	dis2 = torch.load(args.log_directory + args.load_model + '/discriminator2.pt')
	args.time_stamp = args.load_model[:12]
else:
	ce1 = model.Content_encoder(args.channel_size, args.content_code_h, args.content_code_w).to(device)
	ce2 = model.Content_encoder(args.channel_size, args.content_code_h, args.content_code_w).to(device)
	se1 = model.Style_encoder(args.channel_size, args.style_code_num).to(device)
	se2 = model.Style_encoder(args.channel_size, args.style_code_num).to(device)
	de1 = model.Decoder(args.input_h, args.input_w, args.channel_size, args.style_code_num).to(device)
	de2 = model.Decoder(args.input_h, args.input_w, args.channel_size, args.style_code_num).to(device)
	dis1 = model.Discriminator(args.channel_size).to(device)
	dis2 = model.Discriminator(args.channel_size).to(device)

log = args.log_directory + 'munit/' + args.time_stamp + config + '/'
writer = SummaryWriter(log)

gen_optimizer = optim.Adam(list(ce1.parameters()) + list(ce2.parameters()) + 
							list(se1.parameters()) + list(se2.parameters()) +
							list(de1.parameters()) + list(de2.parameters()), lr = args.lr)
dis_optimizer = optim.Adam(list(dis1.parameters()) + list(dis2.parameters()), lr = args.lr)

def train(epoch):
	epoch_start_time = time.time()
	train_loss = 0
Code example #22
    def __init__(self):
        self.opt = get_args()
        self.models = {}
        self.weight = {}
        self.weight["dynamic"] = self.opt.dynamic_weight
        self.device = "cuda"
        self.criterion_d = nn.BCEWithLogitsLoss()
        self.parameters_to_train = []
        self.parameters_to_train_D = []

        # Initializing models
        self.models["encoder"] = model.Encoder(self.opt.height, self.opt.width,
                                               True)
        self.models["decoder"] = model.Decoder(
            self.models["encoder"].num_ch_enc)
        self.models["discriminator"] = model.Discriminator()

        for key in self.models.keys():
            self.models[key].to(self.device)
            if "discr" in key:
                self.parameters_to_train_D += list(
                    self.models[key].parameters())
            else:
                self.parameters_to_train += list(self.models[key].parameters())

        # Optimization
        self.model_optimizer = optim.Adam(self.parameters_to_train,
                                          self.opt.lr)
        self.model_lr_scheduler = optim.lr_scheduler.StepLR(
            self.model_optimizer, self.opt.scheduler_step_size, 0.1)

        self.model_optimizer_D = optim.Adam(self.parameters_to_train_D,
                                            self.opt.lr)
        self.model_lr_scheduler_D = optim.lr_scheduler.StepLR(
            self.model_optimizer_D, self.opt.scheduler_step_size, 0.1)

        self.patch = (1, self.opt.occ_map_size // 2**4,
                      self.opt.occ_map_size // 2**4)
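        # the valid/fake target tensors below are sized to this per-patch grid
        # (occ_map_size downsampled by 2**4, presumably the discriminator's output resolution)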

        self.valid = Variable(torch.Tensor(
            np.ones((self.opt.batch_size, *self.patch))),
                              requires_grad=False).float().cuda()
        self.fake = Variable(torch.Tensor(
            np.zeros((self.opt.batch_size, *self.patch))),
                             requires_grad=False).float().cuda()

        transform = torchvision.transforms.ToTensor()
        labeled_trainset = LabeledDataset(image_folder=image_folder,
                                          annotation_file=annotation_csv,
                                          scene_index=labeled_scene_index,
                                          transform=transform)
        self.train_loader = DataLoader(labeled_trainset,
                                       batch_size=self.opt.batch_size,
                                       shuffle=True,
                                       num_workers=self.opt.num_workers,
                                       collate_fn=None,
                                       pin_memory=True,
                                       drop_last=True)

        labeled_valset = LabeledDataset(image_folder=image_folder,
                                        annotation_file=annotation_csv,
                                        scene_index=labeled_scene_index_val,
                                        transform=transform)
        self.val_loader = DataLoader(labeled_valset,
                                     batch_size=1,
                                     shuffle=True,
                                     num_workers=self.opt.num_workers,
                                     collate_fn=None,
                                     pin_memory=True,
                                     drop_last=True)
        print(
            "There are {:d} training items and {:d} validation items\n".format(
                len(labeled_trainset), len(labeled_valset)))
Code example #23
                                               num_workers=1)

dataset_secret = datasets.ImageFolder(secret_dir, data_transform)

dataloader_secret = torch.utils.data.DataLoader(dataset_secret,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=1)

# initialize the model and load the params

encoder = model.Encoder()
encoder = encoder.to(device)

# decoder (discriminator)
decoder = model.Decoder()
decoder = decoder.to(device)

ssim_loss = pytorch_ssim.SSIM()
mssim_loss = pytorch_msssim.MSSSIM()
mse_loss = nn.MSELoss()
# dis_loss=nn.BCELoss()

print('loading params')

path = model_dir + '/' + str(epoch) + '.pth.tar'  # load the params saved at this epoch

checkpoint = torch.load(path, map_location='cpu')
encoder.load_state_dict(checkpoint['encoder_state_dict'])
decoder.load_state_dict(checkpoint['decoder_state_dict'])
Code example #24
File: train.py Project: mandiehyewon/AI502
    (train_data, valid_data, test_data), 
    batch_size = BATCH_SIZE, 
    device = device)

#Training the seq2seq model
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5

enc = model.Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = model.Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)

seq2seq = model.Seq2Seq(enc, dec, device).to(device)  # avoid shadowing the 'model' module

def init_weights(m):
    for name, param in m.named_parameters():
        nn.init.uniform_(param.data, -0.08, 0.08)
        
seq2seq.apply(init_weights)

def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print(f'The model has {count_parameters(seq2seq):,} trainable parameters')

optimizer = optim.Adam(seq2seq.parameters())
Code example #25
    def __init__(self):
        self.opt = get_args()
        self.models = {}
        self.weight = {}
        self.weight["static"] = self.opt.static_weight
        self.weight["dynamic"] = self.opt.dynamic_weight
        self.device = "cuda"
        self.criterion_d = nn.BCEWithLogitsLoss()
        self.parameters_to_train = []
        self.parameters_to_train_D = []

        # Initializing models
        self.models["encoder"] = model.Encoder(18, self.opt.height,
                                               self.opt.width, True)
        if self.opt.type == "both":
            self.models["static_decoder"] = model.Decoder(
                self.models["encoder"].resnet_encoder.num_ch_enc)
            self.models["static_discr"] = model.Discriminator()
            self.models["dynamic_decoder"] = model.Discriminator()
            self.models["dynamic_decoder"] = model.Decoder(
                self.models["encoder"].resnet_encoder.num_ch_enc)
        else:
            self.models["decoder"] = model.Decoder(
                self.models["encoder"].resnet_encoder.num_ch_enc)
            self.models["discriminator"] = model.Discriminator()

        for key in self.models.keys():
            self.models[key].to(self.device)
            if "discr" in key:
                self.parameters_to_train_D += list(
                    self.models[key].parameters())
            else:
                self.parameters_to_train += list(self.models[key].parameters())

        # Optimization
        self.model_optimizer = optim.Adam(self.parameters_to_train,
                                          self.opt.lr)
        self.model_lr_scheduler = optim.lr_scheduler.StepLR(
            self.model_optimizer, self.opt.scheduler_step_size, 0.1)

        self.model_optimizer_D = optim.Adam(self.parameters_to_train_D,
                                            self.opt.lr)
        self.model_lr_scheduler_D = optim.lr_scheduler.StepLR(
            self.model_optimizer_D, self.opt.scheduler_step_size, 0.1)

        self.patch = (1, self.opt.occ_map_size // 2**4,
                      self.opt.occ_map_size // 2**4)

        self.valid = Variable(torch.Tensor(
            np.ones((self.opt.batch_size, *self.patch))),
                              requires_grad=False).float().cuda()
        self.fake = Variable(torch.Tensor(
            np.zeros((self.opt.batch_size, *self.patch))),
                             requires_grad=False).float().cuda()

        ## Data Loaders
        dataset_dict = {
            "3Dobject": dataloader.KITTIObject,
            "odometry": dataloader.KITTIOdometry,
            "argo": dataloader.Argoverse
        }

        self.dataset = dataset_dict[self.opt.split]
        fpath = os.path.join(os.path.dirname(__file__), "splits",
                             self.opt.split, "{}_files.txt")

        train_filenames = readlines(fpath.format("train"))
        val_filenames = readlines(fpath.format("val"))
        self.val_filenames = val_filenames
        self.train_filenames = train_filenames
        img_ext = '.png' if self.opt.ext == "png" else '.jpg'

        train_dataset = self.dataset(self.opt, train_filenames)
        val_dataset = self.dataset(self.opt, val_filenames)

        self.train_loader = DataLoader(train_dataset,
                                       self.opt.batch_size,
                                       True,
                                       num_workers=self.opt.num_workers,
                                       pin_memory=True,
                                       drop_last=True)
        self.val_loader = DataLoader(val_dataset,
                                     1,
                                     True,
                                     num_workers=self.opt.num_workers,
                                     pin_memory=True,
                                     drop_last=True)

        print("Using split:\n  ", self.opt.split)
        print(
            "There are {:d} training items and {:d} validation items\n".format(
                len(train_dataset), len(val_dataset)))
Code example #26
File: train.py Project: ruizhu90/Translation
    traindata, en_field, zh_field = data.translate_dataloader("data/en-zh.txt",
                                                              args.batch_size,
                                                              shuffle=True)
    data.save_vocab(en_field.vocab, "models/english.vocab")
    data.save_vocab(zh_field.vocab, "models/chinese.vocab")

    en_size = len(en_field.vocab)
    zh_size = len(zh_field.vocab)
    zh_pad = zh_field.vocab.stoi['<pad>']

    if os.path.exists(args.model):
        seq2seq = torch.load(args.model)
        seq2seq = seq2seq.cuda()
    else:
        encoder = model.Encoder(en_size,
                                embed_size,
                                hidden_size,
                                n_layers=2,
                                dropout=0.5)
        decoder = model.Decoder(embed_size,
                                hidden_size,
                                zh_size,
                                n_layers=1,
                                dropout=0.5)
        seq2seq = model.Seq2Seq(encoder, decoder).cuda()

    optimizer = optim.Adam(seq2seq.parameters(), lr=args.lr)
    print(seq2seq)

    train_model(traindata, args.epochs, seq2seq, optimizer, zh_size, zh_pad)
Code example #27
encoder = model.Encoder(vocab_size=INPUT_DIM,
                        d_model=config.HID_DIM,
                        ff_dim=config.PF_DIM,
                        n_heads=config.N_HEADS,
                        max_len=config.MAX_LEN,
                        dropout=config.DROPOUT,
                        n_layers=config.N_LAYERS,
                        n_experts=config.N_EXP,
                        capacity_factor=config.CAPACITY_FACTOR,
                        device=config.DEVICE).to(config.DEVICE)

decoder = model.Decoder(output_dim=OUTPUT_DIM,
                        d_model=config.HID_DIM,
                        ff_dim=config.PF_DIM,
                        n_heads=config.N_HEADS,
                        max_len=config.MAX_LEN,
                        dropout=config.DROPOUT,
                        n_layers=config.N_LAYERS,
                        n_experts=config.N_EXP,
                        capacity_factor=config.CAPACITY_FACTOR,
                        device=config.DEVICE).to(config.DEVICE)

seq_2_seq = model.Seq2Seq(encoder, decoder, SRC_PAD_IDX, TRG_PAD_IDX,
                          config.DEVICE).to(config.DEVICE)

optimizer = torch.optim.Adam(seq_2_seq.parameters(), lr=config.LR)
criterion = torch.nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)


def initialize_weights(m):
    if hasattr(m, 'weight') and m.weight.dim() > 1:
        torch.nn.init.xavier_uniform_(m.weight.data)
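
The snippet ends after defining initialize_weights; presumably it is applied to the model before training, e.g.:

seq_2_seq.apply(initialize_weights)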
Code example #28
    print('Predicted translation: {}'.format(result))


if __name__ == '__main__':
    num_examples = 3000
    # Enable TensorFlow eager execution (dynamic graph mode)
    tf.enable_eager_execution()
    # Encoder and decoder
    vocab_inp_size, vocab_tar_size = config.VOCAB_INP_SIZE, config.VOCAB_TAR_SIZE
    embedding_dim, units, BATCH_SIZE = config.EMBEDDING_DIM, config.UNITS, config.BATCH_SIZE
    _, __, inp_lang, targ_lang, max_length_inp, max_length_tar = utils.load_dataset(
        num_examples)
    del _, __
    gc.collect()
    encoder = model.Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
    decoder = model.Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
    optimizer = tf.train.AdamOptimizer()

    # checkpoints(object-based saving)
    checkpoint_dir = config.CHECK_POINT_DIR
    checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     encoder=encoder,
                                     decoder=decoder)
    # restoring the latest checkpoint in checkpoint_dir
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
    last = time.time()
    translate(u'hace mucho frio aqui.', encoder, decoder, inp_lang, targ_lang,
              max_length_inp, max_length_tar)
    print(time.time() - last)
Code example #29
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import preprocessing
import model

# Initialize the Encoder and Decoder
encoder = model.Encoder(vocab_size, embedding_dim, hidden_dim, batch_size).to(device)
decoder = model.Decoder(vocab_size, embedding_dim, hidden_dim, batch_size).to(device)
criterion = nn.CrossEntropyLoss(ignore_index=word2id["<pad>"], size_average=False)

# Define the optimizers
encoder_optimizer = optim.Adam(encoder.parameters(), lr=0.001)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=0.001)

def get_current_time():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

print("学習開始")
n_epoch = 60
sig_cnt = 0
all_losses = []

for epoch in range(1, n_epoch+1):
    title_batch = preprocessing.train2batch(title_train, batch_size)
    
    if epoch > 25:
        sig_cnt += 1
Code example #30
img_size = (256, 256)
pix_num = img_size[0] * img_size[1]
batch_size = 32
epoch_num = 200
img_path = '../yumi_data/yumi/_target/'  # natural cartoon images the model learns from
test_path = './samples/'
model_dir = './model/'
model_name = '%d_yumi_%s.pth'
model_path = model_dir + model_name
sample_path = './samples/'
conv_dim = 64

enc = model.Encoder(conv_dim=conv_dim)
enc.cuda()
enc_optimizer = torch.optim.Adam(enc.parameters(), lr=6e-4)
dec = model.Decoder(conv_dim=conv_dim)
dec.cuda()
dec_optimizer = torch.optim.Adam(dec.parameters(), lr=2e-4)
data_iterer = data_loader.get_dir_loader(img_path, img_size[0],
                                         batch_size)  # TODO: test
data_num = len(data_iterer)
bern_dist = torch.distributions.bernoulli.Bernoulli(probs=0.5 *
                                                    torch.ones(batch_size, 31))

print('preparation ready. training...')

for e_idx in range(epoch_num):
    for b_idx, img in enumerate(data_iterer):
        # Normalize
        norm_img = normalize_img(img).cuda()