Example 1
def test(args):

    vocab = Vocabulary()
    vocab.load_vocab(os.path.join(args['data_dir'], 'vocabulary.json'))
    args['voca_size'] = vocab.get_vocab_size()
    test_data = get_dataloader(
        os.path.join(args['data_dir'], 'encoded_test_dialogue_pair.json'),
        os.path.join(args['data_dir'], 'vocabulary.json'), 1)
    test_sent_pair_list = []

    model = Seq2Seq(args).eval()
    if torch.cuda.is_available():
        model = model.cuda()

    path = Checkpoint.get_latest_checkpoint(args['exp_dir'])
    model.load_state_dict(torch.load(os.path.join(path, 'model.pt')))

    for batch_idx, (sour, sour_len, targ, targ_len) in enumerate(test_data):
        if torch.cuda.is_available():
            sour = sour.cuda()
            targ = targ.cuda()
        enco_hidd_state = model.encoder.encoder_forward(sour, sour_len)
        out_prob = model.decoder.decoder_forward(targ, targ_len,
                                                 enco_hidd_state, 0)
        sent_list = [(out_prob.topk(1)[1].view(-1).tolist(), 0)]
        test_sent_pair_list += process_sent_list(vocab, sour, targ, sent_list)
        # logger.info('batch_idx:{} \nsent:{}'.format(batch_idx, test_sent_pair_list))

    save_test_sent(args['exp_data'], 'generated_test_sent.txt',
                   test_sent_pair_list)
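
The snippet above reads all of its configuration from a plain dict. A minimal invocation sketch, assuming only the keys the function actually touches ('data_dir', 'exp_dir', 'exp_data'); the paths are placeholders, and the Seq2Seq constructor will typically need additional hyperparameter keys that are not visible in this excerpt:

# Hypothetical driver for the test() function above; all values are placeholders.
args = {
    'data_dir': 'data/',                     # holds vocabulary.json and encoded_test_dialogue_pair.json
    'exp_dir': 'experiments/run1/',          # searched by Checkpoint.get_latest_checkpoint()
    'exp_data': 'experiments/run1/output/',  # where generated_test_sent.txt is written
}
test(args)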
Example 2
def main():
    train_loader, test_loader = get_mnist_data('../%s' % opt.dataset,
                                               opt.batch_size)
    model = CapsuleNetwork(opt)
    if opt.cuda:
        model = model.cuda()

    if opt.is_train:
        if opt.resume:
            latest_checkpoint_path = Checkpoint.get_latest_checkpoint(
                opt.save_folder)
            resume_checkpoint = Checkpoint.load(latest_checkpoint_path)
            model = resume_checkpoint.model
            optimizer = resume_checkpoint.optimizer
            start_epoch = resume_checkpoint.epoch + 1
        else:
            start_epoch = 0
            optimizer = Adam(model.parameters())

        for epoch in range(start_epoch, opt.n_epochs):
            train(epoch, model, train_loader, test_loader, optimizer)
            Checkpoint(model=model, optimizer=optimizer,
                       epoch=epoch).save(opt.save_folder)
    else:
        run_test(model, test_loader)
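
This example (and Example 6 below) reads its settings from a module-level `opt` namespace that is not shown. A minimal sketch of how such a namespace could be built with argparse; the flag names mirror the attributes the snippets access, and the defaults are placeholders:

import argparse

# Hypothetical reconstruction of the `opt` namespace used in Examples 2 and 6.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='mnist')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--n_epochs', type=int, default=30)
parser.add_argument('--save_folder', default='./checkpoints')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--is_train', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--vis', action='store_true')
opt = parser.parse_args()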
Example 3
    def train(self,
              model,
              data,
              num_epochs=5,
              resume=False,
              dev_data=None,
              optimizer=None,
              teacher_forcing_ratio=0):
        """ Run training for a given model.

        Args:
            model (seq2seq.models): model to run training on; if `resume=True`, it will be
               overwritten by the model loaded from the latest checkpoint.
            data (seq2seq.dataset.dataset.Dataset): dataset object to train on
            num_epochs (int, optional): number of epochs to run (default 5)
            resume (bool, optional): resume training from the latest checkpoint (default False)
            dev_data (seq2seq.dataset.dataset.Dataset, optional): dev Dataset (default None)
            optimizer (seq2seq.optim.Optimizer, optional): optimizer for training
               (default: Optimizer(torch.optim.Adam, max_grad_norm=5))
            teacher_forcing_ratio (float, optional): teacher forcing ratio (default 0)
        Returns:
            model (seq2seq.models): trained model.
        """
        # If training is set to resume
        if resume:
            latest_checkpoint_path = Checkpoint.get_latest_checkpoint(
                self.expt_dir)
            resume_checkpoint = Checkpoint.load(latest_checkpoint_path)
            model = resume_checkpoint.model
            self.optimizer = resume_checkpoint.optimizer

            # A workaround to set the optimizer's parameters properly
            resume_optim = self.optimizer.optimizer
            defaults = resume_optim.param_groups[0]
            defaults.pop('params', None)
            defaults.pop('initial_lr', None)
            self.optimizer.optimizer = resume_optim.__class__(
                model.parameters(), **defaults)

            start_epoch = resume_checkpoint.epoch
            step = resume_checkpoint.step
        else:
            start_epoch = 1
            step = 0
            if optimizer is None:
                optimizer = Optimizer(optim.Adam(model.parameters()),
                                      max_grad_norm=5)
            self.optimizer = optimizer

        self.logger.info("Optimizer: %s, Scheduler: %s" %
                         (self.optimizer.optimizer, self.optimizer.scheduler))

        self._train_epochs(data,
                           model,
                           num_epochs,
                           start_epoch,
                           step,
                           dev_data=dev_data,
                           teacher_forcing_ratio=teacher_forcing_ratio)
        return model
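
The docstring above fully specifies the training entry point. A minimal call sketch, assuming `trainer` is an instance of the class this method belongs to and that `seq2seq_model`, `train_set`, and `dev_set` have been built elsewhere:

# Hypothetical usage of the train() method documented above.
trained_model = trainer.train(
    seq2seq_model,            # seq2seq.models instance
    train_set,                # seq2seq.dataset.dataset.Dataset
    num_epochs=15,
    resume=False,             # set True to restart from the latest checkpoint
    dev_data=dev_set,
    teacher_forcing_ratio=0.5,
)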
Example 4
def load_model():
    if FLAGS.load_checkpoint is not None:
        checkpoint_path = os.path.join(FLAGS.expt_dir,
                                       Checkpoint.CHECKPOINT_DIR_NAME,
                                       FLAGS.load_checkpoint)
    else:
        checkpoint_path = Checkpoint.get_latest_checkpoint(FLAGS.expt_dir)
    logging.info("loading checkpoint from {}".format(checkpoint_path))
    checkpoint = Checkpoint.load(checkpoint_path)
    seq2seq = checkpoint.model
    # these are vocab classes with members stoi and itos
    input_vocab = checkpoint.input_vocab
    output_vocab = checkpoint.output_vocab
    classifier = (seq2seq, input_vocab, output_vocab)

    return classifier
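
The inline comment notes that both vocab objects expose `stoi` (string-to-index) and `itos` (index-to-string) mappings. A short sketch of how the returned tuple might be consumed; the tokens and ids below are placeholders:

# Hypothetical consumption of the (model, input_vocab, output_vocab) tuple.
seq2seq, input_vocab, output_vocab = load_model()

src_ids = [input_vocab.stoi[tok] for tok in ['how', 'are', 'you']]  # string -> index
tgt_words = [output_vocab.itos[i] for i in [4, 8, 15]]              # index -> string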
Example 5
    def train(self, encoder, decoder, n_epochs, train_data, dev_data,
              resume, optimizer, log_file):
        """
        ------------------------------------------------------------------------
        Args:
            encoder:                  Self-explanatory.
            decoder:                  Self-explanatory.
            n_epochs (int):           Number of epochs to train the model.
            train_data (Composition): Self-explanatory.
            dev_data (Composition):   Self-explanatory.
            resume (bool):            If True, load the last checkpoint.
            optimizer:                Optimizer to use; if None, Adam(lr=1e-3) over the
                                      encoder and decoder parameters is created.
            log_file:                 Log file passed through to train_epochs.
        ------------------------------------------------------------------------
        """
        if resume:
            latest_checkpoint_path = Checkpoint.get_latest_checkpoint(self.exp_dir)
            resume_checkpoint = Checkpoint.load(latest_checkpoint_path)
            encoder        = resume_checkpoint.encoder
            decoder        = resume_checkpoint.decoder
            start_epoch    = resume_checkpoint.epoch
            step           = resume_checkpoint.step
            self.scheduler = resume_checkpoint.scheduler
            self.optimizer = resume_checkpoint.optimizer
            self.samp_rate = resume_checkpoint.samp_rate
            self.KL_rate   = resume_checkpoint.KL_rate
            self.free_bits = resume_checkpoint.free_bits
            self.vocab_size = decoder.vocab_size
        else:
            self.optimizer = optimizer
            if optimizer is None:
                params = list(encoder.parameters()) + list(decoder.parameters())
                self.optimizer = Adam(params, lr=1e-3)
            self.scheduler = LambdaLR(self.optimizer, decay)
            self.vocab_size = decoder.vocab_size

            start_epoch = 1
            step = 0

        self.train_epochs(encoder, decoder, start_epoch, step, train_data,
                          dev_data, start_epoch + n_epochs, log_file)
        return encoder, decoder
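
This example passes an externally defined `decay` callable to `LambdaLR` without showing it. A minimal sketch of such a schedule; the decay rate is a placeholder, not the author's value:

# Hypothetical lr_lambda for torch.optim.lr_scheduler.LambdaLR: returns the factor
# by which the initial learning rate is multiplied at the given epoch.
def decay(epoch):
    return 0.95 ** epoch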
Example 6
def run_test(model, test_loader):
    latest_checkpoint_path = Checkpoint.get_latest_checkpoint(opt.save_folder)
    resume_checkpoint = Checkpoint.load(latest_checkpoint_path)
    model = resume_checkpoint.model
    optimizer = resume_checkpoint.optimizer

    model.eval()
    test_loss = 0
    num_error = 0
    num_data = 0
    for batch_id, (data, target) in enumerate(test_loader):
        data = Variable(data)
        if opt.cuda:
            data = data.cuda()

        output, mask, recon = model(data)
        out_mag = torch.sqrt((output**2).sum(2))
        out_mag = F.softmax(out_mag, dim=1)
        max_val, max_idx = out_mag.max(dim=1)

        for idx in range(data.size(0)):
            print "(batch_index, sample_index, estimated, target) : ", batch_id, idx, max_idx[
                idx].data.cpu().numpy(), target[idx]
            if max_idx[idx].data.cpu().numpy() != target[idx]:
                num_error = num_error + 1
            num_data = num_data + 1
        if opt.vis:
            idx = random.randint(0, data.size(0) - 1)
            show_recon = recon[idx].data.cpu().numpy().reshape(28, 28)
            show_data = data[idx].data.cpu().numpy().reshape(28, 28)

            cv2.namedWindow("recon", cv2.WINDOW_NORMAL)
            cv2.imshow("recon", np.concatenate((show_data, show_recon),
                                               axis=1))
            cv2.waitKey(1)
    print('test error : ', float(num_error) / float(num_data))
Example 7
def main():
    torch.manual_seed(233)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s [INFO] %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, dest='data_path',
                        default='/home/ml/ydong26/data/EditNTS_data/editnet_data/%s/'%dataset,
                        help='Path to train vocab_data')
    parser.add_argument('--store_dir', action='store', dest='store_dir',
                        default='/home/ml/ydong26/tmp_store/editNTS_%s'%dataset,
                        help='Path to exp storage directory.')
    parser.add_argument('--vocab_path', type=str, dest='vocab_path',
                        default='../vocab_data/',
                        help='Path contains vocab, embedding, postag_set')
    parser.add_argument('--load_model', type=str, dest='load_model',
                        default=None,
                        help='Path for loading pre-trained model for further training')

    parser.add_argument('--vocab_size', dest='vocab_size', default=30000, type=int)
    parser.add_argument('--batch_size', dest='batch_size', default=32, type=int)
    parser.add_argument('--max_seq_len', dest='max_seq_len', default=100)

    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--hidden', type=int, default=200)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--device', type=int, default=1,
                        help='select GPU')

    #train_file = '/media/vocab_data/yue/TS/editnet_data/%s/train.df.filtered.pos'%dataset
    # test='/media/vocab_data/yue/TS/editnet_data/%s/test.df.pos' % args.dataset
    args = parser.parse_args()
    torch.cuda.set_device(args.device)

    # load vocab-related files and init vocab
    print('*' * 10)
    vocab = data.Vocab()
    vocab.add_vocab_from_file(args.vocab_path + 'vocab.txt', args.vocab_size)
    vocab.add_embedding(gloveFile=args.vocab_path + 'glove.6B.100d.txt')
    pos_vocab = data.POSvocab(args.vocab_path)  # load pos-tag embeddings
    print('*' * 10)

    print(args)
    print("generating config")
    hyperparams = collections.namedtuple(
        'hps',  # hyperparameters
        ['vocab_size', 'embedding_dim',
         'word_hidden_units', 'sent_hidden_units',
         'pretrained_embedding', 'word2id', 'id2word',
         'pos_vocab_size', 'pos_embedding_dim']
    )
    hps = hyperparams(
        vocab_size=vocab.count,
        embedding_dim=100,
        word_hidden_units=args.hidden,
        sent_hidden_units=args.hidden,
        pretrained_embedding=vocab.embedding,
        word2id=vocab.w2i,
        id2word=vocab.i2w,
        pos_vocab_size=pos_vocab.count,
        pos_embedding_dim=30
    )

    print('init editNTS model')
    edit_net = EditNTS(hps, n_layers=1)
    edit_net.cuda()

    if args.load_model is not None:
        ckpt_path = Checkpoint.get_latest_checkpoint(args.load_model)
        print("load edit_net for further training from %s", ckpt_path)
        ckpt = Checkpoint.load(ckpt_path)
        edit_net = ckpt.model
        edit_net.cuda()
        edit_net.train()

    training(edit_net, args.epochs, args, vocab)
Example 8
def main(args):

    configure(os.path.join(args['exp_dir'], 'log_dir'))

    transform = transforms.Compose([
        transforms.RandomCrop(args['crop_size']),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    data_loader = get_loader({
        'data_dir': args['data_dir'],
        'exp_dir': args['exp_dir'],
        'raw_data_dir': args['raw_data_dir'],
        'batch_size': args['batch_size'],
        'transform': transform,
        'num_workers': args['num_workers'],
        'shuffle': args['shuffle'],
        'mode': 'train'
    })

    #    valid_data_loader=get_loader({'data_dir' : args['data_dir'],
    #                             'raw_data_dir' : args['raw_data_dir'],
    #                             'batch_size' : int(args['batch_size']/4),
    #                             'transform' : transform,
    #                             'num_workers' : args['num_workers'],
    #                             'shuffle' : args['shuffle'],
    #                             'mode':'validate'})

    args['vocab_size'] = len(Vocabulary.load_vocab(args['exp_dir']))

    encoder = EncoderCNN(args).train()
    decoder = DecoderRNN(args).train()

    if args['pretrained']:
        checkpoint_path = Checkpoint.get_latest_checkpoint(args['exp_dir'])
        checkpoint = Checkpoint.load(checkpoint_path)
        encoder.load_state_dict(checkpoint.encoder)
        decoder.load_state_dict(checkpoint.decoder)
        step = checkpoint.step
        epoch = checkpoint.epoch
        omit = True

    else:
        step = 0
        epoch = 0
        omit = False

    encoder.to(device)
    decoder.to(device)

    criterion = nn.CrossEntropyLoss()
    params = list(decoder.parameters()) + list(
        encoder.linear.parameters()) + list(encoder.bn.parameters())
    #    params=list(decoder.parameters()) + list(encoder.parameters())
    optimizer = torch.optim.Adam(params, lr=args['lr'])
    scheduler = StepLR(optimizer, step_size=40, gamma=0.1)
    #    optimizer=YFOptimizer(params)

    total_step = len(data_loader)
    min_valid_loss = float('inf')

    for epoch in range(epoch, args['num_epochs']):
        scheduler.step()
        for idx, (images, captions, leng) in enumerate(data_loader):

            if omit:
                if idx < (step - total_step * epoch):
                    logger.info(
                        'idx:{},step:{}, epoch:{}, total_step:{}, diss:{}'.
                        format(idx, step, epoch, total_step,
                               step - total_step * epoch))
                    continue
                else:
                    omit = False

            images = images.to(device)
            captions = captions.to(device)
            targets = pack_padded_sequence(captions, leng, batch_first=True)[0]

            features = encoder(images)
            outputs = decoder(features, captions, leng)
            loss = criterion(outputs, targets)
            decoder.zero_grad()
            encoder.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(decoder.parameters(), 5)
            optimizer.step()

            log_value('loss', loss.item(), step)
            step += 1

            if step % args['log_step'] == 0:
                logger.info(
                    'Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'
                    .format(epoch, args['num_epochs'], idx, total_step,
                            loss.item(), np.exp(loss.item())))

            if step % args['valid_step'] == 0:
                #                valid_loss=validate(encoder.eval(),decoder,criterion,valid_data_loader)
                #                if valid_loss<min_valid_loss:
                #                    min_valid_loss=valid_loss
                Checkpoint(encoder, decoder, optimizer, epoch,
                           step).save(args['exp_dir'])
Example 9
# Prepare trainer.
Ash_Ketchum = VAE_Trainer.Trainer(exp_dir=exp_dir,
                                  score_type=score_type,
                                  batch_size=128,
                                  random_seed=42,
                                  print_every=100,
                                  checkpoint_every=10000,
                                  samp_rate=None,
                                  KL_rate=0.99991,
                                  free_bits=125)

# Check to see if we've already started the experiment
resume = False
if resume:
    latest_checkpoint_path = Checkpoint.get_latest_checkpoint("experiment_1")
    print("Resuming training...")
    Ash_Ketchum.train(None, None, n_epochs, train_comp, val_comp, True, None,
                      log_file)
else:
    vocab_size = 275
    encoder_hidden_size = 256
    decoder_hidden_size = 512
    latent_size = 128
    seq_size = 52
    num_layers = 2
    encoder = Encoder(vocab_size, encoder_hidden_size, latent_size, seq_size,
                      num_layers).to(device)
    decoder = Decoder(latent_size, decoder_hidden_size, vocab_size, num_layers,
                      seq_size).to(device)