Example #1
def train(config, argv, name, ROOT_DIR, model, dataset):
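    # Bind the vocabularies once so DataFeed can call batchop with just a list of datapoints.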
    _batchop = partial(batchop, VOCAB=dataset.input_vocab, LABELS=dataset.output_vocab)
    predictor_feed = DataFeed(name, dataset.testset, batchop=_batchop, batch_size=1)
    train_feed     = DataFeed(name, portion(dataset.trainset, config.HPCONFIG.trainset_size),
                              batchop=_batchop, batch_size=config.CONFIG.batch_size)
    
    predictor = Predictor(name,
                          model=model,
                          directory=ROOT_DIR,
                          feed=predictor_feed,
                          repr_function=partial(repr_function,
                                                VOCAB=dataset.input_vocab,
                                                LABELS=dataset.output_vocab,
                                                dataset=dataset.testset_dict))

    loss_ = partial(loss, loss_function=nn.NLLLoss())
Example #2
def experiment(VOCAB, raw_samples, datapoints=[[], []], eons=1000, epochs=10, checkpoint=5):
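    # NOTE: datapoints has a mutable default argument, shared across calls; safe only if it is never mutated in place.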
    try:
        encoder = Encoder(Config(), 'encoder', len(VOCAB))
        decoder = PtrDecoder(Config(), 'decoder', encoder.embed, VOCAB['GO'], len(VOCAB))
        try:
            encoder.load_state_dict(torch.load('{}.{}.{}'.format(SELF_NAME, 'encoder', 'pth')))
            decoder.load_state_dict(torch.load('{}.{}.{}'.format(SELF_NAME, 'decoder', 'pth')))
            log.info('loaded the old image for the model')
        except Exception:
            log.exception('failed to load the model')

        if Config().cuda:
            log.info('cuda the model...')
            encoder.cuda()
            decoder.cuda()

        model = (encoder, decoder)
        print('**** the model', model)

        name = os.path.basename(__file__).replace('.py', '')
        
        _batchop = partial(batchop, WORD2INDEX=VOCAB)
        train_feed     = DataFeed(name, datapoints[0], batchop=_batchop, batch_size=100)
        test_feed      = DataFeed(name, datapoints[1], batchop=_batchop, batch_size=100)
        predictor_feed = DataFeed(name, datapoints[1], batchop=_batchop, batch_size=100)

        _loss = partial(loss, loss_function=nn.NLLLoss(), UNK=VOCAB['UNK'])
        _accuracy = partial(accuracy,  UNK=VOCAB['UNK'])
        trainer = Trainer(name=name,
                          model=(encoder, decoder),
                          loss_function=_loss, accuracy_function=_accuracy, f1score_function=f1score,
                          checkpoint=checkpoint, epochs=epochs,
                          feeder=Feeder(train_feed, test_feed))

        _repr_function=partial(repr_function, VOCAB=VOCAB, raw_samples=raw_samples)
        _process_predictor_output = partial(process_predictor_output, UNK=VOCAB['UNK'])
        predictor = Predictor(model = (encoder, decoder),
                              feed  = predictor_feed,
                              repr_function  = _repr_function,
                              process_output = _process_predictor_output)

        dump = open('results/experiment_attn.csv', 'w')        
        for e in range(eons):
            log.info('on {}th eon'.format(e))

            dump.write('#========================after eon: {}\n'.format(e))
            results = ListTable()
            for ri in tqdm(range(predictor_feed.num_batch//10)):
                output, _results = predictor.predict(predictor_feed.num_batch - ri - 1, 3)
                results.extend(_results)
                
            dump.write(repr(results))
            dump.flush()

            if not trainer.train():
                raise Exception
    except Exception:
        log.exception('####################')
        trainer.save_best_model()

        return locals()
Example #3
    def __init__(
        self,
        config,
        name,
        # feeds
        dataset,

        # loss function
        loss_function=nn.NLLLoss(),
        accuracy_function=None,
        f1score_function=None,
        save_model_weights=True,
        epochs=1000,
        checkpoint=1,
        early_stopping=True,

        # optimizer
        optimizer=None,
    ):

        super(Model, self).__init__(config, name)
        self.input_vocab_size = len(dataset.input_vocab)
        self.output_vocab_size = len(dataset.output_vocab)
        self.hidden_dim = config.HPCONFIG.hidden_dim
        self.embed_dim = config.HPCONFIG.embed_dim

        self.embed = nn.Embedding(self.input_vocab_size, self.embed_dim)
        self.encode = nn.LSTM(self.embed_dim,
                              self.hidden_dim,
                              bidirectional=True,
                              num_layers=config.HPCONFIG.num_layers)

        self.classify = nn.Linear(2 * self.hidden_dim, self.output_vocab_size)

        self.loss_function = loss_function if loss_function else nn.NLLLoss()

        if accuracy_function:
            self.accuracy_function = accuracy_function
        else:
            self.accuracy_function = lambda *x, **xx: 1 / loss_function(
                *x, **xx)

        self.optimizer = optimizer if optimizer else optim.SGD(
            self.parameters(), lr=0.0001, momentum=0.1)

        self.f1score_function = f1score_function

        self.epochs = epochs
        self.checkpoint = checkpoint
        self.early_stopping = early_stopping

        self.dataset = dataset
        self.train_feed = DataFeed(dataset.name + '.train',
                                   self.dataset.trainset,
                                   batchop=self.batchop,
                                   batch_size=self.config.CONFIG.batch_size)

        self.test_feed = DataFeed(dataset.name + '.test',
                                  self.dataset.testset,
                                  batchop=self.batchop,
                                  batch_size=self.config.CONFIG.batch_size)

        self.save_model_weights = save_model_weights

        self.__build_stats__()

        self.best_model_criteria = self.accuracy

        if config.CONFIG.cuda:
            self.cuda()
Example #4
class Model(Base):
    def __init__(
        self,
        config,
        name,
        # feeds
        dataset,

        # loss function
        loss_function=nn.NLLLoss(),
        accuracy_function=None,
        f1score_function=None,
        save_model_weights=True,
        epochs=1000,
        checkpoint=1,
        early_stopping=True,

        # optimizer
        optimizer=None,
    ):

        super(Model, self).__init__(config, name)
        self.input_vocab_size = len(dataset.input_vocab)
        self.output_vocab_size = len(dataset.output_vocab)
        self.hidden_dim = config.HPCONFIG.hidden_dim
        self.embed_dim = config.HPCONFIG.embed_dim

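        # Embedding -> bidirectional LSTM encoder -> linear classifier over the
        # concatenated forward/backward states (2 * hidden_dim).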
        self.embed = nn.Embedding(self.input_vocab_size, self.embed_dim)
        self.encode = nn.LSTM(self.embed_dim,
                              self.hidden_dim,
                              bidirectional=True,
                              num_layers=config.HPCONFIG.num_layers)

        self.classify = nn.Linear(2 * self.hidden_dim, self.output_vocab_size)

        self.loss_function = loss_function if loss_function else nn.NLLLoss()

        if accuracy_function:
            self.accuracy_function = accuracy_function
        else:
            self.accuracy_function = lambda *x, **xx: 1 / loss_function(
                *x, **xx)

        self.optimizer = optimizer if optimizer else optim.SGD(
            self.parameters(), lr=0.0001, momentum=0.1)

        self.f1score_function = f1score_function

        self.epochs = epochs
        self.checkpoint = checkpoint
        self.early_stopping = early_stopping

        self.dataset = dataset
        self.train_feed = DataFeed(dataset.name + '.train',
                                   self.dataset.trainset,
                                   batchop=self.batchop,
                                   batch_size=self.config.CONFIG.batch_size)

        self.test_feed = DataFeed(dataset.name + '.test',
                                  self.dataset.testset,
                                  batchop=self.batchop,
                                  batch_size=self.config.CONFIG.batch_size)

        self.save_model_weights = save_model_weights

        self.__build_stats__()

        self.best_model_criteria = self.accuracy

        if config.CONFIG.cuda:
            self.cuda()

    def restore_and_save(self):
        self.restore_checkpoint()
        self.save_best_model()

    def init_hidden(self, batch_size):
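        # Zero initial state shaped (num_directions, batch, hidden); assumes a single-layer bidirectional LSTM.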
        ret = torch.zeros(2, batch_size, self.hidden_dim)
        if self.config.CONFIG.cuda: ret = ret.cuda()
        return Variable(ret)

    def batchop(self, datapoints, for_prediction=False, *args, **kwargs):
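        # Turn raw datapoints into (ids, (padded id-sequences,), labels); labels are skipped when batching for prediction.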
        indices = [d.id for d in datapoints]
        sequence = []
        label = []

        for d in datapoints:
            sequence.append([self.dataset.input_vocab[w] for w in d.sequence])

            if not for_prediction:
                label.append(self.dataset.output_vocab[d.label])

        sequence = LongVar(self.config, pad_seq(sequence))
        if not for_prediction:
            label = LongVar(self.config, label)

        batch = indices, (sequence, ), (label)
        return batch

    def forward(self, input_):
        ids, (seq, ), _ = input_
        if seq.dim() == 1: seq = seq.unsqueeze(0)

        batch_size, seq_size = seq.size()
        seq_emb = F.tanh(self.embed(seq))
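        # (batch, seq, embed) -> (seq, batch, embed): nn.LSTM defaults to sequence-first input.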
        seq_emb = seq_emb.transpose(1, 0)
        pad_mask = (seq > 0).float()
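        # NOTE: pad_mask is computed but never used below; index 0 is assumed to be the padding id.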

        states, cell_state = self.encode(seq_emb)

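        # Classify from the final time step; the bidirectional output already concatenates both directions.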
        logits = self.classify(states[-1])

        return F.log_softmax(logits, dim=-1)

    def do_train(self):
        for epoch in range(self.epochs):
            self.log.critical('memory consumed : {}'.format(memory_consumed()))
            self.epoch = epoch
            if epoch % max(1, (self.checkpoint - 1)) == 0:
                #self.do_predict()
                if self.do_validate() == FLAGS.STOP_TRAINING:
                    self.log.info('loss trend suggests to stop training')
                    return

            self.train()
            losses = []
            for j in tqdm(range(self.train_feed.num_batch),
                          desc='Trainer.{}'.format(self.name())):
                self.optimizer.zero_grad()
                input_ = self.train_feed.next_batch()
                idxs, inputs, targets = input_

                output = self.forward(input_)
                loss = self.loss_function(output, targets)
                #print(loss.data.cpu().numpy())
                losses.append(loss)
                loss.backward()
                self.optimizer.step()

            epoch_loss = torch.stack(losses).mean()
            self.train_loss.append(epoch_loss.data.item())

            self.log.info('-- {} -- loss: {}\n'.format(epoch, epoch_loss))
            for m in self.metrics:
                m.write_to_file()

        return True

    def do_validate(self):
        self.eval()
        if self.test_feed.num_batch > 0:
            losses, accuracies = [], []
            for j in tqdm(range(self.test_feed.num_batch),
                          desc='Tester.{}'.format(self.name())):
                input_ = self.test_feed.next_batch()
                idxs, inputs, targets = input_

                output = self.forward(input_)
                loss = self.loss_function(output, targets)
                accuracy = self.accuracy_function(output, targets)

                losses.append(loss)
                accuracies.append(accuracy)

            epoch_loss = torch.stack(losses).mean()
            epoch_accuracy = torch.stack(accuracies).mean()

            self.test_loss.append(epoch_loss.data.item())
            self.accuracy.append(epoch_accuracy.data.item())
            #print('====', self.test_loss, self.accuracy)

            self.log.info('= {} =loss:{}'.format(self.epoch, epoch_loss))
            self.log.info('- {} -accuracy:{}'.format(self.epoch,
                                                     epoch_accuracy))

        if (len(self.best_model_criteria) > 1
                and self.best_model[0] < self.best_model_criteria[-1]):
            self.log.info('beat best ..')
            self.best_model = (self.best_model_criteria[-1],
                               self.cpu().state_dict())

            self.save_best_model()

            if self.config.CONFIG.cuda:
                self.cuda()

        for m in self.metrics:
            m.write_to_file()

        if self.early_stopping:
            return self.loss_trend()

    def do_predict(self, input_=None):
        if not input_:
            input_ = self.train_feed.nth_batch(
                random.randint(0, self.train_feed.size), 1)

        output = self.forward(input_)
        output = output.max(1)[1].long()
        print(output.size())

        ids, (sequence, ), (label) = input_
        print(' '.join([
            self.dataset.input_vocab[i.data[0]] for i in sequence[0]
        ]).replace('@@ ', ''))
        print(self.dataset.output_vocab[output.data[0]], ' ==? ',
              self.dataset.output_vocab[label.data[0]])

        return True
Example #5
    log.info('dataset size: {}'.format(len(dataset.trainset)))
    for i in range(10):
        log.info('random sample: {}'.format(
            pformat(random.choice(dataset.trainset))))

    #log.info('vocab: {}'.format(pformat(dataset.output_vocab.freq_dict)))
    ########################################################################################
    # load model snapshot data
    ########################################################################################
    _batchop = partial(batchop,
                       VOCAB=dataset.input_vocab,
                       GENDER=dataset.gender_vocab,
                       config=config)
    train_feed = DataFeed(SELF_NAME,
                          dataset.trainset,
                          batchop=_batchop,
                          batch_size=config.CONFIG.batch_size)

    pretrain_feed = DataFeed(SELF_NAME,
                             dataset.pretrainset,
                             batchop=_batchop,
                             batch_size=config.CONFIG.batch_size)

    loss_ = partial(loss, loss_function=nn.NLLLoss())
    test_feed = DataFeed(SELF_NAME,
                         dataset.testset,
                         batchop=_batchop,
                         batch_size=config.CONFIG.batch_size)
    model = Model(
        config,
        'seq2seq_name_gen',
Example #6
def train(config, argv, name, ROOT_DIR,  model, dataset):
    _batchop = partial(batchop, VOCAB=dataset.input_vocab, config=config)
    predictor_feed = DataFeed(name,
                              dataset.testset,
                              batchop = _batchop,
                              batch_size=1)
    
    train_feed     = DataFeed(name,
                              portion(dataset.trainset,
                                      config.HPCONFIG.trainset_size),
                              batchop    = _batchop,
                              batch_size = config.CONFIG.batch_size)
    
    
    loss_ = partial(loss, loss_function=nn.NLLLoss())
    test_feed, tester = {}, {}
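    # One Tester per sub-dataset, plus a combined one over the full testset below.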
    for subset in dataset.datasets:
        test_feed[subset.name]      = DataFeed(subset.name,
                                               subset.testset,
                                               batchop    = _batchop,
                                               batch_size = config.CONFIG.batch_size)

        tester[subset.name] = Tester(name     = subset.name,
                                     config   = config,
                                     model    = model,
                                     directory = ROOT_DIR,
                                     loss_function = loss_,
                                     accuracy_function = loss_,
                                     feed = test_feed[subset.name],
                                     save_model_weights=False)

    test_feed[name]      = DataFeed(name, dataset.testset, batchop=_batchop, batch_size=config.CONFIG.batch_size)

    tester[name] = Tester(name  = name,
                          config   = config,
                          model    = model,
                          directory = ROOT_DIR,
                          loss_function = loss_,
                          accuracy_function = loss_,
                          feed = test_feed[name],
                          predictor=predictor)
    
    
    def do_every_checkpoint(epoch):
        if config.CONFIG.plot_metrics:
            from matplotlib import pyplot as plt
            fig = plt.figure(figsize=(10, 5))
            
        for t in tester.values():
            t.do_every_checkpoint(epoch)

            if config.CONFIG.plot_metrics:
                plt.plot(list(t.loss), label=t.name)

        if config.CONFIG.plot_metrics:
            plt.savefig('loss.png')
            plt.close()

    for e in range(config.CONFIG.EONS):
        if not trainer.train():
            raise Exception
Example #7
def multiplexed_train(config, argv, name, ROOT_DIR, model, dataset):
    _batchop = partial(batchop,
                       VOCAB=dataset.input_vocab,
                       LABELS=dataset.output_vocab)
    predictor_feed = DataFeed(name,
                              dataset.testset,
                              batchop=_batchop,
                              batch_size=1)
    predictor = Predictor(name,
                          model=model,
                          directory=ROOT_DIR,
                          feed=predictor_feed,
                          repr_function=partial(repr_function,
                                                VOCAB=dataset.input_vocab,
                                                LABELS=dataset.output_vocab,
                                                dataset=dataset.testset_dict))

    loss_ = partial(loss, loss_function=nn.NLLLoss())
    test_feed, tester = {}, {}
    train_feed = {}
    for subset in dataset.datasets:
        test_feed[subset.name] = DataFeed(subset.name,
                                          subset.testset,
                                          batchop=_batchop,
                                          batch_size=config.CONFIG.batch_size)
        train_feed[subset.name] = DataFeed(subset.name,
                                           portion(
                                               subset.trainset,
                                               config.HPCONFIG.trainset_size),
                                           batchop=_batchop,
                                           batch_size=config.CONFIG.batch_size)

        tester[subset.name] = Tester(name=subset.name,
                                     config=config,
                                     model=model,
                                     directory=ROOT_DIR,
                                     loss_function=loss_,
                                     accuracy_function=accuracy,
                                     feed=test_feed[subset.name],
                                     save_model_weights=False)

    test_feed[name] = DataFeed(name,
                               dataset.testset,
                               batchop=_batchop,
                               batch_size=config.CONFIG.batch_size)

    tester[name] = Tester(name=name,
                          config=config,
                          model=model,
                          directory=ROOT_DIR,
                          loss_function=loss_,
                          accuracy_function=accuracy,
                          feed=test_feed[name],
                          predictor=predictor)

    train_feed_muxed = MultiplexedDataFeed(name, train_feed, _batchop,
                                           config.CONFIG.batch_size)
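    # MultiplexedDataFeed presumably interleaves the per-subset train feeds into a single batch stream for the trainer.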
    trainer = MultiplexedTrainer(
        name=name,
        config=config,
        model=model,
        directory=ROOT_DIR,
        optimizer=optim.Adam(model.parameters()),
        loss_function=loss_,
        testers=tester,
        checkpoint=config.CONFIG.CHECKPOINT,
        epochs=config.CONFIG.EPOCHS,
        feed=train_feed_muxed,
    )

    for e in range(config.CONFIG.EONS):

        if not trainer.train():
            raise Exception

        dump = open('{}/results/eon_{}.csv'.format(ROOT_DIR, e), 'w')
        log.info('on {}th eon'.format(e))
        results = ListTable()
        for ri in tqdm(range(predictor_feed.num_batch),
                       desc='\nrunning prediction on eon: {}'.format(e)):
            output, _results = predictor.predict(ri)
            results.extend(_results)
        dump.write(repr(results))
        dump.close()
Example #8
        dataset = load_data(config, char_level=False)
        pickle.dump(dataset, open('{}__cache.pkl'.format(SELF_NAME), 'wb'))
    else:
        dataset = pickle.load(open('{}__cache.pkl'.format(SELF_NAME), 'rb'))

    log.info('dataset size: {}'.format(len(dataset.trainset)))
    log.info('random sample: {}'.format(
        pformat(random.choice(dataset.trainset))))

    #log.info('vocab: {}'.format(pformat(dataset.output_vocab.freq_dict)))
    ########################################################################################
    # load model snapshot data
    ########################################################################################
    _batchop = partial(batchop, VOCAB=dataset.input_vocab, config=config)
    predictor_feed = DataFeed(SELF_NAME,
                              dataset.testset,
                              batchop=_batchop,
                              batch_size=1)

    train_feed = DataFeed(SELF_NAME,
                          portion(dataset.trainset,
                                  config.HPCONFIG.trainset_size),
                          batchop=_batchop,
                          batch_size=config.CONFIG.batch_size)

    loss_ = partial(loss, loss_function=nn.NLLLoss())
    test_feed = DataFeed(SELF_NAME,
                         dataset.testset,
                         batchop=_batchop,
                         batch_size=config.CONFIG.batch_size)
    model = LM(
        config,
Example #9
def train(config, argv, name, ROOT_DIR, model, dataset):
    _batchop = partial(batchop,
                       VOCAB=dataset.input_vocab,
                       LABELS=dataset.output_vocab,
                       config=config)
    predictor_feed = DataFeed(name,
                              dataset.testset,
                              batchop=_batchop,
                              batch_size=1)
    train_feed = DataFeed(name,
                          portion(dataset.trainset,
                                  config.HPCONFIG.trainset_size),
                          batchop=_batchop,
                          batch_size=config.CONFIG.batch_size)

    predictor = Predictor(name,
                          model=model,
                          directory=ROOT_DIR,
                          feed=predictor_feed,
                          repr_function=partial(repr_function,
                                                VOCAB=dataset.input_vocab,
                                                LABELS=dataset.output_vocab,
                                                dataset=dataset.testset_dict))

    loss_ = partial(loss, loss_function=nn.NLLLoss())
    test_feed, tester = {}, {}
    for subset in dataset.datasets:
        test_feed[subset.name] = DataFeed(subset.name,
                                          subset.testset,
                                          batchop=_batchop,
                                          batch_size=config.CONFIG.batch_size)

        tester[subset.name] = Tester(name=subset.name,
                                     config=config,
                                     model=model,
                                     directory=ROOT_DIR,
                                     loss_function=loss_,
                                     accuracy_function=accuracy,
                                     feed=test_feed[subset.name],
                                     save_model_weights=False)

    test_feed[name] = DataFeed(name,
                               dataset.testset,
                               batchop=_batchop,
                               batch_size=config.CONFIG.batch_size)

    tester[name] = Tester(name=name,
                          config=config,
                          model=model,
                          directory=ROOT_DIR,
                          loss_function=loss_,
                          accuracy_function=accuracy,
                          feed=test_feed[name],
                          predictor=predictor)

    def do_every_checkpoint(epoch):
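        # At each checkpoint, run every tester and optionally plot their accuracy curves to accuracy.png.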
        if config.CONFIG.plot_metrics:
            from matplotlib import pyplot as plt
            fig = plt.figure(figsize=(10, 5))

        for t in tester.values():
            t.do_every_checkpoint(epoch)

            if config.CONFIG.plot_metrics:
                plt.plot(list(t.accuracy), label=t.name)

        if config.CONFIG.plot_metrics:
            plt.savefig('accuracy.png')
            plt.close()

    trainer = Trainer(
        name=name,
        config=config,
        model=model,
        directory=ROOT_DIR,
        optimizer=optim.Adam(model.parameters()),
        loss_function=loss_,
        checkpoint=config.CONFIG.CHECKPOINT,
        do_every_checkpoint=do_every_checkpoint,
        epochs=config.CONFIG.EPOCHS,
        feed=train_feed,
    )

    for e in range(config.CONFIG.EONS):

        if not trainer.train():
            raise Exception
        """
Example #10
def experiment(VOCAB,
               raw_samples,
               datapoints=[[], []],
               eons=1000,
               epochs=10,
               checkpoint=5):
    try:
        try:
            model = DCN(Config(), 'model', len(VOCAB))
            if Config().cuda: model = model.cuda()
            model.load_state_dict(torch.load('{}.{}'.format(SELF_NAME, 'pth')))
            log.info('loaded the old image for the model')
        except Exception:
            log.exception('failed to load the model')
            model = DCN(Config(), 'model', len(VOCAB))
            if Config().cuda: model = model.cuda()

        print('**** the model', model)

        name = os.path.basename(__file__).replace('.py', '')

        _batchop = partial(batchop, WORD2INDEX=VOCAB)
        train_feed = DataFeed(name,
                              datapoints[0],
                              batchop=_batchop,
                              batch_size=16)
        test_feed = DataFeed(name,
                             datapoints[1],
                             batchop=_batchop,
                             batch_size=16)
        predictor_feed = DataFeed(name,
                                  datapoints[1],
                                  batchop=_batchop,
                                  batch_size=16)

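        # Down-weight class 0 (presumably padding) relative to the two real labels.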
        loss_weight = Variable(torch.Tensor([0.1, 1, 1]))
        if Config().cuda: loss_weight = loss_weight.cuda()
        _loss = partial(loss, loss_function=nn.NLLLoss(loss_weight))
        trainer = Trainer(name=name,
                          model=model,
                          loss_function=_loss,
                          accuracy_function=accuracy,
                          f1score_function=f1score,
                          checkpoint=checkpoint,
                          epochs=epochs,
                          feeder=Feeder(train_feed, test_feed))

        _repr_function = partial(repr_function,
                                 VOCAB=VOCAB,
                                 raw_samples=raw_samples)
        predictor = Predictor(model=model,
                              feed=predictor_feed,
                              repr_function=_repr_function)

        for e in range(eons):
            log.info('on {}th eon'.format(e))

            with open('results/experiment_attn.csv', 'a') as dump:
                dump.write('#========================after eon: {}\n'.format(e))
                results = ListTable()
                for ri in tqdm(range(predictor_feed.num_batch // 100)):
                    output, _results = predictor.predict(ri)
                    results.extend(_results)
                dump.write(repr(results))
            log.info('on {}th eon training....'.format(e))

            if not trainer.train():
                raise Exception

    except Exception:
        log.exception('####################')
        trainer.save_best_model()

        return locals()
Example #11
def experiment(config,
               ROOT_DIR,
               model,
               VOCAB,
               LABELS,
               datapoints=[[], [], []],
               eons=1000,
               epochs=20,
               checkpoint=1):
    try:
        name = SELF_NAME
        _batchop = partial(batchop, VOCAB=VOCAB, LABELS=LABELS)
        train_feed = DataFeed(name,
                              datapoints[0],
                              batchop=_batchop,
                              batch_size=config.HPCONFIG.batch_size)
        test_feed = DataFeed(name,
                             datapoints[1],
                             batchop=_batchop,
                             batch_size=config.HPCONFIG.batch_size)
        predictor_feed = DataFeed(name,
                                  datapoints[2],
                                  batchop=_batchop,
                                  batch_size=1)

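        # Inverse-frequency class weights: weight_i = max_freq / freq_i, so rarer labels count more in the loss.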
        max_freq = max(LABELS.freq_dict[i] for i in LABELS.index2word)
        loss_weight = [
            1 / (LABELS.freq_dict[i] / max_freq) for i in LABELS.index2word
        ]
        print(list((l, w) for l, w in zip(LABELS.index2word, loss_weight)))
        loss_weight = Var(loss_weight)

        loss_ = partial(loss, loss_function=nn.NLLLoss(loss_weight))
        trainer = Trainer(name=name,
                          model=model,
                          optimizer=optim.SGD(
                              model.parameters(),
                              lr=config.HPCONFIG.OPTIM.lr,
                              momentum=config.HPCONFIG.OPTIM.momentum),
                          loss_function=loss_,
                          accuracy_function=waccuracy,
                          f1score_function=f1score,
                          checkpoint=checkpoint,
                          epochs=epochs,
                          directory=ROOT_DIR,
                          feeder=Feeder(train_feed, test_feed))

        predictor = Predictor(model=model.clone(),
                              feed=predictor_feed,
                              repr_function=partial(test_repr_function,
                                                    VOCAB=VOCAB,
                                                    LABELS=LABELS))

        for e in range(eons):

            if not trainer.train():
                raise Exception

            predictor.model.load_state_dict(trainer.best_model[1])

            dump = open('{}/results/eon_{}.csv'.format(ROOT_DIR, e), 'w')
            log.info('on {}th eon'.format(e))
            results = ListTable()
            for ri in tqdm(range(predictor_feed.num_batch)):
                output, _results = predictor.predict(ri)
                results.extend(_results)
            dump.write(repr(results))
            dump.close()

    except KeyboardInterrupt:
        return locals()
    except Exception:
        log.exception('####################')
        return locals()
Example #12
def train(config, argv, name, ROOT_DIR, model, dataset):
    _batchop = partial(batchop, VOCAB=dataset.input_vocab)
    predictor_feed = DataFeed(name, dataset.testset, batchop=_batchop, batch_size=1)
    train_feed     = DataFeed(name,
                              portion(dataset.trainset, config.HPCONFIG.trainset_size),
                              batchop=_batchop,
                              batch_size=config.CONFIG.batch_size)
    
    predictor = Predictor(name,
                          model=model,
                          directory=ROOT_DIR,
                          feed=predictor_feed,
                          repr_function=partial(repr_function,
                                                VOCAB=dataset.input_vocab,
                                                dataset=dataset.testset_dict))

    loss_ = partial(loss, loss_function=nn.NLLLoss())
    test_feed, tester = {}, {}
    
    def acc(*args, **kwargs):
        return -1 * loss_(*args, **kwargs)
    
    for subset in dataset.datasets:
        test_feed[subset.name]      = DataFeed(subset.name,
                                               subset.testset,
                                               batchop=_batchop,
                                               batch_size=config.CONFIG.batch_size)

        tester[subset.name] = Tester(name     = subset.name,
                                     config   = config,
                                     model    = model,
                                     directory = ROOT_DIR,
                                     loss_function = loss_,
                                     accuracy_function = acc,
                                     feed = test_feed[subset.name],
                                     save_model_weights=False)

    test_feed[name]      = DataFeed(name,
                                    dataset.testset,
                                    batchop=_batchop,
                                    batch_size=config.CONFIG.batch_size)

    tester[name] = Tester(name      = name,
                          config    = config,
                          model     = model,
                          directory = ROOT_DIR,
                          loss_function = loss_,
                          accuracy_function = acc,
                          feed = test_feed[name],
                          predictor = predictor)


    def do_every_checkpoint(epoch):
        if config.CONFIG.plot_metrics:
            from matplotlib import pyplot as plt
            fig = plt.figure(figsize=(10, 5))
            
        for t in tester.values():
            t.do_every_checkpoint(epoch)

            if config.CONFIG.plot_metrics:
                plt.plot(list(t.loss), label=t.name)

        if config.CONFIG.plot_metrics:
            plt.savefig('loss.png')
            plt.close()

    trainer = Trainer(name=name,
                      config = config,
                      model=model,
                      directory=ROOT_DIR,
                      optimizer  = optim.Adam(model.parameters()),
                      loss_function = loss_,
                      checkpoint = config.CONFIG.CHECKPOINT,
                      do_every_checkpoint = do_every_checkpoint,
                      epochs = config.CONFIG.EPOCHS,
                      feed = train_feed,
    )

    for e in range(config.CONFIG.EONS):

        if not trainer.train():
            raise Exception

        dump = open('{}/results/eon_{}.csv'.format(ROOT_DIR, e), 'w')
        log.info('on {}th eon'.format(e))
        results = ListTable()
        for ri in tqdm(range(predictor_feed.num_batch), desc='running prediction on eon: {}'.format(e)):
            output, _results = predictor.predict(ri)
            results.extend(_results)
        dump.write(repr(results))
        dump.close()
Example #13
def train(config,
          model,
          dataset,
          SELF_NAME,
          ROOT_DIR,
          batchop=batchop,
          loss=loss,
          accuracy=accuracy,
          repr_function=repr_function):
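    # The pipeline hooks (batchop, loss, accuracy, repr_function) default to module-level helpers but can be swapped per call.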
    _batchop = partial(batchop,
                       VOCAB=dataset.input_vocab,
                       LABELS=dataset.output_vocab)
    train_feed = DataFeed(SELF_NAME,
                          dataset.trainset,
                          batchop=_batchop,
                          batch_size=config.HPCONFIG.batch_size)
    predictor_feed = DataFeed(SELF_NAME,
                              dataset.testset,
                              batchop=_batchop,
                              batch_size=1)

    predictor = Predictor(SELF_NAME,
                          model=model,
                          directory=ROOT_DIR,
                          feed=predictor_feed,
                          repr_function=partial(repr_function,
                                                VOCAB=dataset.input_vocab,
                                                LABELS=dataset.output_vocab,
                                                dataset=dataset.testset_dict))

    loss_ = partial(loss, loss_function=nn.NLLLoss())
    test_feed = DataFeed(SELF_NAME,
                         dataset.testset,
                         batchop=_batchop,
                         batch_size=config.HPCONFIG.batch_size)

    tester = Tester(name=SELF_NAME,
                    config=config,
                    model=model,
                    directory=ROOT_DIR,
                    loss_function=loss_,
                    accuracy_function=accuracy,
                    feed=test_feed,
                    predictor=predictor)

    trainer = Trainer(
        name=SELF_NAME,
        config=config,
        model=model,
        directory=ROOT_DIR,
        optimizer=optim.Adam(model.parameters()),
        loss_function=loss_,
        checkpoint=config.CONFIG.CHECKPOINT,
        do_every_checkpoint=tester.do_every_checkpoint,
        epochs=config.CONFIG.EPOCHS,
        feed=train_feed,
    )

    for e in range(config.CONFIG.EONS):

        if not trainer.train():
            raise Exception

        dump = open('{}/results/eon_{}.csv'.format(ROOT_DIR, e), 'w')
        log.info('on {}th eon'.format(e))
        results = ListTable()
        for ri in tqdm(range(predictor_feed.num_batch)):
            output, _results = predictor.predict(ri)
            results.extend(_results)
        dump.write(repr(results))
        dump.close()