Example #1
class Trainer(object):
    TRAIN_DATA_PATH = '{}/train/train_data'.format(DATASET_PATH)

    def __init__(self, hdfs_host: Optional[str] = None):
        self.device = config.device
        self.task = HateSpeech(self.TRAIN_DATA_PATH, [9, 1])
        self.model = BaseLine(config.hidden_dim, config.filter_size,
                              config.dropout,
                              self.task.max_vocab_indexes['syllable_contents'],
                              config.embedding_dim,
                              config.padding)  # keep the saved model's config the same as this one
        # Baseline: self.model = BaseLine(256, 3, 0.2, self.task.max_vocab_indexes['syllable_contents'], 384)
        # print('can use gpu num =', torch.cuda.device_count())
        if torch.cuda.device_count() > 1:
            self.model = nn.DataParallel(self.model)
        self.model.to(config.device)
        # pos_weight (assumed: negative/positive class ratio) counters label imbalance
        self.loss_fn = nn.BCEWithLogitsLoss(
            pos_weight=torch.tensor(config.weights[0] / config.weights[1]))
        self.batch_size = config.batch_size
        self.__test_iter = None
        bind_model(self.model)

    @property
    def test_iter(self) -> Iterator:
        if self.__test_iter is not None:
            self.__test_iter.init_epoch()
            return self.__test_iter
        else:
            self.__test_iter = Iterator(
                self.task.datasets[-1],
                batch_size=self.batch_size,
                repeat=False,
                sort_key=lambda x: len(x.syllable_contents),
                train=False,
                device=self.device)
            return self.__test_iter

    def train(self):
        max_epoch = 50
        optimizer = optim.Adam(self.model.parameters(),
                               lr=config.learning_rate)
        total_len = len(self.task.datasets[0])
        ds_iter = Iterator(self.task.datasets[0],
                           batch_size=self.batch_size,
                           repeat=False,
                           sort_key=lambda x: len(x.syllable_contents),
                           train=True,
                           device=self.device)
        max_acc = 0.
        max_acc_epoch = 0
        for epoch in range(max_epoch):
            loss_sum, acc_sum, len_batch_sum = 0., 0., 0.
            ds_iter.init_epoch()

            self.model.train()
            for batch in ds_iter:
                self.model.zero_grad()
                data = batch.syllable_contents.to(self.device)
                target = batch.eval_reply.reshape(len(batch.eval_reply),
                                                  1).to(self.device)
                pred = self.model(data)
                loss = self.loss_fn(pred, target)

                loss.backward()
                optimizer.step()

                acc = torch.sum(pred.sigmoid().round() == target,
                                dtype=torch.float32)

                len_batch = len(batch)
                len_batch_sum += len_batch
                acc_sum += acc.item()
                loss_sum += loss.item() * len_batch

            pred_lst, loss_avg, acc_sum, te_total = self.eval(
                self.test_iter, len(self.task.datasets[1]))
            acc_current = acc_sum / te_total
            if acc_current > max_acc:
                max_acc = acc_current
                max_acc_epoch = epoch
            nsml.save(epoch)
        print(f'max accuracy = {max_acc} at epoch {max_acc_epoch}')

    def eval(self, data_iter: Iterator,
             total: int) -> Tuple[List[float], float, float, int]:
        pred_lst = list()
        target_lst = list()
        loss_sum = 0.
        acc_sum = 0.

        self.model.eval()
        with torch.no_grad():  # inference only; no gradients needed
            for batch in data_iter:
                data = batch.syllable_contents.to(self.device)
                target = batch.eval_reply.reshape(len(batch.eval_reply),
                                                  1).to(self.device)

                pred = self.model(data)

                accs = torch.sum(pred.sigmoid().round() == target,
                                 dtype=torch.float32)
                losses = self.loss_fn(pred, target)

                pred_lst += pred.sigmoid().round().squeeze(1).tolist()
                acc_sum += accs.item()
                target_lst += batch.eval_reply.tolist()
                loss_sum += losses.item() * len(batch)
        return pred_lst, loss_sum / total, acc_sum, total

    def save_model(self, model, appendix=None):
        file_name = 'model'
        if appendix:
            file_name += '_{}'.format(appendix)
        torch.save({
            'model': model,
            'task': type(self.task).__name__
        }, file_name)
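
A minimal usage sketch for Example #1, assuming the NSML hate-speech baseline environment supplies DATASET_PATH, config, nsml, bind_model, HateSpeech and BaseLine as in the snippet; the argument parsing is an illustration, not a confirmed entry point:

# Hypothetical driver; only Trainer is taken from the snippet above.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='train')  # NSML conventionally passes --mode
    args = parser.parse_args()

    trainer = Trainer()
    if args.mode == 'train':
        trainer.train()  # trains, evaluates each epoch, checkpoints via nsml.save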
Example #2
class Trainer(object):
    TRAIN_DATA_PATH = '{}/train/train_data'.format(DATASET_PATH)

    def __init__(self, hdfs_host: Optional[str] = None, device: str = 'cpu'):
        self.device = device
        self.task = HateSpeech(self.TRAIN_DATA_PATH, (9, 1))
        self.model = BaseLine(256, 3, 0.2,
                              self.task.max_vocab_indexes['syllable_contents'],
                              384)
        self.model.to(self.device)
        self.loss_fn = nn.BCELoss()
        self.batch_size = 128
        self.__test_iter = None
        bind_model(self.model)

    @property
    def test_iter(self) -> Iterator:
        if self.__test_iter is not None:
            self.__test_iter.init_epoch()
            return self.__test_iter
        else:
            self.__test_iter = Iterator(
                self.task.datasets[1],
                batch_size=self.batch_size,
                repeat=False,
                sort_key=lambda x: len(x.syllable_contents),
                train=False,
                device=self.device)
            return self.__test_iter

    def train(self):
        max_epoch = 32
        optimizer = optim.Adam(self.model.parameters())
        total_len = len(self.task.datasets[0])
        ds_iter = Iterator(self.task.datasets[0],
                           batch_size=self.batch_size,
                           repeat=False,
                           sort_key=lambda x: len(x.syllable_contents),
                           train=True,
                           device=self.device)
        min_iters = 10
        for epoch in range(max_epoch):
            loss_sum, acc_sum, len_batch_sum = 0., 0., 0.
            ds_iter.init_epoch()
            tr_total = math.ceil(total_len / self.batch_size)
            tq_iter = tqdm(
                enumerate(ds_iter),
                total=tr_total,
                miniters=min_iters,
                unit_scale=self.batch_size,
                bar_format=
                '{n_fmt}/{total_fmt} [{elapsed}<{remaining} {rate_fmt}] {desc}'
            )

            self.model.train()
            for i, batch in tq_iter:
                self.model.zero_grad()
                pred = self.model(batch.syllable_contents)
                acc = torch.sum((torch.reshape(pred, [-1]) >
                                 0.5) == (batch.eval_reply > 0.5),
                                dtype=torch.float32)
                loss = self.loss_fn(pred, batch.eval_reply)
                loss.backward()
                optimizer.step()

                len_batch = len(batch)
                len_batch_sum += len_batch
                acc_sum += acc.tolist()
                loss_sum += loss.tolist() * len_batch
                if i % min_iters == 0:
                    tq_iter.set_description(
                        '{:2} loss: {:.5}, acc: {:.5}'.format(
                            epoch, loss_sum / len_batch_sum,
                            acc_sum / len_batch_sum), True)
                if i == 3000:  # hard cap on training batches per epoch
                    break

            tq_iter.set_description(
                '{:2} loss: {:.5}, acc: {:.5}'.format(epoch,
                                                      loss_sum / total_len,
                                                      acc_sum / total_len),
                True)

            print(
                json.dumps({
                    'type': 'train',
                    'dataset': 'hate_speech',
                    'epoch': epoch,
                    'loss': loss_sum / total_len,
                    'acc': acc_sum / total_len
                }))

            pred_lst, loss_avg, acc_lst, te_total = self.eval(
                self.test_iter, len(self.task.datasets[1]))
            print(
                json.dumps({
                    'type': 'test',
                    'dataset': 'hate_speech',
                    'epoch': epoch,
                    'loss': loss_avg,
                    'acc': sum(acc_lst) / te_total
                }))
            nsml.save(epoch)
            self.save_model(self.model, 'e{}'.format(epoch))

    def eval(self, data_iter: Iterator,
             total: int) -> Tuple[List[float], float, List[float], int]:
        tq_iter = tqdm(enumerate(data_iter),
                       total=math.ceil(total / self.batch_size),
                       unit_scale=self.batch_size,
                       bar_format='{r_bar}')
        pred_lst = list()
        loss_sum = 0.
        acc_lst = list()

        self.model.eval()
        with torch.no_grad():  # inference only; no gradients needed
            for i, batch in tq_iter:
                preds = self.model(batch.syllable_contents)
                accs = torch.eq(preds > 0.5,
                                batch.eval_reply > 0.5).to(torch.float)
                losses = self.loss_fn(preds, batch.eval_reply)
                pred_lst += preds.tolist()
                acc_lst += accs.tolist()
                loss_sum += losses.item() * len(batch)
        return pred_lst, loss_sum / total, acc_lst, total

    def save_model(self, model, appendix=None):
        file_name = 'model'
        if appendix:
            file_name += '_{}'.format(appendix)
        torch.save({
            'model': model,
            'task': type(self.task).__name__
        }, file_name)
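
For reference, a short sketch of reading back a checkpoint written by save_model above; the file name ('model_e0') and map_location are assumptions for illustration:

import torch

# save_model pickles the full module under 'model' and the task class
# name under 'task' (layout taken from the snippet above).
# On PyTorch >= 2.6, pass weights_only=False since a whole nn.Module is stored.
checkpoint = torch.load('model_e0', map_location='cpu')
model = checkpoint['model']
print(checkpoint['task'])  # e.g. 'HateSpeech'
model.eval()  # ready for inference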