Code Example #1
File: inference.py Project: ddxue/cs224u-project
def main():
    # **************************** Basic Info ***********************
    logger = init_logger(log_name=config['model']['arch'],
                         log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = 'cuda:%d' % config['train']['n_gpu'][0] if len(
        config['train']['n_gpu']) else 'cpu'
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}
    # **************************** Data Generation ***********************
    DT = DataTransformer(logger=logger, seed=config['train']['seed'])

    # read the test set from disk
    targets, sentences = DT.read_data(
        raw_data_path=config['data']['test_file_path'],
        preprocessor=EnglishPreProcessor(),
        is_train=False)
    tokenizer = BertTokenizer(
        vocab_file=config['pretrained']['bert']['vocab_path'],
        do_lower_case=config['train']['do_lower_case'])
    # test dataset
    test_dataset = CreateDataset(data=list(zip(sentences, targets)),
                                 tokenizer=tokenizer,
                                 max_seq_len=config['train']['max_seq_len'],
                                 seed=config['train']['seed'],
                                 example_type='test')
    # test data loader
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config['train']['batch_size'],
                             num_workers=config['train']['num_workers'],
                             shuffle=False,
                             drop_last=False,
                             pin_memory=False)

    # **************************** Model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(
        config['pretrained']['bert']['bert_model_dir'],
        cache_dir=config['output']['cache_dir'],
        num_classes=len(id2label))
    # **************************** predicting ***********************
    logger.info('model predicting....')
    predicter = Predicter(
        model=model,
        logger=logger,
        n_gpu=config['train']['n_gpu'],
        model_path=config['output']['checkpoint_dir'] /
        f"best_{config['model']['arch']}_model.pth",
    )
    # run prediction on the test set
    result = predicter.predict(data=test_loader)
    print(result)

    # release GPU memory
    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
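A note on the seed_everything helper used throughout these examples: its definition is not shown on this page, but a minimal sketch of what such a helper typically does (assuming Python's random module, NumPy, and PyTorch are the only RNG sources; the body below is illustrative, not the project's actual code) is:

import os
import random

import numpy as np
import torch


def seed_everything(seed=42, device='cpu'):
    # seed every random-number source so runs are reproducible
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if 'cuda' in device:
        torch.cuda.manual_seed_all(seed)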
Code Example #2
def main():
    # **************************** Log initial data ***********************
    logger = init_logger(log_name=config['model']['arch'],
                         log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = f"cuda:{config['train']['n_gpu'][0]}" if len(
        config['train']['n_gpu']) else 'cpu'
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}

    DT = DataTransformer(logger=logger, seed=config['train']['seed'])

    targets, sentences = DT.read_data(
        raw_data_path=config['data']['raw_data_path'],
        preprocessor=EnglishPreProcessor(),
        is_train=True)

    train, valid = DT.train_val_split(
        X=sentences,
        y=targets,
        save=True,
        shuffle=True,
        stratify=False,
        valid_size=config['train']['valid_size'],
        train_path=config['data']['train_file_path'],
        valid_path=config['data']['valid_file_path'])

    tokenizer = BertTokenizer(
        vocab_file=config['pretrained']['bert']['vocab_path'],
        do_lower_case=config['train']['do_lower_case'])

    # train
    train_dataset = CreateDataset(data=train,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data=valid,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='valid')
    # train loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=True,
                              drop_last=False,
                              pin_memory=False)
    # validation set loader
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** initialize model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(
        config['pretrained']['bert']['bert_model_dir'],
        cache_dir=config['output']['cache_dir'],
        num_classes=len(id2label))

    # ************************** set params *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # exclude biases and LayerNorm parameters from weight decay
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]

    num_train_steps = int(
        len(train_dataset.examples) / config['train']['batch_size'] /
        config['train']['gradient_accumulation_steps'] *
        config['train']['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total for the warmup
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['train']['learning_rate'],
                         warmup=config['train']['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # model checkpoint
    model_checkpoint = ModelCheckpoint(
        checkpoint_dir=config['output']['checkpoint_dir'],
        mode=config['callbacks']['mode'],
        monitor=config['callbacks']['monitor'],
        save_best_only=config['callbacks']['save_best_only'],
        arch=config['model']['arch'],
        logger=logger)
    # monitor
    train_monitor = TrainingMonitor(file_dir=config['output']['figure_dir'],
                                    arch=config['model']['arch'])
    # learning rate scheduler
    lr_scheduler = BertLR(optimizer=optimizer,
                          learning_rate=config['train']['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['train']['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')

    train_configs = {
        'model': model,
        'logger': logger,
        'optimizer': optimizer,
        'resume': config['train']['resume'],
        'epochs': config['train']['epochs'],
        'n_gpu': config['train']['n_gpu'],
        'gradient_accumulation_steps':
            config['train']['gradient_accumulation_steps'],
        'epoch_metrics': [F1Score(average='micro', task_type='binary')],
        'batch_metrics': [AccuracyThresh(thresh=0.5)],
        'criterion': BCEWithLogLoss(),
        'model_checkpoint': model_checkpoint,
        'training_monitor': train_monitor,
        'lr_scheduler': lr_scheduler,
        'early_stopping': None,
        'verbose': 1
    }

    trainer = Trainer(train_configs=train_configs)
    trainer.train(train_data=train_loader, valid_data=valid_loader)
    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
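The warmup and t_total arguments passed to BertAdam above implement a warmup-linear learning-rate schedule: the rate ramps up during the first warmup fraction of t_total steps, then decays linearly. A sketch of that schedule, modeled on the warmup_linear function from pytorch-pretrained-bert (the exact form may differ by library version; the clamp at zero is an assumption):

def warmup_linear(progress, warmup=0.1):
    # progress is current_step / t_total, a float in [0, 1]
    if progress < warmup:
        return progress / warmup       # linear ramp-up during warmup
    return max(1.0 - progress, 0.0)    # then linear decay toward zero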
Code Example #3
def main():
    # **************************** Basic Info  ***********************
    logger = init_logger(log_name=config['arch'], log_dir=config['log_dir'])
    logger.info("seed is %d" % config['seed'])
    device = 'cuda:%d' % config['n_gpus'][0] if len(
        config['n_gpus']) else 'cpu'
    seed_everything(seed=config['seed'], device=device)
    logger.info('starting load data from disk')

    # split the reports
    if config['resume']:
        split_reports = SplitReports(raw_reports_dir=config['raw_reports_dir'],
                                     raw_data_path=config['raw_data_path'])
        split_reports.split()

    df = pd.read_csv(config['raw_data_path'])
    label_list = df.columns.values[2:].tolist()
    config['label_to_id'] = {label: i for i, label in enumerate(label_list)}
    config['id_to_label'] = {i: label for i, label in enumerate(label_list)}
    config['vocab_path'] = path.sep.join(
        [config['bert_model_dir'], 'vocab.txt'])

    # **************************** Data  ***********************
    data_transformer = DataTransformer(logger=logger,
                                       raw_data_path=config['raw_data_path'],
                                       label_to_id=config['label_to_id'],
                                       train_file=config['train_file_path'],
                                       valid_file=config['valid_file_path'],
                                       valid_size=config['valid_size'],
                                       seed=config['seed'],
                                       preprocess=Preprocessor(),
                                       shuffle=config['shuffle'],
                                       skip_header=True,
                                       stratify=False)
    # read and preprocess the raw data
    data_transformer.read_data()

    tokenizer = BertTokenizer(vocab_file=config['vocab_path'],
                              do_lower_case=config['do_lower_case'])

    # train
    train_dataset = CreateDataset(data_path=config['train_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data_path=config['valid_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='valid')
    # when resuming from a checkpoint, skip building the real training
    # loader and use a placeholder instead
    if config['resume']:
        train_loader = [0]
    else:
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=config['batch_size'],
                                  num_workers=config['num_workers'],
                                  shuffle=True,
                                  drop_last=False,
                                  pin_memory=False)
    # valid
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** Model  ***********************
    logger.info("initializing model")
    if config['resume']:
        with open(config['lab_dir'], 'r') as f:
            config['label_to_id'] = load(f)

    model = BertFine.from_pretrained(config['bert_model_dir'],
                                     cache_dir=config['cache_dir'],
                                     num_classes=len(config['label_to_id']))

    # ************************** Optimizer  *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # exclude biases and LayerNorm parameters from weight decay
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    num_train_steps = int(
        len(train_dataset.examples) / config['batch_size'] /
        config['gradient_accumulation_steps'] * config['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total for the warmup
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['learning_rate'],
                         warmup=config['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # save model
    model_checkpoint = ModelCheckpoint(
        checkpoint_dir=config['checkpoint_dir'],
        mode=config['mode'],
        monitor=config['monitor'],
        save_best_only=config['save_best_only'],
        best_model_name=config['best_model_name'],
        epoch_model_name=config['epoch_model_name'],
        arch=config['arch'],
        logger=logger)
    # monitor
    train_monitor = TrainingMonitor(fig_dir=config['figure_dir'],
                                    json_dir=config['log_dir'],
                                    arch=config['arch'])

    # TensorBoard
    start_time = datetime.datetime.now().strftime('%m%d_%H%M%S')
    writer_dir = os.path.join(config['writer_dir'], config['feature-based'],
                              start_time)
    TSBoard = WriterTensorboardX(writer_dir=writer_dir,
                                 logger=logger,
                                 enable=True)
    # learning rate
    lr_scheduler = BertLr(optimizer=optimizer,
                          lr=config['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')
    trainer = Trainer(model=model,
                      train_data=train_loader,
                      val_data=valid_loader,
                      optimizer=optimizer,
                      epochs=config['epochs'],
                      criterion=BCEWithLogLoss(),
                      logger=logger,
                      model_checkpoint=model_checkpoint,
                      training_monitor=train_monitor,
                      TSBoard=TSBoard,
                      resume=config['resume'],
                      lr_scheduler=lr_scheduler,
                      n_gpu=config['n_gpus'],
                      label_to_id=config['label_to_id'],
                      evaluate_auc=AUC(sigmoid=True),
                      evaluate_f1=F1Score(sigmoid=True),
                      incorrect=Incorrect(sigmoid=True))

    trainer.summary()
    trainer.train()

    # release cache
    if len(config['n_gpus']) > 0:
        torch.cuda.empty_cache()
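The num_train_steps value computed above is simply (number of examples / batch size / gradient accumulation steps) * epochs, i.e. the total number of optimizer updates over the run. A quick worked example with made-up values:

# hypothetical values, for illustration only
num_examples = 10000
batch_size = 32
gradient_accumulation_steps = 2
epochs = 4

num_train_steps = int(num_examples / batch_size /
                      gradient_accumulation_steps * epochs)
print(num_train_steps)  # 625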
Code Example #4
File: inference.py Project: suhaschowdary/bert_humor
def main():
    # **************************** log ***********************
    logger = init_logger(log_name=config['model']['arch'], log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = 'cuda:%d' % config['train']['n_gpu'][0] if len(config['train']['n_gpu']) else 'cpu'
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}
    # **************************** data input ***********************
    DT = DataTransformer(logger=logger, seed=config['train']['seed'])

    # read test data
    targets, sentences = DT.read_data(raw_data_path=config['data']['test_file_path'],
                                      preprocessor=EnglishPreProcessor(),
                                      is_train=False)
    tokenizer = BertTokenizer(vocab_file=config['pretrained']['bert']['vocab_path'],
                              do_lower_case=config['train']['do_lower_case'])
    # prepare test dataset
    test_dataset = CreateDataset(data=list(zip(sentences, targets)),
                                 tokenizer=tokenizer,
                                 max_seq_len=config['train']['max_seq_len'],
                                 seed=config['train']['seed'],
                                 example_type='test')
    # pytorch dataloader
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config['train']['batch_size'],
                             num_workers=config['train']['num_workers'],
                             shuffle=False,
                             drop_last=False,
                             pin_memory=False)

    # **************************** start model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(
        config['pretrained']['bert']['bert_model_dir'],
        cache_dir=config['output']['cache_dir'],
        num_classes=len(id2label))
    # **************************** predicting ***********************
    logger.info('model predicting....')

    predicter = Predicter(
        model=model,
        logger=logger,
        n_gpu=config['train']['n_gpu'],
        model_path=config['output']['checkpoint_dir'] /
        f"best_{config['model']['arch']}_model.pth",
    )
    # predict results
    result = predicter.predict(data=test_loader)
    result = np.where(result > 0.5, 1, 0)
    print('accuracy score', accuracy_score(targets, result))
    print('\nF1 score', f1_score(targets, result))
    print('\nclassification report', classification_report(targets, result))

    # empty cache after testing
    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
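On the 0.5 threshold used above: for sigmoid outputs, probability > 0.5 is equivalent to the raw logit being > 0, so the same hard labels could be recovered from logits directly. A small illustration with made-up scores:

import numpy as np

probs = np.array([0.12, 0.51, 0.87])   # made-up sigmoid outputs
labels = np.where(probs > 0.5, 1, 0)   # -> array([0, 1, 1])
print(labels)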
Code Example #5
def main():
    # **************************** Basic Info ***********************
    logger = init_logger(log_name=config['arch'], log_dir=config['log_dir'])
    logger.info("seed is %d" % config['seed'])
    device = 'cuda:%d' % config['n_gpus'][0] if len(
        config['n_gpus']) else 'cpu'
    seed_everything(seed=config['seed'], device=device)
    logger.info('starting load data from disk')
    config['id_to_label'] = {v: k for k, v in config['label_to_id'].items()}
    # **************************** Data Generation ***********************
    data_transformer = DataTransformer(logger=logger,
                                       raw_data_path=config['raw_data_path'],
                                       label_to_id=config['label_to_id'],
                                       train_file=config['train_file_path'],
                                       valid_file=config['valid_file_path'],
                                       valid_size=config['valid_size'],
                                       seed=config['seed'],
                                       preprocess=Preprocessor(),
                                       shuffle=True,
                                       skip_header=True,
                                       stratify=False)
    # read the dataset and create the train/valid split
    data_transformer.read_data()

    tokenizer = BertTokenizer(vocab_file=config['vocab_path'],
                              do_lower_case=config['do_lower_case'])

    # train
    train_dataset = CreateDataset(data_path=config['train_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data_path=config['valid_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='valid')
    # training data loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=True,
                              drop_last=False,
                              pin_memory=False)
    # validation data loader
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** Model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(config['bert_model_dir'],
                                     cache_dir=config['cache_dir'],
                                     num_classes=len(config['label_to_id']))

    # ************************** Optimizer *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # exclude biases and LayerNorm parameters from weight decay
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    num_train_steps = int(
        len(train_dataset.examples) / config['batch_size'] /
        config['gradient_accumulation_steps'] * config['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total for the warmup
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['learning_rate'],
                         warmup=config['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # save the model
    model_checkpoint = ModelCheckpoint(
        checkpoint_dir=config['checkpoint_dir'],
        mode=config['mode'],
        monitor=config['monitor'],
        save_best_only=config['save_best_only'],
        best_model_name=config['best_model_name'],
        epoch_model_name=config['epoch_model_name'],
        arch=config['arch'],
        logger=logger)
    # monitor the training process
    train_monitor = TrainingMonitor(fig_dir=config['figure_dir'],
                                    json_dir=config['log_dir'],
                                    arch=config['arch'])
    # learning rate schedule
    lr_scheduler = BertLr(optimizer=optimizer,
                          lr=config['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')
    trainer = Trainer(model=model,
                      train_data=train_loader,
                      val_data=valid_loader,
                      optimizer=optimizer,
                      epochs=config['epochs'],
                      criterion=BCEWithLogLoss(),
                      logger=logger,
                      model_checkpoint=model_checkpoint,
                      training_monitor=train_monitor,
                      resume=config['resume'],
                      lr_scheduler=lr_scheduler,
                      n_gpu=config['n_gpus'],
                      label_to_id=config['label_to_id'],
                      evaluate=AUC(sigmoid=True))
    # print the model summary
    trainer.summary()
    # fit the model
    trainer.train()
    # release GPU memory
    if len(config['n_gpus']) > 0:
        torch.cuda.empty_cache()
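The parameter grouping used in this example (and the others above) excludes biases and LayerNorm parameters from weight decay, which is standard practice when fine-tuning BERT. A self-contained sketch of the same name-matching logic on a toy module (the module and its attribute names are hypothetical, chosen so the parameter names mirror BERT's):

import torch.nn as nn


class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)


no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
params = list(Toy().named_parameters())
decay = [n for n, p in params if not any(nd in n for nd in no_decay)]
print(decay)  # ['linear.weight'] -- only this group gets weight decay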
Code Example #6
File: inference.py Project: PlusLabNLP/CLUSTER
def main():

    logger = init_logger(log_name=config['model']['arch'],
                         log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = 'cuda:%d' % config['train']['n_gpu'][0] if len(
        config['train']['n_gpu']) else 'cpu'
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}

    DT = DataTransformer(logger=logger, seed=config['train']['seed'])

    targets, sentences, ids = DT.read_data(
        raw_data_path=config['data']['test_file_path'],
        preprocessor=EnglishPreProcessor(),
        is_train=False)
    tokenizer = BertTokenizer(
        vocab_file=config['pretrained']['bert']['vocab_path'],
        do_lower_case=config['train']['do_lower_case'])
    # test dataset
    test_dataset = CreateDataset(data=list(zip(sentences, targets)),
                                 tokenizer=tokenizer,
                                 max_seq_len=config['train']['max_seq_len'],
                                 seed=config['train']['seed'],
                                 example_type='test')

    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config['train']['batch_size'],
                             num_workers=config['train']['num_workers'],
                             shuffle=False,
                             drop_last=False,
                             pin_memory=False)

    # **************************** load pretrained model from cache ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(
        config['pretrained']['bert']['bert_model_dir'],
        cache_dir=config['output']['cache_dir'],
        num_classes=len(id2label))
    # ****************************  inference ***********************
    logger.info('model predicting....')
    predicter = Predicter(
        model=model,
        logger=logger,
        n_gpu=config['train']['n_gpu'],
        model_path=config['output']['checkpoint_dir'] /
        f"best_{config['model']['arch']}_model.pth",
    )
    # predict
    result = predicter.predict(data=test_loader)

    with open(config['output']['inference_output_dir'], 'w') as f:
        for index, line, score in zip(ids, sentences, result):
            f.write(f"{index}\t{line}\t{score[0]}\n")

    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
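The output loop above writes id<TAB>sentence<TAB>score lines, which breaks if a sentence itself contains a tab or newline. A sketch of the same layout via the csv module, which quotes such fields automatically (file name and data are hypothetical):

import csv

rows = [(0, 'an example sentence', 0.93)]  # hypothetical (id, text, score) rows
with open('predictions.tsv', 'w', newline='') as f:
    writer = csv.writer(f, delimiter='\t')
    writer.writerows(rows)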
Code Example #7
def main():
    # **************************** SETUP/READ FROM CONFIG ***********************
    logger = init_logger(log_name=config['model']['arch'],
                         log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = 'cuda:%d' % config['train']['n_gpu'][0] if len(
        config['train']['n_gpu']) else 'cpu'
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}
    # **************************** DATA PREP ***********************
    DT = DataTransformer(logger=logger, seed=config['train']['seed'])

    # Preprocessing
    targets, sentences = DT.read_data(
        raw_data_path=config['data']['test_file_path'],
        preprocessor=EnglishPreProcessor(),
        is_train=False)
    tokenizer = BertTokenizer(
        vocab_file=config['pretrained']['bert']['vocab_path'],
        do_lower_case=config['train']['do_lower_case'])
    #**************************** TOKENIZING *********************************
    test_dataset = CreateDataset(data=list(zip(sentences, targets)),
                                 tokenizer=tokenizer,
                                 max_seq_len=config['train']['max_seq_len'],
                                 seed=config['train']['seed'],
                                 example_type='test')
    #*************************** DATALOADER ******************************
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config['train']['batch_size'],
                             num_workers=config['train']['num_workers'],
                             shuffle=False,
                             drop_last=False,
                             pin_memory=False)

    # **************************** LOAD MODEL ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(
        config['pretrained']['bert']['bert_model_dir'],
        cache_dir=config['output']['cache_dir'],
        num_classes=len(id2label))
    # **************************** RUNNING PREDICTIONS ***********************
    logger.info('model predicting....')
    predicter = Predicter(
        model=model,
        logger=logger,
        n_gpu=config['train']['n_gpu'],
        model_path=config['output']['checkpoint_dir'] /
        f"best_{config['model']['arch']}_model.pth",
    )
    # *************************OUTPUT RESULTS TO CSV*************************
    result = predicter.predict(data=test_loader)
    print(result)
    df = pd.DataFrame(result)
    cols = [
        'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'
    ]
    df.columns = cols
    print(df.head())
    df.to_csv('pybert/output/result/result.csv')

    # ******************************EMPTY GPU CACHE************************************
    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
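One detail worth noting in the CSV step above: df.to_csv(path) writes the row index as an unnamed extra column by default. A sketch that names the columns at construction time and drops the index (the scores are made up):

import pandas as pd

cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
scores = [[0.9, 0.1, 0.4, 0.0, 0.2, 0.1]]  # one made-up row of probabilities
df = pd.DataFrame(scores, columns=cols)
df.to_csv('result.csv', index=False)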