Code Example #1
File: __main__.py Project: vrmpx/hedwig
                              lr=args.lr,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.lr,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, args)

    if not args.trained_model:
        trainer.train()
        model = torch.load(trainer.snapshot_path)
    else:
        model = HierarchicalBert(args.model)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

    evaluate_split(model, processor, args, split='dev')
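
All of these snippets restore snapshots that were saved from a torch.nn.DataParallel-wrapped model, whose parameter names carry a "module." prefix; the loop over model_.state_dict() strips that prefix before load_state_dict. A minimal stand-alone sketch of the same step (the helper name is ours, not part of hedwig):

import torch


def strip_module_prefix(state_dict):
    """Return a copy of state_dict with the DataParallel 'module.' prefix removed."""
    return {key.replace("module.", "", 1): value for key, value in state_dict.items()}


# Usage, mirroring the snippets above: the snapshot holds a full model object,
# so we take its state_dict and load it into a freshly constructed model.
# snapshot = torch.load(snapshot_path, map_location=lambda storage, loc: storage)
# model.load_state_dict(strip_module_prefix(snapshot.state_dict()))
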
Code Example #2
File: __main__.py Project: wilkinsondi/hedwig
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args.lr,
                          weight_decay=0.01,
                          correct_bias=False)
        scheduler = WarmupLinearSchedule(optimizer,
                                         t_total=num_train_optimization_steps,
                                         warmup_steps=args.warmup_proportion *
                                         num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, scheduler, tokenizer,
                          args)

    if not args.trained_model:
        trainer.train()
        model = torch.load(trainer.snapshot_path)

    else:
        model = BertForSequenceClassification.from_pretrained(
            pretrained_model_path, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
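
For reference, the warmup_steps passed to WarmupLinearSchedule is a fraction of the total number of optimization steps, which the surrounding code derives from the training-set size (the computation appears in the later examples on this page). A minimal sketch of that arithmetic with made-up numbers:

# Illustrative numbers only; the real values come from the dataset and args.
num_train_examples = 10000
batch_size = 16
gradient_accumulation_steps = 2
epochs = 3
warmup_proportion = 0.1

# Optimizer steps per epoch times the number of epochs, as computed in the
# later examples on this page.
num_train_optimization_steps = int(
    num_train_examples / batch_size / gradient_accumulation_steps) * epochs

# The learning rate ramps up linearly for the first warmup_proportion of
# those steps, then decays linearly to zero over the remainder.
warmup_steps = warmup_proportion * num_train_optimization_steps

print(num_train_optimization_steps, warmup_steps)  # 936 93.6
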
Code Example #3
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args.lr,
                          weight_decay=0.01,
                          correct_bias=False)
        scheduler = WarmupLinearSchedule(optimizer,
                                         t_total=num_train_optimization_steps,
                                         warmup_steps=args.warmup_proportion *
                                         num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, scheduler, tokenizer,
                          args, False, save_path)

    if not args.trained_model:
        start_time = time.time()
        trainer.train()
        model = torch.load(trainer.snapshot_path)
        elapsed_time = time.time() - start_time

    else:
        model = BertForSequenceClassification.from_pretrained(
            pretrained_model_path,
            num_labels=args.num_labels,
            prune_mask=args.prune_weight)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
Code Example #4
File: util.py Project: MichaelCaohn/Prune-Testing
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.lr,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args.lr,
                          weight_decay=0.01,
                          correct_bias=False)
        scheduler = WarmupLinearSchedule(optimizer,
                                         t_total=num_train_optimization_steps,
                                         warmup_steps=args.warmup_proportion *
                                         num_train_optimization_steps)

    trainer = BertTrainer(pruned_model, optimizer, processor, scheduler,
                          tokenizer, args, True, save_path)
    trainer.train()
    torch.save(pruned_model, trainer.snapshot_path)

    # Retest the accuracy
    print("--- After Retraining ---")
    evaluate_split(pruned_model, processor, tokenizer, args, split='dev')
    evaluate_split(pruned_model, processor, tokenizer, args, split='test')
    util.print_nonzeros(model)
Code Example #5
File: __main__.py Project: elisaF/hedwig
def run_main(args):
    print('Args: ', args)
    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    metrics_dev_json = args.metrics_json + '_dev'
    metrics_test_json = args.metrics_json + '_test'

    dataset_map = {
        'SST-2': SST2Processor,
        'Reuters': ReutersProcessor,
        'CongressionalHearing': CongressionalHearingProcessor,
        'CongressionalHearingBinary': CongressionalHearingBinaryProcessor,
        'IMDB': IMDBProcessor,
        'AAPD': AAPDProcessor,
        'AGNews': AGNewsProcessor,
        'Yelp2014': Yelp2014Processor,
        'Sogou': SogouProcessor
    }

    model_map = {
        'bert': BertForSequenceClassification,
        'electra': ElectraForSequenceClassification,
        'xlnet': XLNetForSequenceClassification,
        'roberta': RobertaForSequenceClassification,
        'albert': AlbertForSequenceClassification
    }

    tokenizer_map = {
        'bert': BertTokenizer,
        'electra': ElectraTokenizer,
        'xlnet': XLNetTokenizer,
        'roberta': RobertaTokenizer,
        'albert': AlbertTokenizer
    }

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    if args.is_regression:
        args.num_labels = 1
        args.is_multilabel = False
    else:
        args.num_labels = dataset_map[args.dataset].NUM_CLASSES
        args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL
    args.is_hierarchical = False

    processor = dataset_map[args.dataset](args)

    if not args.trained_model:
        save_path = os.path.join(args.save_path, processor.NAME)
        os.makedirs(save_path, exist_ok=True)

    pretrained_vocab_path = args.model

    train_examples = None
    num_train_optimization_steps = None
    if not args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs

    pretrained_model_path = args.model

    tokenizer = tokenizer_map[args.model_family].from_pretrained(
        pretrained_vocab_path)
    model = model_map[args.model_family].from_pretrained(
        pretrained_model_path, num_labels=args.num_labels)

    # hacky fix for error in transformers code
    # that triggers error "Assertion srcIndex < srcSelectDimSize failed"
    # https://github.com/huggingface/transformers/issues/1538#issuecomment-570260748
    if args.model_family == 'roberta' and args.use_second_input:
        model.roberta.config.type_vocab_size = 2
        single_emb = model.roberta.embeddings.token_type_embeddings
        model.roberta.embeddings.token_type_embeddings = torch.nn.Embedding(
            2, single_emb.embedding_dim)
        model.roberta.embeddings.token_type_embeddings.weight = torch.nn.Parameter(
            single_emb.weight.repeat([2, 1]))

    if args.fp16:
        model.half()
    model.to(device)

    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
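    # Two parameter groups: weight decay is applied to the weight matrices,
    # while biases and LayerNorm parameters get weight_decay=0.0, the usual
    # convention when fine-tuning BERT-style models.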
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        args.weight_decay
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install NVIDIA Apex for FP16 training")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.lr,
                              bias_correction=False,
                              max_grad_norm=1.0)
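        # loss_scale == 0 requests dynamic loss scaling (Apex backs the scale
        # off automatically when gradient overflow is detected); any other
        # value is used as a fixed, static loss scale.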
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args.lr,
                          correct_bias=False)
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_training_steps=num_train_optimization_steps,
            num_warmup_steps=args.warmup_proportion *
            num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, scheduler, tokenizer,
                          args)

    if not args.trained_model:
        trainer.train()
        model = torch.load(trainer.snapshot_path)

    else:
        model = BertForSequenceClassification.from_pretrained(
            pretrained_model_path, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

    if trainer.training_converged:
        if args.evaluate_dev:
            evaluate_split(model,
                           processor,
                           tokenizer,
                           args,
                           metrics_dev_json,
                           split='dev')
        if args.evaluate_test:
            evaluate_split(model,
                           processor,
                           tokenizer,
                           args,
                           metrics_test_json,
                           split='test')

    return trainer.training_converged
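
A hypothetical entry point for this snippet; get_args() is assumed to return the argparse namespace that run_main expects (the later examples on this page use the same helper):

if __name__ == '__main__':
    # get_args() is assumed to parse the flags referenced above
    # (dataset, model, lr, warmup_proportion, fp16, ...).
    cli_args = get_args()
    converged = run_main(cli_args)
    print('Training converged:', converged)
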
Code Example #6
def main():
    # Set default configuration in args.py
    args = get_args()
    dataset_map = {'AAPD': AAPDProcessor}

    output_modes = {"rte": "classification"}

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))
    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')
    args.device = device
    args.n_gpu = n_gpu  # 1
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES  # 12
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL  # True
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()

    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)

        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    # Prepare model
    MultiHeadedAttention1 = MultiHeadedAttention(16, 768)
    PositionwiseFeedForward1 = PositionwiseFeedForward(768, 3072)
    EncoderLayer_1 = EncoderLayer(768, MultiHeadedAttention1,
                                  PositionwiseFeedForward1, 0.1)
    Encoder1 = Encoder(EncoderLayer_1, 3)

    pretrain_model_dir = '/home/ltf/code/data/scibert_scivocab_uncased/'
    model = ClassifyModel(pretrain_model_dir,
                          num_labels=args.num_labels,
                          Encoder1=EncoderLayer_1,
                          is_lock=False)
    tokenizer = BertTokenizer.from_pretrained(pretrain_model_dir,
                                              do_lower_case=args.do_lower_case)

    if args.fp16:
        model.half()
    model.to(device)

    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    if not args.trained_model:
        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError(
                    "Please install NVIDIA Apex for FP16 training")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.lr,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer,
                                           static_loss_scale=args.loss_scale)
        else:
            optimizer = AdamW(optimizer_grouped_parameters,
                              lr=args.lr,
                              weight_decay=0.01,
                              correct_bias=False)
            scheduler = WarmupLinearSchedule(
                optimizer,
                t_total=num_train_optimization_steps,
                warmup_steps=args.warmup_proportion *
                num_train_optimization_steps)

        trainer = BertTrainer(model, optimizer, processor, scheduler,
                              tokenizer, args)
        trainer.train()
        model = torch.load(trainer.snapshot_path)
    else:
        model = BertForSequenceClassification.from_pretrained(
            pretrain_model_dir, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

    evaluate_split(model, processor, tokenizer, args, split='dev')
    evaluate_split(model, processor, tokenizer, args, split='test')
Code Example #7
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {
        'SST-2': SST2Processor,
        'Reuters': ReutersProcessor,
        'IMDB': IMDBProcessor,
        'AAPD': AAPDProcessor,
        'AGNews': AGNewsProcessor,
        'Yelp2014': Yelp2014Processor,
        'Sogou': SogouProcessor,
        'Personality': PersonalityProcessor,
        'News_art': News_artProcessor,
        'News': News_Processor,
        'UCI_yelp': UCI_yelpProcessor,
        'Procon': ProconProcessor,
        'Style': StyleProcessor,
        'ProconDual': ProconDualProcessor,
        'Pan15': Pan15_Processor,
        'Pan14E': Pan14E_Processor,
        'Pan14N': Pan14N_Processor,
        'Perspectrum': PerspectrumProcessor
    }

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if not args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            len(train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Install NVIDIA Apex to use distributed and FP16 training.")
        model = DDP(model)
    # elif n_gpu > 1:  (disabled; changed by marjan)
    #     model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install NVIDIA Apex for distributed and FP16 training")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.lr,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
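        # BertAdam (pytorch-pretrained-bert) applies the linear warmup and
        # decay schedule internally via its warmup/t_total arguments, so this
        # branch does not create a separate scheduler object.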
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.lr,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, args)

    if not args.trained_model:
        trainer.train()
        model = torch.load(trainer.snapshot_path)
    else:
        model = BertForSequenceClassification.from_pretrained(
            args.model, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)
Code Example #8
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'News_art': News_artProcessor, 'News': News_Processor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            math.ceil(len(train_examples) / args.batch_size) /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, num_labels=2)  # creating news model!
    #model = BertForSequenceClassification.from_pretrained(args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    #model = BertForSequenceClassification.from_pretrained(args.model, num_labels=args.num_labels)
    model_ = torch.load(
        args.trained_model,
        map_location=lambda storage, loc: storage)  # load personality model
    state = {}
    for key in model_.state_dict().keys():
        new_key = key.replace("module.", "")
        state[new_key] = model_.state_dict()[key]

    # The personality model's classification head does not match this task,
    # so drop it before loading the remaining weights (strict=False below).
    del state['classifier.weight']
    del state['classifier.bias']
    model.load_state_dict(state, strict=False)
    model = model.to(device)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    print('t_total :', num_train_optimization_steps)
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.lr,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)
    args.freez_bert = False
    trainer = BertTrainer(model, optimizer, processor, args)

    trainer.train()
    model = torch.load(trainer.snapshot_path)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)