Example #1
def eval_model(model,
               data,
               metric_meta,
               use_cuda=True,
               with_label=True,
               label_mapper=None):
    if use_cuda:
        model.cuda()
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    for batch_info, batch_data in data:
        batch_info, batch_data = Collater.patch_data(use_cuda, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        predictions.extend(pred)
        golds.extend(gold)
        scores.extend(score)
        ids.extend(batch_info['uids'])
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    return metrics, predictions, scores, golds, ids
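A minimal usage sketch for this eval_model variant, assuming an already constructed MTDNNModel and a task_def loaded via TaskDefs as in the later examples; the dataset path, batch size, and collater arguments below are illustrative placeholders, not part of the original example.

# Hypothetical driver for eval_model (Example #1); paths and sizes are placeholders.
test_set = SingleTaskDataset('data/mnli_dev.json', False, maxlen=512,
                             task_id=0, task_def=task_def)
collater = Collater(is_train=False)
loader = DataLoader(test_set, batch_size=32, collate_fn=collater.collate_fn)
with torch.no_grad():
    metrics, predictions, scores, golds, ids = eval_model(
        model, loader, metric_meta=task_def.metric_meta,
        use_cuda=torch.cuda.is_available(), label_mapper=task_def.label_vocab)
print(metrics)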
Example #2
def eval_model(model,
               data,
               metric_meta,
               use_cuda=True,
               with_label=True,
               label_mapper=None,
               task_type=TaskType.Classification):
    if use_cuda:
        model.cuda()
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    for idx, (batch_info, batch_data) in enumerate(data):
        # if idx % 100 == 0:
        #     print("predicting {}".format(idx))
        batch_info, batch_data = Collater.patch_data(use_cuda, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        predictions.extend(pred)
        golds.extend(gold)
        scores.extend(score)
        ids.extend(batch_info['uids'])

    if task_type == TaskType.Span:
        from experiments.squad import squad_utils
        golds = squad_utils.merge_answers(ids, golds)
        predictions, scores = squad_utils.select_answers(
            ids, predictions, scores)
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    return metrics, predictions, scores, golds, ids
Example #3
def eval_model(model,
               data,
               metric_meta,
               device,
               with_label=True,
               label_mapper=None,
               task_type=TaskType.Classification):
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    for (batch_info, batch_data) in data:
        batch_info, batch_data = Collater.patch_data(device, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        predictions.extend(pred)
        golds.extend(gold)
        scores.extend(score)
        ids.extend(batch_info['uids'])

    if task_type == TaskType.Span:
        from experiments.squad import squad_utils
        golds = squad_utils.merge_answers(ids, golds)
        predictions, scores = squad_utils.select_answers(
            ids, predictions, scores)
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    return metrics, predictions, scores, golds, ids
Example #4
def eval_model(
    model,
    data,
    metric_meta,
    device,
    with_label=True,
    label_mapper=None,
    task_type=TaskType.Classification,
):
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    for (batch_info, batch_data) in tqdm(data, total=len(data)):
        batch_info, batch_data = Collater.patch_data(device, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        scores = merge(score, scores)
        golds = merge(gold, golds)
        predictions = merge(pred, predictions)
        ids = merge(batch_info["uids"], ids)

    if task_type == TaskType.Span:
        predictions, golds = postprocess_qa_predictions(
            golds, scores, version_2_with_negative=False)
    elif task_type == TaskType.SpanYN:
        predictions, golds = postprocess_qa_predictions(
            golds, scores, version_2_with_negative=True)

    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    return metrics, predictions, scores, golds, ids
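Unlike the earlier variants, Example #4 accumulates batch outputs through a merge helper rather than list.extend, which lets span tasks pass richer per-feature structures through the same loop. The following is a hypothetical stand-in covering the list case only; the real helper in the repository may also handle dicts and tensors.

# Hypothetical stand-in for the merge helper used in Example #4.
def merge(new, accumulated):
    # list outputs are concatenated, like list.extend but returning a new list
    if isinstance(new, list):
        return accumulated + new
    # anything else is appended as a single element
    return accumulated + [new]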
Example #5
def main():
    parser = argparse.ArgumentParser()
    model_config(parser)
    set_config(parser)
    train_config(parser)
    args = parser.parse_args()
    layer_indexes = [int(x) for x in args.layers.split(",")]
    set_environment(args.seed)
    # process data
    data, is_single_sentence = process_data(args)
    data_type = DataFormat.PremiseOnly if is_single_sentence else DataFormat.PremiseAndOneHypothesis
    collater = Collater(gpu=args.cuda, is_train=False, data_type=data_type)
    batcher = DataLoader(data, batch_size=args.batch_size, collate_fn=collater.collate_fn, pin_memory=args.cuda)
    opt = vars(args)
    # load model
    if os.path.exists(args.checkpoint):
        state_dict = torch.load(args.checkpoint)
        config = state_dict['config']
        config['dump_feature'] = True
        opt.update(config)
    else:
        logger.error('#' * 20)
        logger.error(
            'Could not find the init model! Aborting, since features cannot be extracted from a randomly initialized model.')
        logger.error('#' * 20)
        return
    num_all_batches = len(batcher)
    model = MTDNNModel(
        opt,
        state_dict=state_dict,
        num_train_step=num_all_batches)
    if args.cuda:
        model.cuda()

    features_dict = {}
    for batch_meta, batch_data in batcher:
        batch_meta, batch_data = Collater.patch_data(args.cuda, batch_meta, batch_data)
        all_encoder_layers, _ = model.extract(batch_meta, batch_data)
        embeddings = [all_encoder_layers[idx].detach().cpu().numpy()
                      for idx in layer_indexes]
        uids = batch_meta['uids']
        masks = batch_data[batch_meta['mask']].detach().cpu().numpy().tolist()
        for idx, uid in enumerate(uids):
            slen = sum(masks[idx])
            features = {}
            for yidx, layer in enumerate(layer_indexes):
                features[layer] = str(embeddings[yidx][idx][:slen].tolist())
            features_dict[uid] = features

    # save features
    with open(args.foutput, 'w', encoding='utf-8') as writer:
        for sample in data:
            uid = sample['uid']
            tokens = sample['tokens']
            feature = features_dict[uid]
            feature['tokens'] = tokens
            feature['uid'] = uid
            writer.write('{}\n'.format(json.dumps(feature)))
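Example #5 writes one JSON object per line, with stringified per-layer embeddings plus the tokens and uid. A small sketch for reading the dump back; 'features.json' stands in for args.foutput.

import json

# Read back the per-token features dumped by Example #5.
with open('features.json', encoding='utf-8') as reader:
    for line in reader:
        sample = json.loads(line)
        uid, tokens = sample['uid'], sample['tokens']
        # layer keys were written as ints; json.dumps turns them into strings
        layer_keys = [k for k in sample if k not in ('uid', 'tokens')]
        print(uid, len(tokens), 'layers:', layer_keys)
        break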
Example #6
def extract_encoding(model, data, use_cuda=True):
    if use_cuda:
        model.cuda()
    sequence_outputs = []
    max_seq_len = 0
    for idx, (batch_info, batch_data) in enumerate(data):
        batch_info, batch_data = Collater.patch_data(use_cuda, batch_info, batch_data)
        sequence_output = model.encode(batch_info, batch_data)
        sequence_outputs.append(sequence_output)
        max_seq_len = max(max_seq_len, sequence_output.shape[1])
    
    new_sequence_outputs = []
    for sequence_output in sequence_outputs:
        new_sequence_output = torch.zeros(sequence_output.shape[0], max_seq_len, sequence_output.shape[2])
        new_sequence_output[:, :sequence_output.shape[1], :] = sequence_output
        new_sequence_outputs.append(new_sequence_output)

    return torch.cat(new_sequence_outputs)
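Since each batch can have a different sequence length, extract_encoding right-pads every batch's encoder output with zeros to the longest sequence seen, then concatenates along the batch dimension. A self-contained sketch of just that padding step, in plain PyTorch with made-up shapes:

import torch

# Pad (batch, seq_len, hidden) tensors to a common seq_len, then concatenate.
outputs = [torch.randn(2, 5, 8), torch.randn(2, 7, 8)]
max_len = max(t.shape[1] for t in outputs)
padded = []
for t in outputs:
    buf = torch.zeros(t.shape[0], max_len, t.shape[2])
    buf[:, :t.shape[1], :] = t  # copy real positions, leave zero padding
    padded.append(buf)
merged = torch.cat(padded)
assert merged.shape == (4, max_len, 8)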
Example #7
def eval_model(model,
               data,
               metric_meta,
               device,
               with_label=True,
               label_mapper=None,
               task_type=TaskType.Classification):
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    print("****device={}".format(device))
    for (batch_info, batch_data) in data:
        batch_info, batch_data = Collater.patch_data(device, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        predictions.extend(pred)
        golds.extend(gold)
        scores.extend(score)
        ids.extend(batch_info['uids'])

    if task_type == TaskType.Span:
        from experiments.squad import squad_utils
        golds = squad_utils.merge_answers(ids, golds)
        predictions, scores = squad_utils.select_answers(
            ids, predictions, scores)
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    for i in range(min(len(ids), 10)):
        print("{}\t{}\t{}\t{}\n".format(ids[i], predictions[i], scores[2 * i],
                                        scores[2 * i + 1]))

    #print("score heads={}".format(scores[:10]))
    return metrics, predictions, scores, golds, ids
Example #8
def main():
    logger.info('Launching the MT-DNN training')
    opt = vars(args)
    # update data dir
    opt['data_dir'] = data_dir
    batch_size = args.batch_size

    tasks = {}
    task_def_list = []
    dropout_list = []

    train_datasets = []
    for dataset in args.train_datasets:
        prefix = dataset.split('_')[0]
        if prefix in tasks:
            continue
        task_id = len(tasks)
        tasks[prefix] = task_id
        task_def = task_defs.get_task_def(prefix)
        task_def_list.append(task_def)

        train_path = os.path.join(data_dir, '{}_train.json'.format(dataset))
        logger.info('Loading {} as task {}'.format(train_path, task_id))
        train_data_set = SingleTaskDataset(train_path,
                                           True,
                                           maxlen=args.max_seq_len,
                                           task_id=task_id,
                                           task_def=task_def)
        train_datasets.append(train_data_set)
    train_collater = Collater(dropout_w=args.dropout_w,
                              encoder_type=encoder_type,
                              soft_label=args.mkd_opt > 0)
    multi_task_train_dataset = MultiTaskDataset(train_datasets)
    multi_task_batch_sampler = MultiTaskBatchSampler(train_datasets,
                                                     args.batch_size,
                                                     args.mix_opt, args.ratio)
    multi_task_train_data = DataLoader(multi_task_train_dataset,
                                       batch_sampler=multi_task_batch_sampler,
                                       collate_fn=train_collater.collate_fn,
                                       pin_memory=args.cuda)

    opt['task_def_list'] = task_def_list

    dev_data_list = []
    test_data_list = []
    test_collater = Collater(is_train=False, encoder_type=encoder_type)
    for dataset in args.test_datasets:
        prefix = dataset.split('_')[0]
        task_def = task_defs.get_task_def(prefix)
        task_id = tasks[prefix]
        task_type = task_def.task_type
        data_type = task_def.data_type

        dev_path = os.path.join(data_dir, '{}_dev.json'.format(dataset))
        dev_data = None
        if os.path.exists(dev_path):
            dev_data_set = SingleTaskDataset(dev_path,
                                             False,
                                             maxlen=args.max_seq_len,
                                             task_id=task_id,
                                             task_def=task_def)
            dev_data = DataLoader(dev_data_set,
                                  batch_size=args.batch_size_eval,
                                  collate_fn=test_collater.collate_fn,
                                  pin_memory=args.cuda)
        dev_data_list.append(dev_data)

        test_path = os.path.join(data_dir, '{}_test.json'.format(dataset))
        test_data = None
        if os.path.exists(test_path):
            test_data_set = SingleTaskDataset(test_path,
                                              False,
                                              maxlen=args.max_seq_len,
                                              task_id=task_id,
                                              task_def=task_def)
            test_data = DataLoader(test_data_set,
                                   batch_size=args.batch_size_eval,
                                   collate_fn=test_collater.collate_fn,
                                   pin_memory=args.cuda)
        test_data_list.append(test_data)

    logger.info('#' * 20)
    logger.info(opt)
    logger.info('#' * 20)

    # divide by the number of gradient accumulation steps.
    num_all_batches = args.epochs * len(
        multi_task_train_data) // args.grad_accumulation_step
    logger.info('############# Gradient Accumulation Info #############')
    logger.info('number of steps: {}'.format(args.epochs *
                                             len(multi_task_train_data)))
    logger.info('number of grad accumulation steps: {}'.format(
        args.grad_accumulation_step))
    logger.info('adjusted number of steps: {}'.format(num_all_batches))
    logger.info('############# Gradient Accumulation Info #############')

    init_model = args.init_checkpoint
    state_dict = None

    if os.path.exists(init_model):
        state_dict = torch.load(init_model)
        config = state_dict['config']
    else:
        if opt['encoder_type'] not in EncoderModelType._value2member_map_:
            raise ValueError("encoder_type is out of pre-defined types")
        literal_encoder_type = EncoderModelType(
            opt['encoder_type']).name.lower()
        config_class, model_class, tokenizer_class = MODEL_CLASSES[
            literal_encoder_type]
        config = config_class.from_pretrained(
            init_model, output_hidden_states=True).to_dict(
            )  # change here to enable multi-layer output

    config['output_hidden_states'] = True
    config['attention_probs_dropout_prob'] = args.bert_dropout_p
    config['hidden_dropout_prob'] = args.bert_dropout_p
    config['multi_gpu_on'] = opt["multi_gpu_on"]
    if args.num_hidden_layers != -1:
        config['num_hidden_layers'] = args.num_hidden_layers
    opt.update(config)

    model = MTDNNModel(opt,
                       state_dict=state_dict,
                       num_train_step=num_all_batches)
    if args.resume and args.model_ckpt:
        logger.info('loading model from {}'.format(args.model_ckpt))
        model.load(args.model_ckpt)

    #### model meta str
    headline = '############# Model Arch of MT-DNN #############'
    ### print network
    logger.info('\n{}\n{}\n'.format(headline, model.network))

    # dump config
    config_file = os.path.join(output_dir, 'config.json')
    with open(config_file, 'w', encoding='utf-8') as writer:
        writer.write('{}\n'.format(json.dumps(opt)))
        writer.write('\n{}\n{}\n'.format(headline, model.network))

    logger.info("Total number of params: {}".format(model.total_param))

    # tensorboard
    if args.tensorboard:
        args.tensorboard_logdir = os.path.join(args.output_dir,
                                               args.tensorboard_logdir)
        tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)

    if args.encode_mode:
        for idx, dataset in enumerate(args.test_datasets):
            prefix = dataset.split('_')[0]
            test_data = test_data_list[idx]
            with torch.no_grad():
                encoding = extract_encoding(model,
                                            test_data,
                                            use_cuda=args.cuda)
            torch.save(
                encoding,
                os.path.join(output_dir, '{}_encoding.pt'.format(dataset)))
        return

    for epoch in range(0, args.epochs):
        logger.warning('At epoch {}'.format(epoch))
        start = datetime.now()

        for i, (batch_meta, batch_data) in enumerate(multi_task_train_data):
            batch_meta, batch_data = Collater.patch_data(
                args.cuda, batch_meta, batch_data)
            task_id = batch_meta['task_id']
            model.update(batch_meta, batch_data)
            if (model.local_updates) % (args.log_per_updates *
                                        args.grad_accumulation_step
                                        ) == 0 or model.local_updates == 1:
                remaining_time = str(
                    (datetime.now() - start) / (i + 1) *
                    (len(multi_task_train_data) - i - 1)).split('.')[0]
                logger.info(
                    'Task [{0:2}] updates[{1:6}] train loss[{2:.5f}] remaining[{3}]'
                    .format(task_id, model.updates, model.train_loss.avg,
                            remaining_time))
                if args.tensorboard:
                    tensorboard.add_scalar('train/loss',
                                           model.train_loss.avg,
                                           global_step=model.updates)

            if args.save_per_updates_on and (
                (model.local_updates) %
                (args.save_per_updates * args.grad_accumulation_step) == 0):
                model_file = os.path.join(
                    output_dir, 'model_{}_{}.pt'.format(epoch, model.updates))
                logger.info('Saving mt-dnn model to {}'.format(model_file))
                model.save(model_file)

        for idx, dataset in enumerate(args.test_datasets):
            prefix = dataset.split('_')[0]
            task_def = task_defs.get_task_def(prefix)
            label_dict = task_def.label_vocab
            dev_data = dev_data_list[idx]
            if dev_data is not None:
                with torch.no_grad():
                    dev_metrics, dev_predictions, scores, golds, dev_ids = eval_model(
                        model,
                        dev_data,
                        metric_meta=task_def.metric_meta,
                        use_cuda=args.cuda,
                        label_mapper=label_dict,
                        task_type=task_def.task_type)
                for key, val in dev_metrics.items():
                    if args.tensorboard:
                        tensorboard.add_scalar('dev/{}/{}'.format(
                            dataset, key),
                                               val,
                                               global_step=epoch)
                    if isinstance(val, str):
                        logger.warning(
                            'Task {0} -- epoch {1} -- Dev {2}:\n {3}'.format(
                                dataset, epoch, key, val))
                    else:
                        logger.warning(
                            'Task {0} -- epoch {1} -- Dev {2}: {3:.3f}'.format(
                                dataset, epoch, key, val))
                score_file = os.path.join(
                    output_dir, '{}_dev_scores_{}.json'.format(dataset, epoch))
                results = {
                    'metrics': dev_metrics,
                    'predictions': dev_predictions,
                    'uids': dev_ids,
                    'scores': scores
                }
                dump(score_file, results)
                if args.glue_format_on:
                    from experiments.glue.glue_utils import submit
                    official_score_file = os.path.join(
                        output_dir,
                        '{}_dev_scores_{}.tsv'.format(dataset, epoch))
                    submit(official_score_file, results, label_dict)

            # test eval
            test_data = test_data_list[idx]
            if test_data is not None:
                with torch.no_grad():
                    test_metrics, test_predictions, scores, golds, test_ids = eval_model(
                        model,
                        test_data,
                        metric_meta=task_def.metric_meta,
                        use_cuda=args.cuda,
                        with_label=False,
                        label_mapper=label_dict,
                        task_type=task_def.task_type)
                score_file = os.path.join(
                    output_dir,
                    '{}_test_scores_{}.json'.format(dataset, epoch))
                results = {
                    'metrics': test_metrics,
                    'predictions': test_predictions,
                    'uids': test_ids,
                    'scores': scores
                }
                dump(score_file, results)
                if args.glue_format_on:
                    from experiments.glue.glue_utils import submit
                    official_score_file = os.path.join(
                        output_dir,
                        '{}_test_scores_{}.tsv'.format(dataset, epoch))
                    submit(official_score_file, results, label_dict)
                logger.info('[new test scores saved.]')

        model_file = os.path.join(output_dir, 'model_{}.pt'.format(epoch))
        model.save(model_file)
    if args.tensorboard:
        tensorboard.close()
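The num_all_batches computation in Example #8 divides the raw batch count by the gradient accumulation factor, because the optimizer steps only once per accumulated group. A quick worked check with illustrative numbers:

# Worked example of the step arithmetic in Example #8 (values are illustrative).
epochs = 3
batches_per_epoch = 1000
grad_accumulation_step = 4
raw_steps = epochs * batches_per_epoch                  # 3000 forward/backward passes
optimizer_steps = raw_steps // grad_accumulation_step   # 750 optimizer updates
print(raw_steps, optimizer_steps)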
Example #9
def main():
    # set up distributed training
    if args.local_rank > -1:
        device = initialize_distributed(args)
    elif torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    opt = vars(args)
    # update data dir
    opt['data_dir'] = data_dir
    batch_size = args.batch_size
    print_message(logger, 'Launching the MT-DNN training')
    #return
    tasks = {}
    task_def_list = []
    dropout_list = []
    printable = args.local_rank in [-1, 0]

    train_datasets = []
    for dataset in args.train_datasets:
        prefix = dataset.split('_')[0]
        if prefix in tasks:
            continue
        task_id = len(tasks)
        tasks[prefix] = task_id
        task_def = task_defs.get_task_def(prefix)
        task_def_list.append(task_def)
        train_path = os.path.join(data_dir, '{}_train.json'.format(dataset))
        print_message(logger,
                      'Loading {} as task {}'.format(train_path, task_id))
        train_data_set = SingleTaskDataset(train_path,
                                           True,
                                           maxlen=args.max_seq_len,
                                           task_id=task_id,
                                           task_def=task_def,
                                           printable=printable)
        train_datasets.append(train_data_set)
    train_collater = Collater(dropout_w=args.dropout_w,
                              encoder_type=encoder_type,
                              soft_label=args.mkd_opt > 0,
                              max_seq_len=args.max_seq_len,
                              do_padding=args.do_padding)
    multi_task_train_dataset = MultiTaskDataset(train_datasets)
    if args.local_rank != -1:
        multi_task_batch_sampler = DistMultiTaskBatchSampler(
            train_datasets,
            args.batch_size,
            args.mix_opt,
            args.ratio,
            rank=args.local_rank,
            world_size=args.world_size)
    else:
        multi_task_batch_sampler = MultiTaskBatchSampler(
            train_datasets,
            args.batch_size,
            args.mix_opt,
            args.ratio,
            bin_on=args.bin_on,
            bin_size=args.bin_size,
            bin_grow_ratio=args.bin_grow_ratio)
    multi_task_train_data = DataLoader(multi_task_train_dataset,
                                       batch_sampler=multi_task_batch_sampler,
                                       collate_fn=train_collater.collate_fn,
                                       pin_memory=args.cuda)

    opt['task_def_list'] = task_def_list

    dev_data_list = []
    test_data_list = []
    test_collater = Collater(is_train=False,
                             encoder_type=encoder_type,
                             max_seq_len=args.max_seq_len,
                             do_padding=args.do_padding)
    for dataset in args.test_datasets:
        prefix = dataset.split('_')[0]
        task_def = task_defs.get_task_def(prefix)
        task_id = tasks[prefix]
        task_type = task_def.task_type
        data_type = task_def.data_type

        dev_path = os.path.join(data_dir, '{}_dev.json'.format(dataset))
        dev_data = None
        if os.path.exists(dev_path):
            dev_data_set = SingleTaskDataset(dev_path,
                                             False,
                                             maxlen=args.max_seq_len,
                                             task_id=task_id,
                                             task_def=task_def,
                                             printable=printable)
            if args.local_rank != -1:
                dev_data_set = DistTaskDataset(dev_data_set, task_id)
                single_task_batch_sampler = DistSingleTaskBatchSampler(
                    dev_data_set,
                    args.batch_size_eval,
                    rank=args.local_rank,
                    world_size=args.world_size)
                dev_data = DataLoader(dev_data_set,
                                      batch_sampler=single_task_batch_sampler,
                                      collate_fn=test_collater.collate_fn,
                                      pin_memory=args.cuda)
            else:
                dev_data = DataLoader(dev_data_set,
                                      batch_size=args.batch_size_eval,
                                      collate_fn=test_collater.collate_fn,
                                      pin_memory=args.cuda)
        dev_data_list.append(dev_data)

        test_path = os.path.join(data_dir, '{}_test.json'.format(dataset))
        test_data = None
        if os.path.exists(test_path):
            test_data_set = SingleTaskDataset(test_path,
                                              False,
                                              maxlen=args.max_seq_len,
                                              task_id=task_id,
                                              task_def=task_def,
                                              printable=printable)
            if args.local_rank != -1:
                test_data_set = DistTaskDataset(test_data_set, task_id)
                single_task_batch_sampler = DistSingleTaskBatchSampler(
                    test_data_set,
                    args.batch_size_eval,
                    rank=args.local_rank,
                    world_size=args.world_size)
                test_data = DataLoader(test_data_set,
                                       batch_sampler=single_task_batch_sampler,
                                       collate_fn=test_collater.collate_fn,
                                       pin_memory=args.cuda)
            else:
                test_data = DataLoader(test_data_set,
                                       batch_size=args.batch_size_eval,
                                       collate_fn=test_collater.collate_fn,
                                       pin_memory=args.cuda)
        test_data_list.append(test_data)

    print_message(logger, '#' * 20)
    print_message(logger, opt)
    print_message(logger, '#' * 20)

    # divide by the number of gradient accumulation steps.
    num_all_batches = args.epochs * len(
        multi_task_train_data) // args.grad_accumulation_step
    print_message(logger,
                  '############# Gradient Accumulation Info #############')
    print_message(
        logger,
        'number of steps: {}'.format(args.epochs * len(multi_task_train_data)))
    print_message(
        logger, 'number of grad accumulation steps: {}'.format(
            args.grad_accumulation_step))
    print_message(logger,
                  'adjusted number of steps: {}'.format(num_all_batches))
    print_message(logger,
                  '############# Gradient Accumulation Info #############')

    init_model = args.init_checkpoint
    state_dict = None

    if os.path.exists(init_model):
        if encoder_type == EncoderModelType.BERT or \
            encoder_type == EncoderModelType.DEBERTA or \
            encoder_type == EncoderModelType.ELECTRA:
            state_dict = torch.load(init_model, map_location=device)
            config = state_dict['config']
        elif encoder_type == EncoderModelType.ROBERTA or encoder_type == EncoderModelType.XLM:
            model_path = '{}/model.pt'.format(init_model)
            state_dict = torch.load(model_path, map_location=device)
            arch = state_dict['args'].arch
            arch = arch.replace('_', '-')
            if encoder_type == EncoderModelType.XLM:
                arch = "xlm-{}".format(arch)
            # convert model arch
            from data_utils.roberta_utils import update_roberta_keys
            from data_utils.roberta_utils import patch_name_dict
            state = update_roberta_keys(
                state_dict['model'], nlayer=state_dict['args'].encoder_layers)
            state = patch_name_dict(state)
            literal_encoder_type = EncoderModelType(
                opt['encoder_type']).name.lower()
            config_class, model_class, tokenizer_class = MODEL_CLASSES[
                literal_encoder_type]
            config = config_class.from_pretrained(arch).to_dict()
            state_dict = {'state': state}
    else:
        if opt['encoder_type'] not in EncoderModelType._value2member_map_:
            raise ValueError("encoder_type is out of pre-defined types")
        literal_encoder_type = EncoderModelType(
            opt['encoder_type']).name.lower()
        config_class, model_class, tokenizer_class = MODEL_CLASSES[
            literal_encoder_type]
        config = config_class.from_pretrained(init_model).to_dict()

    config['attention_probs_dropout_prob'] = args.bert_dropout_p
    config['hidden_dropout_prob'] = args.bert_dropout_p
    config['multi_gpu_on'] = opt["multi_gpu_on"]
    if args.num_hidden_layers > 0:
        config['num_hidden_layers'] = args.num_hidden_layers

    opt.update(config)

    model = MTDNNModel(opt,
                       device=device,
                       state_dict=state_dict,
                       num_train_step=num_all_batches)
    if args.resume and args.model_ckpt:
        print_message(logger, 'loading model from {}'.format(args.model_ckpt))
        model.load(args.model_ckpt)

    #### model meta str
    headline = '############# Model Arch of MT-DNN #############'
    ### print network
    print_message(logger, '\n{}\n{}\n'.format(headline, model.network))

    # dump config
    config_file = os.path.join(output_dir, 'config.json')
    with open(config_file, 'w', encoding='utf-8') as writer:
        writer.write('{}\n'.format(json.dumps(opt)))
        writer.write('\n{}\n{}\n'.format(headline, model.network))

    print_message(logger,
                  "Total number of params: {}".format(model.total_param))

    # tensorboard
    tensorboard = None
    if args.tensorboard:
        args.tensorboard_logdir = os.path.join(args.output_dir,
                                               args.tensorboard_logdir)
        tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)

    if args.encode_mode:
        for idx, dataset in enumerate(args.test_datasets):
            prefix = dataset.split('_')[0]
            test_data = test_data_list[idx]
            with torch.no_grad():
                encoding = extract_encoding(model,
                                            test_data,
                                            use_cuda=args.cuda)
            torch.save(
                encoding,
                os.path.join(output_dir, '{}_encoding.pt'.format(dataset)))
        return

    for epoch in range(0, args.epochs):
        print_message(logger, 'At epoch {}'.format(epoch), level=1)
        start = datetime.now()

        for i, (batch_meta, batch_data) in enumerate(multi_task_train_data):
            batch_meta, batch_data = Collater.patch_data(
                device, batch_meta, batch_data)
            task_id = batch_meta['task_id']
            model.update(batch_meta, batch_data)

            if (model.updates) % (
                    args.log_per_updates) == 0 or model.updates == 1:
                remaining_time = str(
                    (datetime.now() - start) / (i + 1) *
                    (len(multi_task_train_data) - i - 1)).split('.')[0]
                if args.adv_train and args.debug:
                    debug_info = ' basic loss[%.5f] adv loss[%.5f] emb val[%.8f] noise val[%.8f] noise grad val[%.8f] no proj noise[%.8f] ' % (
                        model.basic_loss.avg, model.adv_loss.avg,
                        model.emb_val.avg, model.noise_val.avg,
                        model.noise_grad_val.avg, model.no_proj_noise_val.avg)
                else:
                    debug_info = ' '
                print_message(
                    logger,
                    'Task [{0:2}] updates[{1:6}] train loss[{2:.5f}]{3}remaining[{4}]'
                    .format(task_id, model.updates, model.train_loss.avg,
                            debug_info, remaining_time))
                if args.tensorboard:
                    tensorboard.add_scalar('train/loss',
                                           model.train_loss.avg,
                                           global_step=model.updates)

            if args.save_per_updates_on and (
                (model.local_updates) %
                (args.save_per_updates * args.grad_accumulation_step)
                    == 0) and args.local_rank in [-1, 0]:
                model_file = os.path.join(
                    output_dir, 'model_{}_{}.pt'.format(epoch, model.updates))
                evaluation(model,
                           args.test_datasets,
                           dev_data_list,
                           task_defs,
                           output_dir,
                           epoch,
                           n_updates=args.save_per_updates,
                           with_label=True,
                           tensorboard=tensorboard,
                           glue_format_on=args.glue_format_on,
                           test_on=False,
                           device=device,
                           logger=logger)
                evaluation(model,
                           args.test_datasets,
                           test_data_list,
                           task_defs,
                           output_dir,
                           epoch,
                           n_updates=args.save_per_updates,
                           with_label=False,
                           tensorboard=tensorboard,
                           glue_format_on=args.glue_format_on,
                           test_on=True,
                           device=device,
                           logger=logger)
                print_message(logger,
                              'Saving mt-dnn model to {}'.format(model_file))
                model.save(model_file)

        evaluation(model,
                   args.test_datasets,
                   dev_data_list,
                   task_defs,
                   output_dir,
                   epoch,
                   with_label=True,
                   tensorboard=tensorboard,
                   glue_format_on=args.glue_format_on,
                   test_on=False,
                   device=device,
                   logger=logger)
        evaluation(model,
                   args.test_datasets,
                   test_data_list,
                   task_defs,
                   output_dir,
                   epoch,
                   with_label=False,
                   tensorboard=tensorboard,
                   glue_format_on=args.glue_format_on,
                   test_on=True,
                   device=device,
                   logger=logger)
        print_message(logger, '[new test scores at {} saved.]'.format(epoch))
        if args.local_rank in [-1, 0]:
            model_file = os.path.join(output_dir, 'model_{}.pt'.format(epoch))
            model.save(model_file)
    if args.tensorboard:
        tensorboard.close()
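Example #9 follows the usual torch.distributed convention: local_rank == -1 means single-process mode, and only rank 0 (or -1) saves checkpoints and logs. A compact sketch of that gating, written as an assumption about the launcher setup rather than code from the repository:

import torch

# Sketch of the rank-gating convention used in Example #9.
local_rank = -1  # -1: single process; >= 0: assigned by the distributed launcher
is_main_process = local_rank in (-1, 0)
if torch.cuda.is_available() and local_rank >= 0:
    torch.cuda.set_device(local_rank)
if is_main_process:
    print('this process would save checkpoints and write logs')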
Example #10
def load_model_for_viz_1(task_def_path,
                         checkpoint_path,
                         input_path,
                         model_type='bert-base-cased',
                         do_lower_case=False,
                         use_cuda=True):
    # load task info
    task = os.path.splitext(os.path.basename(task_def_path))[0]
    task_defs = TaskDefs(task_def_path)
    assert task in task_defs._task_type_map
    assert task in task_defs._data_type_map
    assert task in task_defs._metric_meta_map
    prefix = task.split('_')[0]
    task_def = task_defs.get_task_def(prefix)
    data_type = task_defs._data_type_map[task]
    task_type = task_defs._task_type_map[task]
    metric_meta = task_defs._metric_meta_map[task]
    # load model
    assert os.path.exists(checkpoint_path)
    state_dict = torch.load(checkpoint_path)
    config = state_dict['config']
    config["cuda"] = use_cuda
    device = torch.device("cuda" if use_cuda else "cpu")
    task_def = task_defs.get_task_def(prefix)
    task_def_list = [task_def]
    config['task_def_list'] = task_def_list
    ## temp fix
    config['fp16'] = False
    config['answer_opt'] = 0
    config['adv_train'] = False
    #del state_dict['optimizer']
    config['output_attentions'] = True
    config['local_rank'] = -1
    model = MTDNNModel(config, device, state_dict=state_dict)
    encoder_type = config.get('encoder_type', EncoderModelType.BERT)
    root = os.path.basename(task_def_path)
    literal_model_type = model_type.split('-')[0].upper()
    encoder_model = EncoderModelType[literal_model_type]
    literal_model_type = literal_model_type.lower()
    mt_dnn_suffix = literal_model_type
    if 'base' in model_type:
        mt_dnn_suffix += "_base"
    elif 'large' in model_type:
        mt_dnn_suffix += "_large"
    # load tokenizer
    config_class, model_class, tokenizer_class = MODEL_CLASSES[
        literal_model_type]
    tokenizer = tokenizer_class.from_pretrained(model_type,
                                                do_lower_case=do_lower_case)
    # load data
    prep_input = input_path
    test_data_set = SingleTaskDataset(prep_input,
                                      False,
                                      maxlen=512,
                                      task_id=0,
                                      task_def=task_def)
    collater = Collater(is_train=False, encoder_type=encoder_type)
    test_data = DataLoader(test_data_set,
                           batch_size=1,
                           collate_fn=collater.collate_fn,
                           pin_memory=True)
    idx = 0
    results = []
    for batch_meta, batch_data in tqdm(test_data):
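        # NOTE: hard-coded offset kept from the original experiment; the first
        # 360 examples are skipped before any attentions are collected.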
        if idx < 360:
            idx += 1
            continue
        batch_meta, batch_data = Collater.patch_data(device, batch_meta,
                                                     batch_data)
        model.network.eval()
        task_id = batch_meta['task_id']
        task_def = TaskDef.from_dict(batch_meta['task_def'])
        task_type = task_def.task_type
        task_obj = tasks.get_task_obj(task_def)
        inputs = batch_data[:batch_meta['input_len']]
        if len(inputs) == 3:
            inputs.append(None)
            inputs.append(None)
        inputs.append(task_id)
        input_ids = inputs[0]
        token_type_ids = inputs[1]
        attention = model.mnetwork.module.bert(
            input_ids, token_type_ids=token_type_ids)[-1]
        batch_size = batch_data[0].shape[0]
        for i in range(batch_size):
            # slice one sample's attention without clobbering the batch tensor,
            # so the loop also works when batch_size > 1
            sample_attention = tuple(
                item[i:i + 1, :, :, :] for item in attention)
            input_id_list = input_ids[i].tolist()
            tokens = tokenizer.convert_ids_to_tokens(input_id_list)
            idx_sep = listRightIndex(tokens, '[SEP]') + 1
            tokens = tokens[:idx_sep]
            sample_attention = tuple(
                item[:, :, :idx_sep, :idx_sep] for item in sample_attention)
            results.append((sample_attention, tokens))
        idx += batch_size
    return results
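A hypothetical call site for load_model_for_viz_1; the task definition, checkpoint, and data paths below are placeholders.

# Hypothetical usage; all paths are placeholders.
results = load_model_for_viz_1('experiments/glue/mnli.yml',
                               'checkpoints/model_0.pt',
                               'data/mnli_dev.json',
                               model_type='bert-base-cased',
                               use_cuda=torch.cuda.is_available())
attention, tokens = results[0]
# attention holds one (1, n_heads, seq, seq) tensor per layer, trimmed at [SEP]
print(len(attention), attention[0].shape, tokens[:10])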
Example #11
def main():
    logger.info('Launching the MT-DNN training')
    opt = vars(args)
    # update data dir
    opt['data_dir'] = data_dir
    batch_size = args.batch_size
    tasks = {}
    tasks_class = {}
    nclass_list = []
    decoder_opts = []
    task_types = []
    dropout_list = []
    loss_types = []
    kd_loss_types = []

    train_datasets = []
    for dataset in args.train_datasets:
        prefix = dataset.split('_')[0]
        if prefix in tasks: continue
        assert prefix in task_defs.n_class_map
        assert prefix in task_defs.data_type_map
        data_type = task_defs.data_type_map[prefix]
        nclass = task_defs.n_class_map[prefix]
        task_id = len(tasks)
        if args.mtl_opt > 0:
            task_id = tasks_class[nclass] if nclass in tasks_class else len(
                tasks_class)

        task_type = task_defs.task_type_map[prefix]

        dopt = generate_decoder_opt(task_defs.enable_san_map[prefix],
                                    opt['answer_opt'])
        if task_id < len(decoder_opts):
            decoder_opts[task_id] = min(decoder_opts[task_id], dopt)
        else:
            decoder_opts.append(dopt)
        task_types.append(task_type)
        loss_types.append(task_defs.loss_map[prefix])
        kd_loss_types.append(task_defs.kd_loss_map[prefix])

        if prefix not in tasks:
            tasks[prefix] = len(tasks)
            if args.mtl_opt < 1: nclass_list.append(nclass)

        if (nclass not in tasks_class):
            tasks_class[nclass] = len(tasks_class)
            if args.mtl_opt > 0: nclass_list.append(nclass)

        dropout_p = task_defs.dropout_p_map.get(prefix, args.dropout_p)
        dropout_list.append(dropout_p)

        train_path = os.path.join(data_dir, '{}_train.json'.format(dataset))
        logger.info('Loading {} as task {}'.format(train_path, task_id))
        train_data_set = SingleTaskDataset(train_path,
                                           True,
                                           maxlen=args.max_seq_len,
                                           task_id=task_id,
                                           task_type=task_type,
                                           data_type=data_type)
        train_datasets.append(train_data_set)
    train_collater = Collater(dropout_w=args.dropout_w,
                              encoder_type=encoder_type)
    multi_task_train_dataset = MultiTaskDataset(train_datasets)
    multi_task_batch_sampler = MultiTaskBatchSampler(train_datasets,
                                                     args.batch_size,
                                                     args.mix_opt, args.ratio)
    multi_task_train_data = DataLoader(multi_task_train_dataset,
                                       batch_sampler=multi_task_batch_sampler,
                                       collate_fn=train_collater.collate_fn,
                                       pin_memory=args.cuda)

    opt['answer_opt'] = decoder_opts
    opt['task_types'] = task_types
    opt['tasks_dropout_p'] = dropout_list
    opt['loss_types'] = loss_types
    opt['kd_loss_types'] = kd_loss_types

    args.label_size = ','.join([str(l) for l in nclass_list])
    logger.info(args.label_size)
    dev_data_list = []
    test_data_list = []
    test_collater = Collater(is_train=False, encoder_type=encoder_type)
    for dataset in args.test_datasets:
        prefix = dataset.split('_')[0]
        task_id = (tasks_class[task_defs.n_class_map[prefix]]
                   if args.mtl_opt > 0 else tasks[prefix])
        task_type = task_defs.task_type_map[prefix]

        pw_task = False
        if task_type == TaskType.Ranking:
            pw_task = True

        assert prefix in task_defs.data_type_map
        data_type = task_defs.data_type_map[prefix]

        dev_path = os.path.join(data_dir, '{}_dev.json'.format(dataset))
        dev_data = None
        if os.path.exists(dev_path):
            dev_data_set = SingleTaskDataset(dev_path,
                                             False,
                                             maxlen=args.max_seq_len,
                                             task_id=task_id,
                                             task_type=task_type,
                                             data_type=data_type)
            dev_data = DataLoader(dev_data_set,
                                  batch_size=args.batch_size_eval,
                                  collate_fn=test_collater.collate_fn,
                                  pin_memory=args.cuda)
        dev_data_list.append(dev_data)

        test_path = os.path.join(data_dir, '{}_test.json'.format(dataset))
        test_data = None
        if os.path.exists(test_path):
            test_data_set = SingleTaskDataset(test_path,
                                              False,
                                              maxlen=args.max_seq_len,
                                              task_id=task_id,
                                              task_type=task_type,
                                              data_type=data_type)
            test_data = DataLoader(test_data_set,
                                   batch_size=args.batch_size_eval,
                                   collate_fn=test_collater.collate_fn,
                                   pin_memory=args.cuda)
        test_data_list.append(test_data)

    logger.info('#' * 20)
    logger.info(opt)
    logger.info('#' * 20)

    # divide by the number of gradient accumulation steps.
    num_all_batches = args.epochs * len(
        multi_task_train_data) // args.grad_accumulation_step
    logger.info('############# Gradient Accumulation Info #############')
    logger.info('number of steps: {}'.format(args.epochs *
                                             len(multi_task_train_data)))
    logger.info('number of grad accumulation steps: {}'.format(
        args.grad_accumulation_step))
    logger.info('adjusted number of steps: {}'.format(num_all_batches))
    logger.info('############# Gradient Accumulation Info #############')

    bert_model_path = args.init_checkpoint
    state_dict = None

    if encoder_type == EncoderModelType.BERT:
        if os.path.exists(bert_model_path):
            state_dict = torch.load(bert_model_path)
            config = state_dict['config']
            config['attention_probs_dropout_prob'] = args.bert_dropout_p
            config['hidden_dropout_prob'] = args.bert_dropout_p
            config['multi_gpu_on'] = opt["multi_gpu_on"]
            opt.update(config)
        else:
            logger.error('#' * 20)
            logger.error(
                'Could not find the init model!\n The parameters will be initialized randomly!'
            )
            logger.error('#' * 20)
            config = BertConfig(vocab_size_or_config_json_file=30522).to_dict()
            config['multi_gpu_on'] = opt["multi_gpu_on"]
            opt.update(config)
    elif encoder_type == EncoderModelType.ROBERTA:
        bert_model_path = '{}/model.pt'.format(bert_model_path)
        if os.path.exists(bert_model_path):
            new_state_dict = {}
            state_dict = torch.load(bert_model_path)
            for key, val in state_dict['model'].items():
                if key.startswith('decoder.sentence_encoder'):
                    key = 'bert.model.{}'.format(key)
                    new_state_dict[key] = val
                elif key.startswith('classification_heads'):
                    key = 'bert.model.{}'.format(key)
                    new_state_dict[key] = val
            state_dict = {'state': new_state_dict}

    model = MTDNNModel(opt,
                       state_dict=state_dict,
                       num_train_step=num_all_batches)
    if args.resume and args.model_ckpt:
        logger.info('loading model from {}'.format(args.model_ckpt))
        model.load(args.model_ckpt)

    #### model meta str
    headline = '############# Model Arch of MT-DNN #############'
    ### print network
    logger.info('\n{}\n{}\n'.format(headline, model.network))

    # dump config
    config_file = os.path.join(output_dir, 'config.json')
    with open(config_file, 'w', encoding='utf-8') as writer:
        writer.write('{}\n'.format(json.dumps(opt)))
        writer.write('\n{}\n{}\n'.format(headline, model.network))

    logger.info("Total number of params: {}".format(model.total_param))

    # tensorboard
    if args.tensorboard:
        args.tensorboard_logdir = os.path.join(args.output_dir,
                                               args.tensorboard_logdir)
        tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)

    for epoch in range(0, args.epochs):
        logger.warning('At epoch {}'.format(epoch))
        start = datetime.now()

        for i, (batch_meta, batch_data) in enumerate(multi_task_train_data):
            batch_meta, batch_data = Collater.patch_data(
                args.cuda, batch_meta, batch_data)
            task_id = batch_meta['task_id']
            model.update(batch_meta, batch_data)
            if (model.local_updates) % (args.log_per_updates *
                                        args.grad_accumulation_step
                                        ) == 0 or model.local_updates == 1:
                remaining_time = str(
                    (datetime.now() - start) / (i + 1) *
                    (len(multi_task_train_data) - i - 1)).split('.')[0]
                logger.info(
                    'Task [{0:2}] updates[{1:6}] train loss[{2:.5f}] remaining[{3}]'
                    .format(task_id, model.updates, model.train_loss.avg,
                            remaining_time))
                if args.tensorboard:
                    tensorboard.add_scalar('train/loss',
                                           model.train_loss.avg,
                                           global_step=model.updates)

            if args.save_per_updates_on and (
                (model.local_updates) %
                (args.save_per_updates * args.grad_accumulation_step) == 0):
                model_file = os.path.join(
                    output_dir, 'model_{}_{}.pt'.format(epoch, model.updates))
                logger.info('Saving mt-dnn model to {}'.format(model_file))
                model.save(model_file)

        for idx, dataset in enumerate(args.test_datasets):
            prefix = dataset.split('_')[0]
            label_dict = task_defs.global_map.get(prefix, None)
            dev_data = dev_data_list[idx]
            if dev_data is not None:
                with torch.no_grad():
                    dev_metrics, dev_predictions, scores, golds, dev_ids = eval_model(
                        model,
                        dev_data,
                        metric_meta=task_defs.metric_meta_map[prefix],
                        use_cuda=args.cuda,
                        label_mapper=label_dict,
                        task_type=task_defs.task_type_map[prefix])
                for key, val in dev_metrics.items():
                    if args.tensorboard:
                        tensorboard.add_scalar('dev/{}/{}'.format(
                            dataset, key),
                                               val,
                                               global_step=epoch)
                    if isinstance(val, str):
                        logger.warning(
                            'Task {0} -- epoch {1} -- Dev {2}:\n {3}'.format(
                                dataset, epoch, key, val))
                    else:
                        logger.warning(
                            'Task {0} -- epoch {1} -- Dev {2}: {3:.3f}'.format(
                                dataset, epoch, key, val))
                score_file = os.path.join(
                    output_dir, '{}_dev_scores_{}.json'.format(dataset, epoch))
                results = {
                    'metrics': dev_metrics,
                    'predictions': dev_predictions,
                    'uids': dev_ids,
                    'scores': scores
                }
                dump(score_file, results)
                if args.glue_format_on:
                    from experiments.glue.glue_utils import submit
                    official_score_file = os.path.join(
                        output_dir,
                        '{}_dev_scores_{}.tsv'.format(dataset, epoch))
                    submit(official_score_file, results, label_dict)

            # test eval
            test_data = test_data_list[idx]
            if test_data is not None:
                with torch.no_grad():
                    test_metrics, test_predictions, scores, golds, test_ids = eval_model(
                        model,
                        test_data,
                        metric_meta=task_defs.metric_meta_map[prefix],
                        use_cuda=args.cuda,
                        with_label=False,
                        label_mapper=label_dict,
                        task_type=task_defs.task_type_map[prefix])
                score_file = os.path.join(
                    output_dir,
                    '{}_test_scores_{}.json'.format(dataset, epoch))
                results = {
                    'metrics': test_metrics,
                    'predictions': test_predictions,
                    'uids': test_ids,
                    'scores': scores
                }
                dump(score_file, results)
                if args.glue_format_on:
                    from experiments.glue.glue_utils import submit
                    official_score_file = os.path.join(
                        output_dir,
                        '{}_test_scores_{}.tsv'.format(dataset, epoch))
                    submit(official_score_file, results, label_dict)
                logger.info('[new test scores saved.]')

        model_file = os.path.join(output_dir, 'model_{}.pt'.format(epoch))
        model.save(model_file)
    if args.tensorboard:
        tensorboard.close()
Example #12
def compute_heads_importance(args,
                             model,
                             eval_dataloader,
                             compute_entropy=True,
                             compute_importance=True,
                             head_mask=None,
                             actually_pruned=False,
                             verbose=True):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    device = torch.device("cuda" if args['cuda'] else "cpu")
    n_layers = model.mnetwork.module.bert.config.num_hidden_layers
    n_heads = model.mnetwork.module.bert.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    preds = None
    labels = None
    tot_tokens = 0.0

    for batch_meta, batch_data in tqdm(eval_dataloader):
        for i in range(len(batch_data[1])):
            batch_data[1][i] = batch_data[1][i].to(device)
        y = batch_data[batch_meta['label']]
        y = model._to_cuda(y)

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        batch_meta, batch_data = Collater.patch_data(device, batch_meta,
                                                     batch_data)
        logits, attention = model.update(batch_meta,
                                         batch_data,
                                         head_mask=head_mask)

        if compute_entropy:
            for layer, attn in enumerate(attention):
                masked_entropy = entropy(attn.detach()) * batch_data[
                    batch_meta['mask']].float().unsqueeze(1)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()

        # Also store our logits/labels if we want to compute metrics afterwards
        if preds is None:
            preds = logits.detach().cpu().numpy()
            labels = y.detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            labels = np.append(labels, y.detach().cpu().numpy(), axis=0)

        tot_tokens += batch_data[
            batch_meta['mask']].float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args['dont_normalize_importance_by_layer']:
        exponent = 2
        norm_by_layer = torch.pow(
            torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args['dont_normalize_global_importance']:
        head_importance = (head_importance - head_importance.min()) / (
            head_importance.max() - head_importance.min())

    # Print/save matrices
    np.save(os.path.join(args['output_dir'], "attn_entropy.npy"),
            attn_entropy.detach().cpu().numpy())
    np.save(os.path.join(args['output_dir'], "head_importance.npy"),
            head_importance.detach().cpu().numpy())

    if verbose:
        print("Attention entropies")
        print_2d_tensor(attn_entropy)
        print("Head importance scores")
        print_2d_tensor(head_importance)
        print("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(),
                             dtype=torch.long,
                             device=device)
    head_ranks[head_importance.view(-1).sort(
        descending=True)[1]] = torch.arange(head_importance.numel(),
                                            device=device)
    head_ranks = head_ranks.view_as(head_importance)
    if verbose:
        print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, preds, labels
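
The `entropy` helper called in the loop above is not defined in this snippet. A minimal sketch consistent with how it is used (Shannon entropy over the last attention dimension, as in the Hugging Face bertology script this example resembles) could be:

import torch

def entropy(p):
    """Shannon entropy over the last dimension, treating 0 * log(0) as 0."""
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # mask the NaNs produced by 0 * log(0)
    return -plogp.sum(dim=-1)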
Example #13
def main():
    task_def_path = 'data_complex/lcp.yml'
    task = os.path.splitext(os.path.basename(task_def_path))[0]
    task_defs = TaskDefs(task_def_path)
    prefix = task.split('_')[0]
    task_def = task_defs.get_task_def(prefix)
    parser = argparse.ArgumentParser()
    model_config(parser)
    set_config(parser)
    train_config(parser)
    args = parser.parse_args()
    encoder_type = args.encoder_type
    layer_indexes = [int(x) for x in args.layers.split(",")]
    set_environment(args.seed)
    # process data
    data, is_single_sentence = process_data(args)
    data_type = (DataFormat.PremiseOnly
                 if is_single_sentence else DataFormat.PremiseAndOneHypothesis)
    fout_temp = '{}.tmp'.format(args.finput)
    dump_data(data, fout_temp)
    collater = Collater(is_train=False, encoder_type=encoder_type)
    dataset = SingleTaskDataset(fout_temp,
                                False,
                                maxlen=args.max_seq_length,
                                task_def=task_def)
    batcher = DataLoader(dataset,
                         batch_size=args.batch_size,
                         collate_fn=collater.collate_fn,
                         pin_memory=args.cuda)
    opt = vars(args)
    # load model
    if os.path.exists(args.checkpoint):
        state_dict = torch.load(args.checkpoint)
        config = state_dict['config']
        config['dump_feature'] = True
        config['local_rank'] = -1
        opt.update(config)
    else:
        logger.error('#' * 20)
        logger.error('Could not find the init model! Aborting.')
        logger.error('#' * 20)
        return
    num_all_batches = len(batcher)
    model = MTDNNModel(
        opt,
        state_dict=state_dict,
        num_train_step=num_all_batches)
    if args.cuda:
        model.cuda()

    features_dict = {}
    for batch_meta, batch_data in batcher:
        batch_meta, batch_data = Collater.patch_data(args.cuda, batch_meta, batch_data)
        all_encoder_layers, _ = model.extract(batch_meta, batch_data)
        embeddings = [all_encoder_layers[idx].detach().cpu().numpy()
                      for idx in layer_indexes]

        uids = batch_meta['uids']
        masks = batch_data[batch_meta['mask']].detach().cpu().numpy().tolist()
        for idx, uid in enumerate(uids):
            slen = sum(masks[idx])
            features = {}
            for yidx, layer in enumerate(layer_indexes):
                features[layer] = str(embeddings[yidx][idx][:slen].tolist())
            features_dict[uid] = features

    # save features
    with open(args.foutput, 'w', encoding='utf-8') as writer:
        for sample in data:
            uid = sample['uid']
            tokens = sample['tokens']
            feature = features_dict[uid]
            feature['tokens'] = tokens
            feature['uid'] = uid
            writer.write('{}\n'.format(json.dumps(feature)))
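
The loop above writes one JSON object per line, each carrying `uid`, `tokens`, and one stringified embedding list per requested layer (note that `json.dumps` turns the integer layer keys into strings). A minimal sketch for reading the dump back (`load_features` is a hypothetical helper, not part of the original code):

import ast
import json

def load_features(path, layer_indexes):
    """Hypothetical reader for the JSON-lines feature dump written above."""
    samples = {}
    with open(path, encoding='utf-8') as reader:
        for line in reader:
            record = json.loads(line)
            # Embeddings were stored via str(list), so parse them back.
            embeddings = {layer: ast.literal_eval(record[str(layer)])
                          for layer in layer_indexes}
            samples[record['uid']] = (record['tokens'], embeddings)
    return samples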
Example #14
def main():
    logger.info('Launching the MT-DNN training')
    opt = vars(args)
    # update data dir
    opt['data_dir'] = data_dir
    batch_size = args.batch_size

    # tensorboard
    tensorboard = None
    if args.tensorboard:
        args.tensorboard_logdir = os.path.join(args.output_dir,
                                               args.tensorboard_logdir)
        tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)

    json_logfile = os.path.join(args.output_dir, "runtime_log.json")

    tasks = {}
    tasks_class = {}
    nclass_list = []
    decoder_opts = []
    task_types = []
    dropout_list = []
    loss_types = []
    kd_loss_types = []

    train_datasets = []
    for dataset in args.train_datasets:
        prefix = dataset.split('_')[0]
        if prefix in tasks: continue
        assert prefix in task_defs.n_class_map
        assert prefix in task_defs.data_type_map
        data_type = task_defs.data_type_map[prefix]
        nclass = task_defs.n_class_map[prefix]
        task_id = len(tasks)
        if args.mtl_opt > 0:
            task_id = tasks_class[nclass] if nclass in tasks_class else len(
                tasks_class)

        task_type = task_defs.task_type_map[prefix]

        dopt = generate_decoder_opt(task_defs.enable_san_map[prefix],
                                    opt['answer_opt'])
        if task_id < len(decoder_opts):
            decoder_opts[task_id] = min(decoder_opts[task_id], dopt)
        else:
            decoder_opts.append(dopt)
        task_types.append(task_type)
        loss_types.append(task_defs.loss_map[prefix])
        kd_loss_types.append(task_defs.kd_loss_map[prefix])

        if prefix not in tasks:
            tasks[prefix] = len(tasks)
            if args.mtl_opt < 1: nclass_list.append(nclass)

        if nclass not in tasks_class:
            tasks_class[nclass] = len(tasks_class)
            if args.mtl_opt > 0: nclass_list.append(nclass)

        dropout_p = task_defs.dropout_p_map.get(prefix, args.dropout_p)
        dropout_list.append(dropout_p)

        train_path = os.path.join(data_dir, '{}_train.json'.format(dataset))
        logger.info('Loading {} as task {}'.format(train_path, task_id))
        train_data_set = SingleTaskDataset(train_path,
                                           True,
                                           maxlen=args.max_seq_len,
                                           task_id=task_id,
                                           task_type=task_type,
                                           data_type=data_type)
        train_datasets.append(train_data_set)
    train_collater = Collater(dropout_w=args.dropout_w,
                              encoder_type=encoder_type)
    multi_task_train_dataset = MultiTaskDataset(train_datasets)
    n_tasks = len(tasks)
    dataset_sizes = [len(dataset) for dataset in train_datasets]
    if "random" in args.controller:
        controller = CONTROLLERS[args.controller](
            n_task=n_tasks,
            dataset_names=args.train_datasets,
            dataset_sizes=dataset_sizes,
            batch_size=args.batch_size,
            rebatch_size=args.batch_size_train,
            tensorboard=tensorboard,
            log_filename=json_logfile)
    else:
        controller = CONTROLLERS[args.controller](
            n_task=n_tasks,
            phi=args.phi,
            K=args.concurrent_cnt,
            dataset_names=args.train_datasets,
            dataset_sizes=dataset_sizes,
            max_cnt=args.max_queue_cnt,
            batch_size=args.batch_size,
            rebatch_size=args.batch_size_train,
            tensorboard=tensorboard,
            log_filename=json_logfile)

    multi_task_batch_sampler = ACLSampler(train_datasets,
                                          args.batch_size,
                                          controller=controller)
    multi_task_train_data = DataLoader(multi_task_train_dataset,
                                       batch_sampler=multi_task_batch_sampler,
                                       collate_fn=train_collater.collate_fn,
                                       pin_memory=args.cuda)

    opt['answer_opt'] = decoder_opts
    opt['task_types'] = task_types
    opt['tasks_dropout_p'] = dropout_list
    opt['loss_types'] = loss_types
    opt['kd_loss_types'] = kd_loss_types

    args.label_size = ','.join([str(l) for l in nclass_list])
    logger.info(args.label_size)
    dev_data_list = []
    test_data_list = []
    test_collater = Collater(is_train=False, encoder_type=encoder_type)
    for dataset in args.test_datasets:
        prefix = dataset.split('_')[0]
        task_id = (tasks_class[task_defs.n_class_map[prefix]]
                   if args.mtl_opt > 0 else tasks[prefix])
        task_type = task_defs.task_type_map[prefix]

        pw_task = False
        if task_type == TaskType.Ranking:
            pw_task = True

        assert prefix in task_defs.data_type_map
        data_type = task_defs.data_type_map[prefix]

        dev_path = os.path.join(data_dir, '{}_dev.json'.format(dataset))
        dev_data = None
        if os.path.exists(dev_path):
            dev_data_set = SingleTaskDataset(dev_path,
                                             False,
                                             maxlen=args.max_seq_len,
                                             task_id=task_id,
                                             task_type=task_type,
                                             data_type=data_type)
            dev_data = DataLoader(dev_data_set,
                                  batch_size=args.batch_size_eval,
                                  collate_fn=test_collater.collate_fn,
                                  pin_memory=args.cuda)
        dev_data_list.append(dev_data)

        test_path = os.path.join(data_dir, '{}_test.json'.format(dataset))
        test_data = None
        if os.path.exists(test_path):
            test_data_set = SingleTaskDataset(test_path,
                                              False,
                                              maxlen=args.max_seq_len,
                                              task_id=task_id,
                                              task_type=task_type,
                                              data_type=data_type)
            test_data = DataLoader(test_data_set,
                                   batch_size=args.batch_size_eval,
                                   collate_fn=test_collater.collate_fn,
                                   pin_memory=args.cuda)
        test_data_list.append(test_data)

    logger.info('#' * 20)
    logger.info(opt)
    logger.info('#' * 20)

    # divide the total step count by the number of gradient accumulation steps.
    num_all_batches = args.epochs * len(
        multi_task_train_data) // args.grad_accumulation_step
    logger.info('############# Gradient Accumulation Info #############')
    logger.info('number of steps: {}'.format(args.epochs *
                                             len(multi_task_train_data)))
    logger.info('number of grad accumulation steps: {}'.format(
        args.grad_accumulation_step))
    logger.info('adjusted number of steps: {}'.format(num_all_batches))
    logger.info('############# Gradient Accumulation Info #############')

    bert_model_path = args.init_checkpoint
    state_dict = None

    if encoder_type == EncoderModelType.BERT:
        if os.path.exists(bert_model_path):
            state_dict = torch.load(bert_model_path)
            config = state_dict['config']
            config['attention_probs_dropout_prob'] = args.bert_dropout_p
            config['hidden_dropout_prob'] = args.bert_dropout_p
            config['multi_gpu_on'] = opt["multi_gpu_on"]
            opt.update(config)
        else:
            logger.error('#' * 20)
            logger.error(
                'Could not find the init model!\nThe parameters will be initialized randomly!'
            )
            logger.error('#' * 20)
            config = BertConfig(vocab_size_or_config_json_file=30522).to_dict()
            config['multi_gpu_on'] = opt["multi_gpu_on"]
            opt.update(config)
    elif encoder_type == EncoderModelType.ROBERTA:
        bert_model_path = '{}/model.pt'.format(bert_model_path)
        if os.path.exists(bert_model_path):
            new_state_dict = {}
            state_dict = torch.load(bert_model_path)
            for key, val in state_dict['model'].items():
                if key.startswith('decoder.sentence_encoder'):
                    key = 'bert.model.{}'.format(key)
                    new_state_dict[key] = val
                elif key.startswith('classification_heads'):
                    key = 'bert.model.{}'.format(key)
                    new_state_dict[key] = val
            state_dict = {'state': new_state_dict}

    # add score history
    score_history = [[] for _ in range(len(args.test_datasets))]
    total_scores = []

    model = MTDNNModel(opt,
                       state_dict=state_dict,
                       num_train_step=num_all_batches)
    if args.resume and args.model_ckpt:
        logger.info('loading model from {}'.format(args.model_ckpt))
        model.load(args.model_ckpt)

    # log the model architecture
    headline = '############# Model Arch of MT-DNN #############'
    logger.info('\n{}\n{}\n'.format(headline, model.network))

    # dump config
    config_file = os.path.join(output_dir, 'config.json')
    with open(config_file, 'w', encoding='utf-8') as writer:
        writer.write('{}\n'.format(json.dumps(opt)))
        writer.write('\n{}\n{}\n'.format(headline, model.network))

    logger.info("Total number of params: {}".format(model.total_param))

    for epoch in range(0, args.epochs):
        logger.warning('At epoch {0}/{1}'.format(epoch + 1, args.epochs))
        start = datetime.now()
        total_len = len(controller)
        controller.set_epoch(epoch)
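        # Each batch's loss is queued on the controller together with the batch
        # itself; the controller is then stepped with the model once per
        # iteration (see controller.step below).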
        for i, (batch_meta, batch_data) in enumerate(multi_task_train_data):
            batch_meta, batch_data = Collater.patch_data(
                args.cuda, batch_meta, batch_data)
            task_id = batch_meta['task_id']
            loss = model.calculate_loss(batch_meta, batch_data)
            controller.insert(task_id, (batch_meta, batch_data), loss.item())

            if i % args.log_per_updates == 0:
                remaining_time = str(
                    (datetime.now() - start) / (controller.cur_step + 1) *
                    (total_len - controller.cur_step - 1)).split('.')[0]
                logger.info("Epoch {0} Progress {1} / {2}  ({3:.2%})".format(
                    epoch + 1, controller.cur_step, total_len,
                    controller.cur_step * 1.0 / total_len))
                # logger.info("Progress {0} / {1}  ({2:.2f}%)".format(i, total_len, i*100.0/total_len))
                logger.info(
                    'Task [{0:2}] updates[{1:6}] train loss[{2:.5f}] remaining[{3}]'
                    .format(task_id, model.updates, model.train_loss.avg,
                            ramaining_time))

                summary_str = controller.summary()
                for line in summary_str.split("\n"):
                    logger.info(line)

                if args.tensorboard:
                    tensorboard.add_scalar('train/loss',
                                           model.train_loss.avg,
                                           global_step=model.updates)

            controller.step(model=model)

            if args.save_per_updates_on and (
                (model.local_updates) %
                (args.save_per_updates * args.grad_accumulation_step) == 0):
                model_file = os.path.join(
                    output_dir, 'model_{}_{}.pt'.format(epoch, model.updates))
                logger.info('Saving mt-dnn model to {}'.format(model_file))
                model.save(model_file)

        total_average_score = 0.0
        scoring_cnt = 0
        score_dict = dict()
        scoring_datasets = "cola,sst,mrpc,stsb,qqp,mnli,qnli,rte,wnli".split(",")
        logger.info('Start Testing')
        for idx, dataset in enumerate(args.test_datasets):
            prefix = dataset.split('_')[0]
            label_dict = task_defs.global_map.get(prefix, None)
            dev_data = dev_data_list[idx]
            if dev_data is not None:
                with torch.no_grad():
                    dev_metrics, dev_predictions, scores, golds, dev_ids = eval_model(
                        model,
                        dev_data,
                        metric_meta=task_defs.metric_meta_map[prefix],
                        use_cuda=args.cuda,
                        label_mapper=label_dict,
                        task_type=task_defs.task_type_map[prefix])
                task_score = 0.0
                for key, val in dev_metrics.items():
                    if args.tensorboard:
                        tensorboard.add_scalar('dev/{}/{}'.format(
                            dataset, key),
                                               val,
                                               global_step=epoch)
                    if isinstance(val, str):
                        logger.warning(
                            'Task {0} -- epoch {1} -- Dev {2}:\n {3}'.format(
                                dataset, epoch + 1, key, val))
                    else:
                        logger.warning(
                            'Task {0} -- epoch {1} -- Dev {2}: {3:.2f}'.format(
                                dataset, epoch + 1, key, val))
                        # only numeric metrics contribute to the task score
                        task_score += val
                if len(dev_metrics) > 1:
                    task_score /= len(dev_metrics)
                    logger.warning(
                        'Task {0} -- epoch {1} -- Dev {2}: {3:.2f}'.format(
                            dataset, epoch + 1, "Average", task_score))
                if prefix in scoring_datasets:
                    scoring_cnt += 1
                    if prefix not in score_dict:
                        score_dict[prefix] = task_score
                    else:
                        score_dict[prefix] = (score_dict[prefix] +
                                              task_score) / 2
                    total_average_score += task_score

                score_history[idx].append("%.2f" % task_score)
                logger.warning('Task {0} -- epoch {1} -- Dev {2}: {3}'.format(
                    dataset, epoch + 1, "History", score_history[idx]))

                score_file = os.path.join(
                    output_dir, '{}_dev_scores_{}.json'.format(dataset, epoch))
                results = {
                    'metrics': dev_metrics,
                    'predictions': dev_predictions,
                    'uids': dev_ids,
                    'scores': scores
                }
                dump(score_file, results)
                if args.glue_format_on:
                    from experiments.glue.glue_utils import submit
                    official_score_file = os.path.join(
                        output_dir,
                        '{}_dev_scores_{}.tsv'.format(dataset, epoch))
                    submit(official_score_file, results, label_dict)

            # test eval
            test_data = test_data_list[idx]
            if test_data is not None:
                with torch.no_grad():
                    test_metrics, test_predictions, scores, golds, test_ids = eval_model(
                        model,
                        test_data,
                        metric_meta=task_defs.metric_meta_map[prefix],
                        use_cuda=args.cuda,
                        with_label=False,
                        label_mapper=label_dict,
                        task_type=task_defs.task_type_map[prefix])
                score_file = os.path.join(
                    output_dir,
                    '{}_test_scores_{}.json'.format(dataset, epoch))
                results = {
                    'metrics': test_metrics,
                    'predictions': test_predictions,
                    'uids': test_ids,
                    'scores': scores
                }
                dump(score_file, results)
                if args.glue_format_on:
                    from experiments.glue.glue_utils import submit
                    official_score_file = os.path.join(
                        output_dir,
                        '{}_test_scores_{}.tsv'.format(dataset, epoch))
                    submit(official_score_file, results, label_dict)
                logger.info('[new test scores saved.]')
        scored_task_cnt = len(score_dict)
        if scored_task_cnt > 0:
            mean_value = np.mean(list(score_dict.values()))
            logger.warning(
                'Epoch {0} -- Dev {1} Tasks, Average Score : {2:.3f}'.format(
                    epoch + 1, scored_task_cnt, mean_value))
            score_dict['avg'] = mean_value
            total_scores.append(score_dict)

        model_file = os.path.join(output_dir, 'model_{}.pt'.format(epoch))
        model.save(model_file)
    for i, total_score in enumerate(total_scores):
        logger.info(total_score)

    if args.tensorboard:
        tensorboard.close()
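
The `dump(score_file, results)` helper used throughout this example is not shown. A minimal sketch consistent with how it is called (serializing the results dict to a JSON file) might be:

import json

def dump(path, data):
    """Hypothetical stand-in for the dump() helper used above."""
    with open(path, 'w', encoding='utf-8') as writer:
        json.dump(data, writer)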
Example #15
def main():
    parser = argparse.ArgumentParser()
    model_config(parser)
    set_config(parser)
    train_config(parser)
    args = parser.parse_args()
    encoder_type = args.encoder_type
    layer_indexes = [int(x) for x in args.layers.split(",")]
    set_environment(args.seed)
    # process data
    data, is_single_sentence = process_data(args)
    data_type = (DataFormat.PremiseOnly
                 if is_single_sentence else DataFormat.PremiseAndOneHypothesis)
    fout_temp = "{}.tmp".format(args.finput)
    dump_data(data, fout_temp)
    collater = Collater(is_train=False, encoder_type=encoder_type)
    dataset = SingleTaskDataset(
        fout_temp,
        False,
        maxlen=args.max_seq_length,
    )
    batcher = DataLoader(
        dataset,
        batch_size=args.batch_size,
        collate_fn=collater.collate_fn,
        pin_memory=args.cuda,
    )
    opt = vars(args)
    # load model
    if os.path.exists(args.checkpoint):
        state_dict = torch.load(args.checkpoint)
        config = state_dict["config"]
        config["dump_feature"] = True
        opt.update(config)
    else:
        logger.error("#" * 20)
        logger.error("Could not find the init model! Aborting.")
        logger.error("#" * 20)
        return
    num_all_batches = len(batcher)
    model = MTDNNModel(opt,
                       state_dict=state_dict,
                       num_train_step=num_all_batches)
    if args.cuda:
        model.cuda()

    features_dict = {}
    for batch_meta, batch_data in batcher:
        batch_meta, batch_data = Collater.patch_data(args.cuda, batch_meta,
                                                     batch_data)
        all_encoder_layers, _ = model.extract(batch_meta, batch_data)
        embeddings = [
            all_encoder_layers[idx].detach().cpu().numpy()
            for idx in layer_indexes
        ]

        uids = batch_meta["uids"]
        masks = batch_data[batch_meta["mask"]].detach().cpu().numpy().tolist()
        for idx, uid in enumerate(uids):
            slen = sum(masks[idx])
            features = {}
            for yidx, layer in enumerate(layer_indexes):
                features[layer] = str(embeddings[yidx][idx][:slen].tolist())
            features_dict[uid] = features

    # save features
    with open(args.foutput, "w", encoding="utf-8") as writer:
        for sample in data:
            uid = sample["uid"]
            feature = features_dict[uid]
            feature["uid"] = uid
            writer.write("{}\n".format(json.dumps(feature)))
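
Note that the feature file written here follows the same JSON-lines layout as in Example #13, minus the `tokens` field, so the reader sketch shown after that example applies once the `tokens` lookup is dropped.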