Example #1
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--ernie_model", default=None, type=str, required=True,
                        help="Ernie pre-trained model")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--threshold', type=float, default=.3)
    ##########ADD##
    parser.add_argument("--K_V_dim",
                        type=int,
                        default=100,
                        help="Key and Value dim == KG representation dim")

    parser.add_argument("--Q_dim",
                        type=int,
                        default=768,
                        help="Query dim == Bert six output layer representation dim")
    parser.add_argument('--graphsage',
                        default=False,
                        action='store_true',
                        help="Whether to use Attention GraphSage instead of GAT")
    parser.add_argument('--self_att',
                        default=True,
                        action='store_true',
                        help="Whether to use GAT")
    parser.add_argument('--data_token',
                        type=str,
                        default='None',
                        help="Using token ids")
    ###############

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    processor = TypingProcessor()

    #tokenizer_label = BertTokenizer_label.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)
    #tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)
    tokenizer_label = RobertaTokenizer_label.from_pretrained(args.ernie_model)
    tokenizer = RobertaTokenizer.from_pretrained(args.ernie_model)

    train_examples = None
    num_train_steps = None
    train_examples, label_list, d = processor.get_train_examples(args.data_dir)
    label_list = sorted(label_list)
    #class_weight = [min(d[x], 100) for x in label_list]
    #logger.info(class_weight)
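    # Build a label-correlation matrix S over the sorted labels:
    # S[i][j] = 1.0 when label j's name is contained in label i's name
    # (e.g. a hypothetical "/person" inside "/person/artist"), which
    # presumably encodes the type hierarchy for the model.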
    S = []
    for l in label_list:
        s = []
        for ll in label_list:
            if ll in l:
                s.append(1.)
            else:
                s.append(0.)
        S.append(s)
    num_train_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model, _ = BertForEntityTyping.from_pretrained(args.ernie_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
              num_labels = len(label_list), args=args)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)


    global_step = 0

    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer_label, tokenizer, args.threshold)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_input_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)
        all_ent_mask = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)
        all_labels = torch.tensor([f.labels for f in train_features], dtype=torch.float)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_input_ent, all_ent_mask, all_labels)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        output_loss_file = os.path.join(args.output_dir, "loss")
        loss_fout = open(output_loss_file, 'w')
        model.train()
        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            # Reset the running metrics each epoch so the statistics printed
            # at the end of the epoch cover only that epoch.
            eval_loss, eval_accuracy = 0, 0
            nb_eval_steps, nb_eval_examples = 0, 0
            pred = []
            true = []
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))
                input_ids, input_mask, segment_ids, input_ent, ent_mask, labels = batch

                input_ent += 1  # shift entity ids so the padding id -1 maps to 0
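                # Fetch knowledge-graph key/value tensors for both attention
                # hops of the (now 1-shifted) entity ids.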
                k_1, v_1, k_2, v_2 = load_k_v_queryR_small(input_ent)

                #loss = model(input_ids, segment_ids, input_mask, input_ent.float(), ent_mask, labels.half(), k_1.half(), v_1.half(), k_2.half(), v_2.half())
                ###

                #######
                loss, logits = model(input_ids, segment_ids, input_mask, input_ent.float(), ent_mask, labels.half(), k_1.half(), v_1.half(), k_2.half(), v_2.half())
                #loss, logits = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, labels, k_1.half(), v_1.half(), k_2.half(), v_2.half())
                tmp_eval_accuracy, tmp_pred, tmp_true = accuracy(logits, labels)
                pred.extend(tmp_pred)
                true.extend(tmp_true)

                eval_loss += loss.mean().item()
                eval_accuracy += tmp_eval_accuracy

                nb_eval_examples += input_ids.size(0)
                nb_eval_steps += 1
                #######

                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                loss_fout.write("{}\n".format(loss.item()*args.gradient_accumulation_steps))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    if global_step % 150 == 0 and global_step > 0:
                        model_to_save = model.module if hasattr(model, 'module') else model
                        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
                        torch.save(model_to_save.state_dict(), output_model_file)


            model_to_save = model.module if hasattr(model, 'module') else model
            output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(epoch))
            torch.save(model_to_save.state_dict(), output_model_file)

            ######################
            def f1(p, r):
                if r == 0.:
                    return 0.
                return 2 * p * r / float(p + r)
            def loose_macro(true, pred):
                num_entities = len(true)
                p = 0.
                r = 0.
                for true_labels, predicted_labels in zip(true, pred):
                    if len(predicted_labels) > 0:
                        p += len(set(predicted_labels).intersection(set(true_labels))) / float(len(predicted_labels))
                    if len(true_labels):
                        r += len(set(predicted_labels).intersection(set(true_labels))) / float(len(true_labels))
                precision = p / num_entities
                recall = r / num_entities
                return precision, recall, f1(precision, recall)
            def loose_micro(true, pred):
                num_predicted_labels = 0.
                num_true_labels = 0.
                num_correct_labels = 0.
                for true_labels, predicted_labels in zip(true, pred):
                    num_predicted_labels += len(predicted_labels)
                    num_true_labels += len(true_labels)
                    num_correct_labels += len(set(predicted_labels).intersection(set(true_labels)))
                if num_predicted_labels > 0:
                    precision = num_correct_labels / num_predicted_labels
                else:
                    precision = 0.
                recall = num_correct_labels / num_true_labels
                return precision, recall, f1(precision, recall)
            ######################
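            # Example: true=[[0, 1]], pred=[[1]] gives precision 1.0 and
            # recall 0.5, so F1 = 2*1.0*0.5/1.5 ~= 0.667 under both metrics;
            # loose_micro pools label counts over all mentions, while
            # loose_macro averages per-mention precision/recall first.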
            #######
            eval_loss = eval_loss / nb_eval_steps
            eval_accuracy = eval_accuracy / nb_eval_examples
            print("============")
            print("loss:",eval_loss)
            print("acc:",eval_accuracy)
            print('macro:', loose_macro(true, pred))
            print('micro:', loose_micro(true, pred))
            print("============")
            #######


        '''
        ####################################################
        ####################################################
        ####################################################
        print("####################################################")
        print("####################################################")
        print("################Eval on Train data##################")
        print("####################################################")
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        pred = []
        true = []
        #for epoch in trange(int(args.num_train_epochs), desc="Epoch"): #same eval values!
        for epoch in trange(int(2), desc="Epoch"): #same eval values!
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))
                input_ids, input_mask, segment_ids, input_ent, ent_mask, labels = batch

                input_ent +=1
                k_1, v_1, k_2, v_2 = load_k_v_queryR_small(input_ent)

                #loss = model(input_ids, segment_ids, input_mask, input_ent.float(), ent_mask, labels.half(), k_1.half(), v_1.half(), k_2.half(), v_2.half())
                ###

                #######
                loss, logits = model(input_ids, segment_ids, input_mask, input_ent.float(), ent_mask, labels.half(), k_1.half(), v_1.half(), k_2.half(), v_2.half())
                tmp_eval_accuracy, tmp_pred, tmp_true = accuracy(logits, labels)
                pred.extend(tmp_pred)
                true.extend(tmp_true)

                eval_loss += loss.mean().item()
                eval_accuracy += tmp_eval_accuracy

                nb_eval_examples += input_ids.size(0)
                nb_eval_steps += 1
                #######

                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1


            eval_loss = eval_loss / nb_eval_steps
            eval_accuracy = eval_accuracy / nb_eval_examples
            print("============")
            print("loss:",eval_loss)
            print("acc:",eval_accuracy)
            print('macro:', loose_macro(true, pred))
            print('micro:', loose_micro(true, pred))
            print("============")
        ####################################################
        ####################################################
        ####################################################
        '''

    exit(0)
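
Note: the examples in this file all anneal the learning rate by hand through a `warmup_linear` helper that none of the snippets define. In the pytorch-pretrained-BERT codebase these scripts build on, it is essentially the following; a minimal sketch, with the body to be read as an approximation of that library's schedule:

def warmup_linear(x, warmup=0.002):
    # Linearly ramp the LR multiplier from 0 to 1 over the first `warmup`
    # fraction of training, then decay it linearly back toward 0.
    if x < warmup:
        return x / warmup
    return 1.0 - x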
Example #2
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--ernie_model",
                        default=None,
                        type=str,
                        required=True,
                        help="Ernie pre-trained model")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        default=False,
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        default=False,
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--threshold', type=float, default=.3)
    parser.add_argument("--vec_file",
                        default=None,
                        type=str,
                        required=True,
                        help="File with embeddings")
    parser.add_argument("--qid_file",
                        default=None,
                        type=str,
                        required=True,
                        help="File with qid mapping")
    parser.add_argument("--use_lim_ents",
                        default=None,
                        type=str,
                        required=True,
                        help="Whether to use limited entities")

    args = parser.parse_args()

    processors = FewrelProcessor

    num_labels_task = 80

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size /
                                args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train:
        raise ValueError("`do_train` must be True for this script.")

    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and args.do_train:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    processor = processors()
    num_labels = num_labels_task
    label_list = None

    tokenizer = BertTokenizer.from_pretrained(args.ernie_model,
                                              do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_steps = None
    train_examples, label_list = processor.get_train_examples(args.data_dir)
    num_train_steps = int(
        len(train_examples) / args.train_batch_size /
        args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model, _ = BertForSequenceClassification.from_pretrained(
        args.ernie_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE /
        'distributed_{}'.format(args.local_rank),
        num_labels=num_labels)
    # if args.fp16:
    #     model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_grad = [
        'bert.encoder.layer.11.output.dense_ent',
        'bert.encoder.layer.11.output.LayerNorm_ent'
    ]
    param_optimizer = [(n, p) for n, p in param_optimizer
                       if not any(nd in n for nd in no_grad)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            #from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False)
        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
            # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level="O2",
                                              loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
    global_step = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer,
            args.threshold, args.qid_file)

        # check for limited ents
        lim_ents = []
        lim_check = (args.use_lim_ents == "y")
        if lim_check:
            lim_ents = lim_ent_map(0, "kg_embeddings/dbp_eid_2_wd_eid.txt")
            logger.info(
                "Limited entities flag is on. Count of unique entities considered: "
                + str(len(lim_ents)))

        vecs = []
        vecs.append([0] * 100)  # CLS
        lineindex = 1
        uid_map = {}
        logger.info("Reading embeddings file.")
        with open(args.vec_file, 'r') as fin:
            for line in fin:
                vec = line.strip().split('\t')
                # first element is unique id
                uniqid = int(vec[0])
                # map line index to unique id
                uid_map[uniqid] = lineindex
                # increment line index
                lineindex = lineindex + 1
                if not lim_check or uniqid in lim_ents:
                    vec = [float(x) for x in vec[1:101]]
                else:
                    vec = vecs[0]
                vecs.append(vec)
        embed = torch.FloatTensor(vecs)
        embed = torch.nn.Embedding.from_pretrained(embed)
        #embed = torch.nn.Embedding(5041175, 100)
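        # Note: Embedding.from_pretrained freezes the weights by default
        # (freeze=True), so the entity vectors are not fine-tuned here.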

        logger.info("Shape of entity embedding: " + str(embed.weight.size()))
        del vecs

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
        all_ent = torch.tensor([f.input_ent for f in train_features],
                               dtype=torch.long)
        all_ent_masks = torch.tensor([f.ent_mask for f in train_features],
                                     dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_ent, all_ent_masks,
                                   all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        output_loss_file = os.path.join(args.output_dir, "loss")
        loss_fout = open(output_loss_file, 'w')
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(
                    t.to(device) if i != 3 else t for i, t in enumerate(batch))
                input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch
                input_ent = embed(input_ent + 1).to(device)  # -1 -> 0
                loss = model(input_ids, segment_ids, input_mask,
                             input_ent.half(), ent_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    try:
                        from apex import amp
                    except ImportError:
                        raise ImportError(
                            "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
                        )
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                loss_fout.write("{}\n".format(loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(
                        global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
            model_to_save = model.module if hasattr(model, 'module') else model
            output_model_file = os.path.join(
                args.output_dir, "pytorch_model.bin_{}".format(global_step))
            torch.save(model_to_save.state_dict(), output_model_file)

        # Save a trained model
        model_to_save = model.module if hasattr(
            model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        torch.save(model_to_save.state_dict(), output_model_file)
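
The `--vec_file` loop in Example #2 expects a tab-separated text file whose lines begin with a unique entity id followed by at least 100 embedding floats. A minimal sketch for writing a synthetic file in that layout (hypothetical helper, ids, and values; useful only for smoke-testing the loader):

import random

def write_dummy_vec_file(path, n_entities=10, dim=100):
    # Emit lines of the form "<uniqid>\t<v1>\t...\t<v_dim>".
    with open(path, 'w') as fout:
        for uniqid in range(n_entities):
            vals = ['%.6f' % random.uniform(-1.0, 1.0) for _ in range(dim)]
            fout.write('\t'.join([str(uniqid)] + vals) + '\n')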
Example #3
    def train_model(model, train_dataloader, validation_dataloader, epochs,
                    device, loss_fn, embed):

        # Prepare optimizer
        param_optimizer = list(model.named_parameters())
        no_grad = [
            'bert.encoder.layer.11.output.dense_ent',
            'bert.encoder.layer.11.output.LayerNorm_ent'
        ]
        param_optimizer = [(n, p) for n, p in param_optimizer
                           if not any(nd in n for nd in no_grad)]
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]

        # Total number of training steps is number of batches * number of epochs.
        total_steps = len(train_dataloader) * epochs

        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=2e-5,
                             warmup=0.1,
                             t_total=total_steps)

        # Measure the total training time for the whole run.
        total_t0 = time.time()

        history = defaultdict(list)
        best_accuracy = 0

        for epoch in range(epochs):

            # ========================================
            #               Training
            # ========================================

            print('')
            print('======== Epoch {:} / {:} ========'.format(
                epoch + 1, epochs))
            # print(f'======== Epoch {epoch + 1} / {epochs} ========')
            print('Training...')

            # Measure how long the training epoch takes.
            t0 = time.time()

            ErnieModel.train_epoch(model, optimizer, train_dataloader, device,
                                   embed, total_steps)

            print('Epoch {:} took {:} minutes'.format(epoch + 1,
                                                      (time.time() - t0) / 60))

            # ========================================
            #               Validation
            # ========================================

            print('')
            print("Running Validation...")

            val_acc, val_loss = ErnieModel.eval_model(model,
                                                      validation_dataloader,
                                                      device, embed)
            print('Validation loss: {:}, accuracy: {:}'.format(
                val_loss, val_acc))
            print('')

            history['val_acc'].append(val_acc)
            history['val_loss'].append(val_loss)

            if val_acc > best_accuracy:
                torch.save(model.state_dict(), 'best_model_state.bin')
                best_accuracy = val_acc

        print('')
        print('Total Training took: {:} minutes'.format(
            (time.time() - total_t0) / 60))
        print('Best validation accuracy: {:}'.format(best_accuracy))
        return history
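
Example #3 keeps only the checkpoint with the best validation accuracy. A minimal sketch for restoring it later, assuming the same model class and a `device` as in the surrounding code:

model.load_state_dict(torch.load('best_model_state.bin', map_location=device))
model.to(device)
model.eval()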
Example #4
0
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--ernie_model",
                        default=None,
                        type=str,
                        required=True,
                        help="Ernie pre-trained model")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        default=False,
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        default=False,
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--threshold', type=float, default=.1)

    args = parser.parse_args()

    processors = SemevalProcessor

    num_labels_task = 3

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size /
                                args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and args.do_train:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    processor = processors()
    num_labels = num_labels_task
    label_list = None

    tokenizer = BertTokenizer.from_pretrained(args.ernie_model,
                                              do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_steps = None
    train_examples, label_list = processor.get_train_examples(args.data_dir)
    num_train_steps = int(
        len(train_examples) / args.train_batch_size /
        args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model, _ = BertForSequenceClassification.from_pretrained(
        args.ernie_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE /
        'distributed_{}'.format(args.local_rank),
        num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_grad = [
        'bert.encoder.layer.11.output.dense_ent',
        'bert.encoder.layer.11.output.LayerNorm_ent'
    ]
    param_optimizer = [(n, p) for n, p in param_optimizer
                       if not any(nd in n for nd in no_grad)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
    global_step = 0
    if args.do_train:
        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      args.max_seq_length,
                                                      tokenizer,
                                                      args.threshold)

        vecs = []
        vecs.append([0] * 100)
        logger.info("Loading entity embedding.")
        with open("kg_embed/entity2vec.vec", 'r') as fin:
            for line in fin:
                vec = line.strip().split('\t')
                vec = [float(x) for x in vec]
                vecs.append(vec)
        embed = torch.FloatTensor(vecs)
        embed = torch.nn.Embedding.from_pretrained(embed)
        #         embed = torch.nn.Embedding(5041175, 100)

        logger.info("Shape of entity embedding: " + str(embed.weight.size()))
        del vecs

        if args.do_eval:
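            # NOTE: this eval setup and the per-epoch evaluation further down
            # live inside the `if args.do_train:` block, so --do_eval only
            # takes effect when --do_train is also passed.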
            eval_examples = processor.get_dev_examples(args.data_dir)
            dev = convert_examples_to_features(eval_examples, label_list,
                                               args.max_seq_length, tokenizer,
                                               args.threshold)

            eval_features = dev

            logger.info("Eval  Num examples = %d", len(eval_examples))
            logger.info("Eval  Batch size = %d", args.train_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                         dtype=torch.long)
            all_input_mask = torch.tensor(
                [f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor(
                [f.segment_ids for f in eval_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in eval_features],
                                         dtype=torch.long)
            all_ent = torch.tensor([f.input_ent for f in eval_features],
                                   dtype=torch.long)
            all_ent_masks = torch.tensor([f.ent_mask for f in eval_features],
                                         dtype=torch.long)
            eval_data = TensorDataset(all_input_ids, all_input_mask,
                                      all_segment_ids, all_ent, all_ent_masks,
                                      all_label_ids)
            # Run prediction for full data
            eval_sampler = SequentialSampler(eval_data)
            eval_dataloader = DataLoader(eval_data,
                                         sampler=eval_sampler,
                                         batch_size=args.train_batch_size)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
        all_ent = torch.tensor([f.input_ent for f in train_features],
                               dtype=torch.long)
        all_ent_masks = torch.tensor([f.ent_mask for f in train_features],
                                     dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_ent, all_ent_masks,
                                   all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        output_loss_file = os.path.join(args.output_dir, "loss")
        loss_fout = open(output_loss_file, 'w')
        model.train()
        max_acc = 0
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(train_dataloader):
                batch = tuple(
                    t.to(device) if i != 3 else t for i, t in enumerate(batch))
                input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch
                input_ent = embed(input_ent + 1).to(device)  # -1 -> 0
                loss = model(input_ids, segment_ids, input_mask,
                             input_ent.half(), ent_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                loss_fout.write("{}\n".format(loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(
                        global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            if args.do_eval:
                logger.info("***** Running evaluation *****")
                output_eval_file = os.path.join(
                    args.output_dir, "eval_results_{}.txt".format(global_step))
                model.eval()
                eval_loss, eval_accuracy = 0, 0
                nb_eval_steps, nb_eval_examples = 0, 0
                for input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids in eval_dataloader:
                    input_ent = embed(input_ent + 1)  # -1 -> 0
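                    # NOTE: unlike the training loop, input_ent is not cast
                    # with .half() here, so evaluating a model.half() model
                    # under --fp16 would hit a dtype mismatch; eval assumes fp32.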
                    input_ids = input_ids.to(device)
                    input_mask = input_mask.to(device)
                    segment_ids = segment_ids.to(device)
                    input_ent = input_ent.to(device)
                    ent_mask = ent_mask.to(device)
                    label_ids = label_ids.to(device)

                    with torch.no_grad():
                        tmp_eval_loss = model(input_ids, segment_ids,
                                              input_mask, input_ent, ent_mask,
                                              label_ids)
                        logits = model(input_ids, segment_ids, input_mask,
                                       input_ent, ent_mask)

                    logits = logits.detach().cpu().numpy()
                    label_ids = label_ids.to('cpu').numpy()
                    tmp_eval_accuracy = accuracy(logits, label_ids)

                    eval_loss += tmp_eval_loss.mean().item()
                    eval_accuracy += tmp_eval_accuracy

                    nb_eval_examples += input_ids.size(0)
                    nb_eval_steps += 1

                eval_loss = eval_loss / nb_eval_steps
                eval_accuracy = eval_accuracy / nb_eval_examples
                max_acc = max(max_acc, eval_accuracy)

                result = {
                    'eval_loss': eval_loss,
                    'eval_accuracy': eval_accuracy,
                    'max_accuracy': max_acc
                }

                with open(output_eval_file, "w") as writer:
                    logger.info("***** Eval results *****")
                    for key in sorted(result.keys()):
                        logger.info("  %s = %s", key, str(result[key]))
                        writer.write("%s = %s\n" % (key, str(result[key])))
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=1.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--vec_file",
                        default=None,
                        type=str,
                        required=True,
                        help="File with embeddings")
    parser.add_argument("--use_lim_ents",
                        default=None,
                        type=str,
                        required=True,
                        help="Whether to use limited entities")

    args = parser.parse_args()
    master_ip = os.environ['MASTER_ADDR']
    master_port = os.environ['MASTER_PORT']
    world_size = os.environ['WORLD_SIZE']
    rank = os.environ['RANK']
    logger.info("Master node's IP Address: {}, port: {}, world_size: {}, rank: {}".format(master_ip, master_port, world_size, rank))
    logger.info ("Local rank received by launch  utility: {}".format(args.local_rank))
    logger.info("Process is being blocked until all nodes are ready.")
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("All nodes ready, unblocking process.\n\n")
    logger.info("Global rank: {}, device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        torch.distributed.get_rank(), device, n_gpu, bool(args.local_rank != -1), args.fp16))
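    # Launch sketch (an assumption for illustration, not part of this script):
    # the environment variables read above are set by the PyTorch distributed
    # launcher, e.g.
    #   python -m torch.distributed.launch --nproc_per_node=4 \
    #       --nnodes=2 --node_rank=0 --master_addr=10.0.0.1 --master_port=29500 \
    #       run_pretrain.py --data_dir ... --bert_model ... --output_dir ... \
    #       --vec_file ... --use_lim_ents n --do_train
    # (the script name and flag values here are illustrative)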

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()

    # Check whether training should be restricted to a limited set of entities
    lim_ents = []
    lim_check = (args.use_lim_ents == "y")
    if lim_check:
        lim_ents = lim_ent_map(0, "kg_embeddings/dbp_eid_2_wd_eid.txt")
        logger.info("Limited entities flag is on. Count of unique entities considered: " + str(len(lim_ents)))
    
    vecs = []
    vecs.append([0]*100) # CLS
    lineindex = 1
    uid_map = {}
    logger.info("Reading embeddings file.")
    with open(args.vec_file, 'r') as fin:
        for line in fin:
            vec = line.strip().split('\t')
            #first element is unique id
            uniqid = int(vec[0])
            #map line index to unique id
            uid_map[uniqid] = lineindex
            #increment line index
            lineindex = lineindex + 1
            if (lim_check and (uniqid in lim_ents)) or not lim_check:
                vec = [float(x) for x in vec[1:101]]
            else:
                vec = vecs[0]
            vecs.append(vec)
    embed = torch.FloatTensor(vecs)
    embed = torch.nn.Embedding.from_pretrained(embed)
    #embed = torch.nn.Embedding(5041175, 100)

    logger.info("Shape of entity embedding: "+str(embed.weight.size()))
    del vecs
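    # Assumed --vec_file format (tab-separated, one entity per line): a unique
    # entity id followed by a 100-dimensional embedding, e.g.
    #   17<TAB>0.01<TAB>-0.23<TAB>...  (100 floats)
    # Row 0 of `embed` is reserved for the all-zero CLS vector, so uid_map maps
    # each entity id to its embedding row (uid_map[17] == 1 for the line above).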

    train_data = None
    num_train_steps = None
    if args.do_train:
        # TODO
        import indexed_dataset
        from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler,BatchSampler
        import iterators
        #train_data = indexed_dataset.IndexedCachedDataset(args.data_dir)
        train_data = indexed_dataset.IndexedDataset(args.data_dir, fix_lua_indexing=True)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_sampler = BatchSampler(train_sampler, args.train_batch_size, True)
        def collate_fn(x):
            #logger.info("Data for collate\n" + str(x))
            x = torch.LongTensor([xx for xx in x])

            entity_idx = x[:, 4*args.max_seq_length:5*args.max_seq_length]
            #logger.info("Entity ids:\n" + str(x))
            #fetch the line index for the unique id
            entarr = []
            global keys_found
            global keys_missed
            for elarr in entity_idx:
                temp_arr = []
                for uniqid in elarr:
                    lval = uniqid.item()
                    if lval in uid_map:
                        temp_arr.append(uid_map[lval])
                        keys_found = keys_found + 1
                    else:
                        temp_arr.append(0)
                        keys_missed = keys_missed + 1
                entarr.append(temp_arr)
            entarr = torch.LongTensor(entarr)
            #logger.info("Entity array for current line: "+str(entarr.numpy()))
            # Build candidate
            uniq_idx = np.unique(entarr.numpy())
            ent_candidate = embed(torch.LongTensor(uniq_idx))
            ent_candidate = ent_candidate.repeat([n_gpu, 1])
            # build entity labels
            d = {}
            dd = []
            for i, idx in enumerate(uniq_idx):
                d[idx] = i
                dd.append(idx)
            ent_size = len(uniq_idx)-1
            # entity denoising: keep ~80%, drop ~15% (-> 0), swap ~5% for a random candidate
            def map(x):
                if x == -1 or x == 0:
                    return 0
                else:
                    rnd = random.uniform(0, 1)
                    if rnd < 0.05:
                        return dd[random.randint(1, ent_size)]
                    elif rnd < 0.2:
                        return 0
                    else:
                        return x
            ent_labels = entarr.clone()
            d[-1] = -1
            ent_labels = ent_labels.apply_(lambda x: d[x])

            entarr.apply_(map)
            ent_emb = embed(entarr)
            mask = entarr.clone()
            mask.apply_(lambda x: 0 if (x == -1 or x == 0) else 1)
            mask[:,0] = 1

            return x[:,:args.max_seq_length], x[:,args.max_seq_length:2*args.max_seq_length], x[:,2*args.max_seq_length:3*args.max_seq_length], x[:,3*args.max_seq_length:4*args.max_seq_length], ent_emb, mask, x[:,6*args.max_seq_length:], ent_candidate, ent_labels
        train_iterator = iterators.EpochBatchIterator(train_data, collate_fn, train_sampler)
        num_train_steps = int(
            len(train_data) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
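        # Assumed layout of each flattened training example (inferred from
        # collate_fn and the unpacking in the training loop below), in blocks
        # of max_seq_length columns:
        #   [0L:1L) input_ids          [1L:2L) input_mask      [2L:3L) segment_ids
        #   [3L:4L) masked_lm_labels   [4L:5L) entity ids      [5L:6L) (unused here)
        #   [6L:  ) next_sentence_label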

    # Prepare model
    model, missing_keys = BertForPreTraining.from_pretrained(args.bert_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))

    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_linear = ['layer.2.output.dense_ent', 'layer.2.intermediate.dense_1', 'bert.encoder.layer.2.intermediate.dense_1_ent', 'layer.2.output.LayerNorm_ent']
    no_linear = [x.replace('2', '11') for x in no_linear]
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in no_linear)]
    #param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in missing_keys)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'LayerNorm_ent.bias', 'LayerNorm_ent.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.contrib.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False)
        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
            # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale)
        #logger.info(dir(optimizer))
        #op_path = os.path.join(args.bert_model, "pytorch_op.bin")
        #optimizer.load_state_dict(torch.load(op_path))

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
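    # The manual LR updates in the training loop below call warmup_linear from
    # pytorch_pretrained_bert.optimization, which is essentially the following
    # (a sketch of the library helper, shown for reference only):
    #   def warmup_linear(x, warmup=0.002):
    #       if x < warmup:
    #           return x / warmup   # linear ramp up to the peak learning rate
    #       return 1.0 - x          # then linear decay to zero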

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        model.train()
        import datetime
        fout = open(os.path.join(args.output_dir, "loss.{}".format(datetime.datetime.now())), 'w')
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_iterator.next_epoch_itr(), desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels = batch
                loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels)


                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                    original_loss = original_loss.mean()
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    try:
                        from apex import amp
                    except ImportError:
                        raise ImportError(
                            "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                fout.write("{} {}\n".format(loss.item()*args.gradient_accumulation_steps, original_loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    #if global_step % 1000 == 0:
                    #    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
                    #    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
                    #    torch.save(model_to_save.state_dict(), output_model_file)
        fout.close()

    logger.info("Saving data")
    # Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    output_model_file = os.path.join(args.output_dir, "pytorch_model_"+str(torch.distributed.get_rank())+str(args.local_rank)+".bin")
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info("Training complete.\n Total number of entity matches in embeddings: ", keys_found, "\n Missed matches: ", keys_missed)
Example #6
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        #default=3.0,
                        default=1.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    ##########ADD##
    parser.add_argument("--K_V_dim",
                        type=int,
                        default=100,
                        help="Key and Value dim == KG representation dim")

    parser.add_argument("--Q_dim",
                        type=int,
                        default=768,
                        help="Query dim == Bert six output layer representation dim")
    parser.add_argument('--graphsage',
                        default=False,
                        action='store_true',
                        help="Whether to use Attention GraphSage instead of GAT")
    parser.add_argument('--self_att',
                        default=True,
                        action='store_true',
                        help="Whether to use GAT (note: with action='store_true' and default=True this flag is effectively always True)")
    ###############

    args = parser.parse_args()


    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()


    train_data = None
    num_train_steps = None
    if args.do_train:
        # TODO
        import indexed_dataset
        from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, BatchSampler
        import iterators
        #train_data = indexed_dataset.IndexedCachedDataset(args.data_dir)
        train_data = indexed_dataset.IndexedDataset(args.data_dir, fix_lua_indexing=True)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_sampler = BatchSampler(train_sampler, args.train_batch_size, True)
        def collate_fn(x):
            x = torch.LongTensor([xx for xx in x])

            # entity ids occupy the 4th max_seq_length-wide block of each flattened example
            entity_idx = x[:, 3*args.max_seq_length:4*args.max_seq_length]

            # Build the candidate set from the unique entity ids in the batch
            uniq_idx = np.unique(entity_idx.numpy())
            ent_candidate = torch.LongTensor(uniq_idx + 1)
            ent_candidate = ent_candidate.repeat([n_gpu, 1])

            # Build entity labels: map each entity id to its index in the candidate set
            d = {}
            for i, idx in enumerate(uniq_idx):
                d[idx] = i
            ent_labels = entity_idx.clone()
            d[-1] = -1
            ent_labels = ent_labels.apply_(lambda x: d[x])

            # The entity mask is stored in the 5th block; position 0 is always kept
            mask = x[:, 4*args.max_seq_length:5*args.max_seq_length]
            mask[:, 0] = 1

            entity_idx = entity_idx * mask
            entity_idx[entity_idx == 0] = -1
            ent_emb = entity_idx + 1  # shift by one so that -1 (no entity) maps to index 0

            return x[:, :args.max_seq_length], \
                   x[:, args.max_seq_length:2*args.max_seq_length], \
                   x[:, 2*args.max_seq_length:3*args.max_seq_length], \
                   ent_emb, mask, x[:, 5*args.max_seq_length:], \
                   ent_candidate, ent_labels

        train_iterator = iterators.EpochBatchIterator(train_data, collate_fn, train_sampler)
        num_train_steps = int(
            len(train_data) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model, missing_keys = BertForPreTraining.from_pretrained(args.bert_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
              args=args)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_linear = ['layer.2.output.dense_ent', 'layer.2.intermediate.dense_1', 'bert.encoder.layer.2.intermediate.dense_1_ent', 'layer.2.output.LayerNorm_ent']
    no_linear = [x.replace('2', '11') for x in no_linear]
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in no_linear)]
    #param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in missing_keys)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'LayerNorm_ent.bias', 'LayerNorm_ent.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        #logger.info(dir(optimizer))
        #op_path = os.path.join(args.bert_model, "pytorch_op.bin")
        #optimizer.load_state_dict(torch.load(op_path))

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        model.train()
        import datetime
        fout = open(os.path.join(args.output_dir, "loss.{}".format(datetime.datetime.now())), 'w')
        more_than_one_2 = 0
        less_than_one_2 = 0
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0

            # Show a progress bar only on rank 0 (or in single-process runs)
            if args.local_rank == 0 or args.local_rank == -1:
                iters = tqdm(train_iterator.next_epoch_itr(), desc="Iteration")
            else:
                iters = train_iterator.next_epoch_itr()

            for step, batch in enumerate(iters):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels = batch
                # Sanity check: each sequence should contain exactly one </s> token (id == 2);
                # repair sequences that have too many or none.
                if len(input_ids[input_ids == 2]) != args.train_batch_size:
                    for i_th_1, input_id in enumerate(input_ids):
                        if len(input_id[input_id == 2]) > 1:
                            # too many </s>: zero out extras until exactly one remains
                            for i_th_2, id in enumerate(input_id):
                                if id == 2:
                                    print("Before:", input_id)
                                    input_ids[i_th_1][i_th_2] = 0
                                    more_than_one_2 += 1
                                    print("more_than_one_2:", more_than_one_2)
                                    print("After:", input_id)
                                    if len(input_id[input_id == 2]) == 1:
                                        break
                        elif len(input_id[input_id == 2]) < 1:
                            # no </s> at all: force one at the last position
                            print("Error!! Sequence has no id=2 </s> token.")
                            less_than_one_2 += 1
                            print("less_than_one_2:", less_than_one_2)
                            print(input_id)
                            input_ids[i_th_1][-1] = 2

                # Fetch (key, value) memories for the batch entities and for the candidate set
                k_1, v_1, k_2, v_2, k_cand_1, v_cand_1, k_cand_2, v_cand_2, cand_pos_tensor = \
                    load_k_v_queryR_small(input_ent, ent_candidate)

                if args.fp16:
                    loss, original_loss = model(input_ids, None, input_mask, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels, k_1.half(), v_1.half(), k_2.half(), v_2.half(), k_cand_1.half(), v_cand_1.half(), k_cand_2.half(), v_cand_2.half(), cand_pos_tensor)
                else:
                    loss, original_loss = model(input_ids, None, input_mask, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels, k_1, v_1, k_2, v_2, k_cand_1, v_cand_1, k_cand_2, v_cand_2, cand_pos_tensor)


                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                    original_loss = original_loss.mean()
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()


                fout.write("{} {}\n".format(loss.item()*args.gradient_accumulation_steps, original_loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                    if global_step % 100000 == 0:
                        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
                        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
                        torch.save(model_to_save.state_dict(), output_model_file)

        fout.close()

    # Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)
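
# A vectorized sketch of the </s> repair performed in the training loop above.
# `fix_eos` is illustrative (not part of the original script) and assumes, as
# the loop does, that token id 2 marks the end of a sequence:
def fix_eos(input_ids, eos_id=2):
    counts = (input_ids == eos_id).sum(dim=1)
    # rows with no </s>: force one at the last position
    input_ids[counts == 0, -1] = eos_id
    # rows with extras: zero out all but the last occurrence
    for row in torch.nonzero(counts > 1).flatten():
        positions = torch.nonzero(input_ids[row] == eos_id).flatten()
        input_ids[row, positions[:-1]] = 0
    return input_ids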
Example #7
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")

    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")

    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()
    args.local_rank = -1
    device = torch.device("cpu")
    n_gpu = 0
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), 'false'))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()
    
    vecs = []
    vecs.append([0]*100) # CLS
    with open("kg_embed/entity2vec.vec", 'r') as fin:
        for line in fin:
            vec = line.strip().split('\t')
            vec = [float(x) for x in vec]
            vecs.append(vec)
    embed = torch.FloatTensor(vecs)
    embed = torch.nn.Embedding.from_pretrained(embed)
    #embed = torch.nn.Embedding(5041175, 100)

    logger.info("Shape of entity embedding: "+str(embed.weight.size()))
    del vecs

    train_data = None
    num_train_steps = None
    if args.do_train:
        import indexed_dataset
        from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler,BatchSampler
        import iterators
        #train_data = indexed_dataset.IndexedCachedDataset(args.data_dir)
        train_data = indexed_dataset.IndexedDataset(args.data_dir, fix_lua_indexing=True)

        train_sampler = RandomSampler(train_data)
        train_sampler = BatchSampler(train_sampler, args.train_batch_size, True)
        def collate_fn(x):
            x = torch.LongTensor([xx for xx in x])

            entity_idx = x[:, 4*args.max_seq_length:5*args.max_seq_length]
            # Build candidate
            uniq_idx = np.unique(entity_idx.numpy())
            ent_candidate = embed(torch.LongTensor(uniq_idx+1))
            ent_candidate = ent_candidate.repeat([n_gpu, 1])
            # build entity labels
            d = {}
            dd = []
            for i, idx in enumerate(uniq_idx):
                d[idx] = i
                dd.append(idx)
            ent_size = len(uniq_idx)-1
            def map(x):
                if x == -1:
                    return -1
                else:
                    rnd = random.uniform(0, 1)
                    if rnd < 0.05:
                        return dd[random.randint(1, ent_size)]
                    elif rnd < 0.2:
                        return -1
                    else:
                        return x
            ent_labels = entity_idx.clone()
            d[-1] = -1
            ent_labels = ent_labels.apply_(lambda x: d[x])

            entity_idx.apply_(map)
            ent_emb = embed(entity_idx+1)
            mask = entity_idx.clone()
            mask.apply_(lambda x: 0 if x == -1 else 1)
            mask[:,0] = 1

            return x[:,:args.max_seq_length], x[:,args.max_seq_length:2*args.max_seq_length], x[:,2*args.max_seq_length:3*args.max_seq_length], x[:,3*args.max_seq_length:4*args.max_seq_length], ent_emb, mask, x[:,6*args.max_seq_length:], ent_candidate, ent_labels
        train_iterator = iterators.EpochBatchIterator(train_data, collate_fn, train_sampler)
        num_train_steps = int(
            len(train_data) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
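        # The `map` helper in collate_fn above implements ERNIE-style entity
        # denoising: each aligned entity is kept with p = 0.8, dropped (set to
        # -1, "no entity") with p = 0.15, and swapped for a random candidate
        # with p = 0.05, while ent_labels retains the original entity index so
        # the model can be trained to recover it.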

    # Prepare model
    model, missing_keys = BertForPreTraining.from_pretrained(args.bert_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(-1))

    model.to(device)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_linear = ['layer.2.output.dense_ent', 'layer.2.intermediate.dense_1', 'bert.encoder.layer.2.intermediate.dense_1_ent', 'layer.2.output.LayerNorm_ent']
    no_linear = [x.replace('2', '11') for x in no_linear]
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in no_linear)]
    #param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in missing_keys)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'LayerNorm_ent.bias', 'LayerNorm_ent.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    t_total = num_train_steps
    optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        model.train()
        import datetime
        fout = open(os.path.join(args.output_dir, "loss.{}".format(datetime.datetime.now())), 'w')
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_iterator.next_epoch_itr(), desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels = batch

                loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels, input_ent, ent_mask, next_sentence_label, ent_candidate, ent_labels)

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps


                loss.backward()

                fout.write("{} {}\n".format(loss.item()*args.gradient_accumulation_steps, original_loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
        fout.close()

    # Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)
Example #8
def train(data_obj, dname, args, embed, model):

    data_path = args.data_dir + dname + '_mention_rank'
    local_rep_path = args.local_rep_dir + dname + '_local_rep_mention_rank.npy'
    local_fea_path = args.local_rep_dir + dname + '_local_fea_mention_rank.npy'
    group_path = args.group_path

    mentions, entities, local_feas, ment_names, ment_sents, ment_offsets, ent_ids, mtypes, etypes, pems, labels = \
        data_obj.process_global_data(dname, data_path, local_rep_path, group_path, local_fea_path, args.seq_len, args.candidate_entity_num)

    mention_seq_np, entity_seq_np, local_fea_np, entid_seq_np, pem_seq_np, mtype_seq_np, etype_seq_np, label_seq_np = \
        data_obj.get_local_feature_input(mentions, entities, local_feas, ent_ids, mtypes, etypes, pems, labels, args.seq_len, args.candidate_entity_num)

    seq_tokens_np, seq_tokens_mask_np, seq_tokens_segment_np, seq_ents_np, seq_ents_mask_np, seq_ents_index_np, seq_label_np = \
        data_obj.get_global_feature_input(ment_names, ment_sents, ment_offsets, ent_ids, labels, args.seq_len, args.candidate_entity_num)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())

    no_grad = [
        'bert.encoder.layer.11.output.dense_ent',
        'bert.encoder.layer.11.output.LayerNorm_ent'
    ]
    param_optimizer = [(n, p) for n, p in param_optimizer
                       if not any(nd in n for nd in no_grad)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    num_train_steps = int(
        len(seq_tokens_np) / args.train_batch_size /
        args.gradient_accumulation_steps * args.num_train_epochs)
    t_total = num_train_steps
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=t_total)

    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(seq_tokens_np))
    logger.info("  Batch size = %d", args.train_batch_size)
    logger.info("  Num steps = %d", num_train_steps)

    all_seq_input_id = torch.tensor(seq_tokens_np,
                                    dtype=torch.long)  # (num_example, 256)
    all_seq_input_mask = torch.tensor(seq_tokens_mask_np,
                                      dtype=torch.long)  # (num_example, 256)
    all_seq_segment_id = torch.tensor(seq_tokens_segment_np,
                                      dtype=torch.long)  # (num_example, 256)
    all_seq_input_ent = torch.tensor(seq_ents_np,
                                     dtype=torch.long)  # (num_example, 256)
    all_seq_ent_mask = torch.tensor(seq_ents_mask_np,
                                    dtype=torch.long)  # (num_example, 256)

    all_seq_label = torch.tensor(
        seq_label_np, dtype=torch.long)  # (num_example, 3), used for hinge loss
    # all_seq_label = torch.tensor(label_seq_np, dtype=torch.long)     # (num_example, 3, 6), used for BCE loss

    all_seq_mention_rep = torch.tensor(
        mention_seq_np, dtype=torch.float)  # (num_example, 3, 768)
    all_seq_entity_rep = torch.tensor(
        entity_seq_np, dtype=torch.float)  # (num_example, 3, 6, 768)
    all_seq_entid = torch.tensor(
        entid_seq_np, dtype=torch.long)  #(num_example, 3, 6), eids of the candidate entities
    all_seq_ent_index = torch.tensor(
        seq_ents_index_np,
        dtype=torch.long)  # (num_example, 3) eg:[[1,81,141],[],]

    all_seq_pem = torch.tensor(pem_seq_np,
                               dtype=torch.float)  # (num_example, 3, 6)
    all_seq_mtype = torch.tensor(mtype_seq_np,
                                 dtype=torch.float)  #(num_example, 3, 6, 4)
    all_seq_etype = torch.tensor(etype_seq_np,
                                 dtype=torch.float)  # (num_example, 3, 6, 4)
    all_seq_local_fea = torch.tensor(local_fea_np, dtype=torch.float)

    train_data = TensorDataset(all_seq_input_id, all_seq_input_mask, all_seq_segment_id, all_seq_input_ent, \
        all_seq_ent_mask, all_seq_ent_index, all_seq_label, \
        all_seq_mention_rep, all_seq_entity_rep, all_seq_entid, all_seq_pem, all_seq_mtype, all_seq_etype, all_seq_local_fea)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size)

    output_loss_file = os.path.join(args.output_dir, "loss")
    loss_fout = open(output_loss_file, 'w')

    output_f1_file = os.path.join(args.output_dir, "result_f1")
    f1_fout = open(output_f1_file, 'w')
    model.train()

    global_step = 0
    best_f1 = -1
    not_better_count = 0
    for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
        tr_loss, nb_tr_examples, nb_tr_steps = 0, 0, 0
        for batch in tqdm(train_dataloader, desc="Iteration"):
            batch = tuple(
                t.to(device) if i != 3 else t for i, t in enumerate(batch))
            seq_input_id, seq_input_mask, seq_segment_id, seq_input_ent, \
                seq_ent_mask, seq_ent_index, seq_label, \
                    seq_mention_rep, seq_entity_rep, seq_entid, \
                        seq_pem, seq_mtype, seq_etype, seq_local_fea = batch
            seq_input_ent_embed = embed(seq_input_ent + 1).to(device)

            # Loop over the mention sequence below, sampling one full pass per batch
            current_input_id_batch = seq_input_id  # shape(batch, ctx_len)
            current_input_mask_batch = seq_input_mask  # shape(b, c)
            current_segment_id_batch = seq_segment_id  # shape(b, c)
            current_input_ent_embed_batch = seq_input_ent_embed  # shape(b, c, dim)
            current_input_ent_batch = seq_input_ent  # shape(b, c)
            current_ent_mask_batch = seq_ent_mask  # shape(b, c)

            for mention_index in range(args.seq_len):
                current_label_batch = seq_label[:, mention_index]  # shape(b,)
                # current_label_batch = seq_label[:, mention_index, :]               # shape(b, 6)
                current_mention_rep_batch = seq_mention_rep[:,
                                                            mention_index, :]  # shape(b, 768)
                current_entity_rep_batch = seq_entity_rep[:,
                                                          mention_index, :, :]  # shape(b, 6, 768)

                current_pem_batch = seq_pem[:, mention_index, :]  # shape(b, 6)
                current_mtype_batch = seq_mtype[:,
                                                mention_index, :, :]  # shape(b, 6, 4)
                current_etype_batch = seq_etype[:,
                                                mention_index, :, :]  # shape(b, 6, 4)
                current_local_fea_batch = seq_local_fea[:, mention_index, :]

                current_entid_batch = seq_entid[:,
                                                mention_index, :]  # shape(b, 6)
                current_ent_index_batch = seq_ent_index[:,
                                                        mention_index]  # shape(b, )
                current_entid_embed_batch = embed(
                    current_entid_batch.cpu() + 1).to(
                        device)  # # shape(b, 6, dim)

                # train the model
                loss, scores = \
                    model(current_input_id_batch, current_segment_id_batch, current_input_mask_batch,\
                         current_input_ent_embed_batch, current_ent_mask_batch, current_entid_embed_batch,\
                         current_label_batch, current_mention_rep_batch, current_entity_rep_batch, \
                         current_pem_batch, current_mtype_batch, current_etype_batch, current_local_fea_batch)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                # Use the model's scores to pick the predicted entity, then update current_input_ent and current_ent_mask
                current_batch_size = current_input_id_batch.size(0)
                pred_ids = torch.argmax(
                    scores, dim=1)  # shape(b)    scores shape(b, 6)
                pred_ids = pred_ids.reshape(current_batch_size,
                                            1)  # shape(b, 1)

                pred_entid = torch.gather(current_entid_batch, 1,
                                          pred_ids)  # shape(b, 1)
                pred_entmask = torch.ones_like(pred_entid)  # shape(b, 1)

                alter_input_ent_batch = current_input_ent_batch.scatter(
                    1, current_ent_index_batch.reshape(current_batch_size, 1).cpu(),
                    pred_entid.cpu())
                current_input_ent_embed_batch = embed(alter_input_ent_batch +
                                                      1).to(device)
                current_ent_mask_batch.scatter_(
                    1, current_ent_index_batch.reshape(current_batch_size, 1),
                    pred_entmask)
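                # Toy example of the gather/scatter step: with
                # current_entid_batch = [[10, 20, 30]] and pred_ids = [[2]],
                # gather yields pred_entid = [[30]]; scatter then writes 30 into
                # current_input_ent at the position given by
                # current_ent_index_batch, so the next mention step conditions
                # on the entity predicted for this one.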

                loss.backward()
                loss_fout.write("{}\n".format(
                    loss.item() * args.gradient_accumulation_steps))

                tr_loss += loss.item()
                nb_tr_examples += current_input_id_batch.size(0)
                nb_tr_steps += 1

                if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
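                    # warmup_linear (as implemented in pytorch_pretrained_bert)
                    # ramps the multiplier linearly from 0 to 1 over the first
                    # warmup_proportion of training, then decays it linearly
                    # back to 0: x / warmup if x < warmup else 1.0 - x.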
                    lr_this_step = args.learning_rate * warmup_linear(
                        global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                if global_step % 100 == 0:
                    print('global_step: ', global_step, 'global_step loss: ',
                          tr_loss / nb_tr_steps)
                    dev_f1 = 0
                    dname_list = [
                        'aida-A', 'aida-B', 'msnbc', 'aquaint', 'ace2004',
                        'clueweb', 'wikipedia'
                    ]

                    for di, dname in enumerate(dname_list):
                        # test model
                        f1 = predict(data_obj, dname, args, embed, model)
                        print(dname, '\033[92m' + 'micro F1: ' + str(f1) +
                              '\033[0m')  # colorized output
                        f1_fout.write("{}, f1: {}, step: {}\n".format(
                            dname, f1, global_step))

                        if dname == 'aida-A':
                            dev_f1 = f1
                    if best_f1 < dev_f1:
                        not_better_count = 0
                        best_f1 = dev_f1
                        print('save best model ...')
                        output_model_file = os.path.join(
                            args.output_dir,
                            "pytorch_model_nolocal_{}.bin".format(global_step))
                        torch.save(model.state_dict(), output_model_file)
                    else:
                        not_better_count += 1
                if not_better_count > 3:  # early stopping
                    exit(0)
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()
    
    vecs = []
    vecs.append([0]*200) # Reserve index 0 for [CLS]; all other indices shift by +1.
    with open("config_data/kg_embed/entity2vec.vec", 'r') as fin:
    #with open("pretrain_data/config_data/entity2vec.vec", 'r') as fin:
        for line in fin:
            vec = line.strip().split('\t')
            #vec = [float(x) for x in vec if x != ""]
            vec = [float(x) for x in vec]
            vecs.append(vec)
    print("vecs_len=%s" % str(len(vecs)))
    print("vecs_dim=%s" % str(len(vecs[0])))
    ent_embed = torch.FloatTensor(vecs)
    ent_embed = torch.nn.Embedding.from_pretrained(ent_embed)
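    # Note: Embedding.from_pretrained defaults to freeze=True, so these entity
    # vectors are not updated during training.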
    #ent_embed = torch.nn.Embedding(5041175, 100)

    logger.info("Shape of entity embedding: "+str(ent_embed.weight.size()))

    vecs = []
    vecs.append([0] * 4096)  # Reserve index 0 for [CLS]; all other indices shift by +1.
    with open("config_data/kg_embed/image2vec.vec", 'r') as fin:
    #with open("pretrain_data/image_vec/image2vec.vec", 'r') as fin:
        for line in fin:
            vec = line.strip().split('\t')
            vec = [float(x) for x in vec]
            vecs.append(vec)
    print("vecs_len=%s" % str(len(vecs)))
    print("vecs_dim=%s" % str(len(vecs[0])))
    img_embed = torch.FloatTensor(vecs)
    img_embed = torch.nn.Embedding.from_pretrained(img_embed)

    logger.info("Shape of image embedding: " + str(img_embed.weight.size()))
    del vecs

    train_data = None
    num_train_steps = None
    if args.do_train:
        # TODO
        import indexed_dataset
        from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler,BatchSampler
        import iterators
        #train_data = indexed_dataset.IndexedCachedDataset(args.data_dir)
        train_data = indexed_dataset.IndexedDataset(args.data_dir, fix_lua_indexing=True)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_sampler = BatchSampler(train_sampler, args.train_batch_size, True)

        def collate_fn(x):
            x = torch.LongTensor([xx for xx in x])

            entity_idx = x[:, 4 * args.max_seq_length:5 * args.max_seq_length]
            print("entity_idx=%s" % entity_idx)
            image_idx = x[:, 6 * args.max_seq_length:7 * args.max_seq_length]
            print("image_idx=%s" % image_idx)
            # Build candidate
            ent_uniq_idx = np.unique(entity_idx.numpy())
            print("ent_uniq_idx=%s" % str(ent_uniq_idx))
            img_uniq_idx = np.unique(image_idx.numpy())
            print("img_uniq_idx=%s" % str(img_uniq_idx))
            ent_candidate = ent_embed(torch.LongTensor(ent_uniq_idx + 1))
            ent_candidate = ent_candidate.repeat([n_gpu, 1])
            img_candidate = img_embed(torch.LongTensor(img_uniq_idx + 1))
            img_candidate = img_candidate.repeat([n_gpu, 1])
            # build entity labels
            ent_idx_dict = {}
            ent_idx_list = []
            for idx, idx_value in enumerate(ent_uniq_idx):
                ent_idx_dict[idx_value] = idx
                ent_idx_list.append(idx_value)
            ent_size = len(ent_uniq_idx)-1
            # build image labels
            img_idx_dict = {}
            img_idx_list = []
            for idx, idx_value in enumerate(img_uniq_idx):
                img_idx_dict[idx_value] = idx
                img_idx_list.append(idx_value)
            img_size = len(img_uniq_idx) - 1

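            # BERT-style denoising for the entity/image channels: with 5%
            # probability replace an aligned id with a random candidate, with a
            # further 15% probability drop it (-1); otherwise keep it.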
            def ent_map(x):
                if x == -1:
                    return -1
                else:
                    rnd = random.uniform(0, 1)
                    if rnd < 0.05:
                        return ent_idx_list[random.randint(1, ent_size)]
                    elif rnd < 0.2:
                        return -1
                    else:
                        return x

            def img_map(x):
                if x == -1:
                    return -1
                else:
                    rnd = random.uniform(0, 1)
                    if rnd < 0.05:
                        return img_idx_list[random.randint(1, img_size)]
                    elif rnd < 0.2:
                        return -1
                    else:
                        return x

            ent_labels = entity_idx.clone()
            ent_idx_dict[-1] = -1
            ent_labels = ent_labels.apply_(lambda x: ent_idx_dict[x])

            entity_idx.apply_(ent_map)
            ent_emb = ent_embed(entity_idx+1)
            ent_mask = entity_idx.clone()
            ent_mask.apply_(lambda x: 0 if x == -1 else 1)
            ent_mask[:,0] = 1

            img_labels = image_idx.clone()
            img_idx_dict[-1] = -1
            img_labels = img_labels.apply_(lambda x: img_idx_dict[x])

            image_idx.apply_(img_map)
            img_emb = img_embed(image_idx + 1)
            img_mask = image_idx.clone()
            img_mask.apply_(lambda x: 0 if x == -1 else 1)
            img_mask[:, 0] = 1

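            # The flattened example packs blocks of width max_seq_length:
            # 0 input_ids, 1 input_mask, 2 segment_ids, 3 masked_lm_labels,
            # 4 entity_idx, 6 image_idx; everything from block 8 onward is
            # next_sentence_label. Blocks 5 and 7 are unused here.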
            input_ids = x[:,:args.max_seq_length]
            input_mask = x[:,args.max_seq_length:2*args.max_seq_length]
            segment_ids = x[:,2*args.max_seq_length:3*args.max_seq_length]
            masked_lm_labels = x[:,3*args.max_seq_length:4*args.max_seq_length]
            next_sentence_label = x[:,8*args.max_seq_length:]
            return input_ids, input_mask, segment_ids, masked_lm_labels, ent_emb, ent_mask, img_emb, img_mask, next_sentence_label, ent_candidate, ent_labels, img_candidate, img_labels

        train_iterator = iterators.EpochBatchIterator(train_data, collate_fn, train_sampler)
        num_train_steps = int(
            len(train_data) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

    print ("len(train_data)=%s" % len(train_data))
    # Prepare model
    model, missing_keys = BertForPreTraining.from_pretrained(args.bert_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    print ("param_optimizer:")
    #for param in model.named_parameters():
    #    print(param[0])

    #no_linear = ['layer.2.output.dense_ent', 'layer.2.intermediate.dense_1', 'bert.encoder.layer.2.intermediate.dense_1_ent', 'layer.2.output.LayerNorm_ent']
    #no_linear = [x.replace('2', '11') for x in no_linear]
    no_linear = ['layer.11.output.dense_entity', 'layer.11.output.LayerNorm_entity', 'layer.11.output.dense_image', 'layer.11.output.LayerNorm_image']
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in no_linear)]
    print ("param_optimizer--no_linear")
    #for param in param_optimizer:
    #    print (param[0])

    #param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in missing_keys)]
    #no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'LayerNorm_ent.bias', 'LayerNorm_ent.weight']
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'LayerNorm_token.bias', 'LayerNorm_token.weight', 'LayerNorm_entity.bias', 'LayerNorm_entity.weight', 'LayerNorm_image.bias', 'LayerNorm_image.weight']
    optimizer_grouped_parameters = [
        # Apply weight decay for regularization
        # source: https://blog.csdn.net/program_developer/article/details/80867468
        # source: https://blog.csdn.net/m0_37531129/article/details/101390592
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        # Biases and normalization weights are exempt from weight decay
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    # optimizer_grouped_parameters_display is only used to debug
#    optimizer_grouped_parameters_display = [
#        {'params': [(n,p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
#        {'params': [(n,p) for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
#        ]
#    print ("optimizer_grouped_parameters_display-0:")
#    for param in optimizer_grouped_parameters_display[0]['params']:
#        print (param[0])
#
#    print ("optimizer_grouped_parameters_display-1:")
#    for param in optimizer_grouped_parameters_display[1]['params']:
#        print (param[0])

    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            #from apex.optimizers import FP16_Optimizer
            from apex.fp16_utils.fp16_optimizer import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        #optimizer = FusedAdam(optimizer_grouped_parameters,
        #                      lr=args.learning_rate,
        #                      bias_correction=False,
        #                      max_grad_norm=1.0)
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        #logger.info(dir(optimizer))
        #op_path = os.path.join(args.bert_model, "pytorch_op.bin")
        #optimizer.load_state_dict(torch.load(op_path))

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        model.train()
        import datetime
        fout = open(os.path.join(args.output_dir, "loss.{}".format(datetime.datetime.now())), 'w')
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_iterator.next_epoch_itr(), desc="Iteration")):
                print ("step=%s" % str(step))
                print ("len(batch)=%s" % str(len(batch)))
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, masked_lm_labels, input_ent, ent_mask, input_img, img_mask, next_sentence_label, ent_candidate, ent_labels, img_candidate, img_labels = batch
                print ("\ninput_ids.size=%s" % str(input_ids.size()))
                print ("input_mask.size=%s" % str(input_mask.size()))
                print ("segment_ids.size=%s" % str(segment_ids.size()))
                print ("masked_lm_labels.size=%s" % str(masked_lm_labels.size()))
                print ("input_ent.size=%s" % str(input_ent.size()))
                print ("ent_mask.size=%s" % str(ent_mask.size()))
                print ("input_img.size=%s" % str(input_img.size()))
                print ("img_mask.size=%s" % str(img_mask.size()))
                print ("next_sentence_label.size=%s" % str(next_sentence_label.size()))
                print ("ent_candidate.size=%s" % str(ent_candidate.size()))
                print ("ent_labels.size=%s" % str(ent_labels.size()))
                print ("img_candidate.size=%s" % str(img_candidate.size()))
                print ("img_labels.size=%s" % str(img_labels.size()))

                if args.fp16:
                    loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels,
                                                input_ent.half(), ent_mask, input_img.half(), img_mask,
                                                next_sentence_label, ent_candidate.half(), ent_labels,
                                                img_candidate.half(), img_labels)
                else:
                    loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels,
                                                input_ent, ent_mask, input_img, img_mask,
                                                next_sentence_label, ent_candidate, ent_labels,
                                                img_candidate, img_labels)


                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                    original_loss = original_loss.mean()
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                print("\nloss=%s\n" % str(loss))

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                fout.write("{} {}\n".format(loss.item()*args.gradient_accumulation_steps, original_loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    # source: https://blog.csdn.net/m0_37531129/article/details/101390592
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    #if global_step % 1000 == 0:
                    #    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
                    #    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
                    #    torch.save(model_to_save.state_dict(), output_model_file)
        fout.close()

    # Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)
Example #10
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help=
        "Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Type of model to train.",
    )
    parser.add_argument(
        "--model_save_name",
        default=None,
        type=str,
        required=True,
        help="Name under which the trained model checkpoint will be saved.",
    )
    parser.add_argument(
        "--train_setting",
        default='relaxed',
        type=str,
        required=False,
        help=
        "Whether to train in strict setting or relaxed setting. Options: strict or relaxed",
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Whether to run the model on the dev set.")
    parser.add_argument("--do_test",
                        action="store_true",
                        help="Whether to run the model on the test set.")
    parser.add_argument("--evaluate_during_training",
                        action="store_true",
                        help="Whether to evaluate during training.")
    parser.add_argument("--multi_task",
                        action="store_true",
                        help="Multi-task learning flag.")

    parser.add_argument("--train_batch_size",
                        default=20,
                        type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--train_epochs",
                        default=5,
                        type=int,
                        help="Training epochs.")
    parser.add_argument("--GRAD_ACC",
                        default=1,
                        type=int,
                        help="Gradient accumulation steps.")
    parser.add_argument("--eval_batch_size",
                        default=20,
                        type=int,
                        help="Batch size per GPU/CPU for evaluation/testing.")
    parser.add_argument("--lr",
                        default=2e-5,
                        type=float,
                        help="Learning rate.")
    parser.add_argument("--auxiliary_task_wt",
                        default=0.3,
                        type=float,
                        help="Weight for the auxiliary task.")
    parser.add_argument("--weight_decay",
                        default=1e-4,
                        type=float,
                        help="Weight decay.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Warmup proportion.")
    parser.add_argument("--gpu",
                        default=0,
                        type=int,
                        help="which GPU is to be used for training.")

    args = parser.parse_args()

    data = pickle.load(open(args.data_dir, 'rb'))
    selected_sem_types = pickle.load(open('../data/selected_ents.pkl', 'rb'))
    print('Selected semantic types: ', selected_sem_types)

    if args.train_setting == 'strict':
        data = data['strict_split']
    else:
        data = data['split']

    entity2id = utils.prepare_entities_to_ix(selected_sem_types)
    logical2ix = utils.prepare_logical_forms_to_ix(data['train'])

    shuffle(data['train'])
    shuffle(data['dev'])
    shuffle(data['test'])
    print(entity2id)

    model_config = {
        'label_size': 2,
        'num_entities': len(selected_sem_types) + 1,
        'entity_dim': 100,
        'lr': args.lr,
        'weight_decay': args.weight_decay,
        'batch_size': args.train_batch_size,
        'data_path': args.data_dir,
        'model_name': args.model_save_name,
        'bert_model': args.model_name_or_path,
        'do_lower_case': True,
        'gradient_accumulation_steps': args.GRAD_ACC
    }

    if args.model_type == 'ernie':
        from knowledge_bert import modeling
        from knowledge_bert import BertTokenizer
        from knowledge_bert.optimization import BertAdam

        tokenizer = BertTokenizer.from_pretrained(
            model_config['bert_model'],
            do_lower_case=model_config['do_lower_case'])
        model, _ = modeling.BertForQuestionAnsweringEmrQA.from_pretrained(
            model_config['bert_model'],
            num_entities=model_config['num_entities'])
    elif args.model_type == 'bert':
        from pytorch_pretrained_bert import BertTokenizer, BertForQuestionAnswering
        from pytorch_pretrained_bert.optimization import BertAdam
        tokenizer = BertTokenizer.from_pretrained(
            model_config['bert_model'],
            do_lower_case=model_config['do_lower_case'])
        model = BertForQuestionAnswering.from_pretrained(
            model_config['bert_model'])

    num_train_optimization_steps = len(
        data['train']
    ) // model_config['gradient_accumulation_steps'] * args.train_epochs

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
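    # Standard BERT fine-tuning practice: biases and LayerNorm parameters are
    # excluded from weight decay.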
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=model_config['lr'],
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)

    if args.do_train:
        model_trained = train(args,
                              model=model,
                              optimizer=optimizer,
                              tokenizer=tokenizer,
                              model_config=model_config,
                              data=data,
                              entity2id=entity2id,
                              logical2ix=logical2ix)

    # The start/end accuracies below are only proxies; the actual accuracy is
    # computed from the pickle dump with the official SQuAD evaluation script:
    # https://rajpurkar.github.io/SQuAD-explorer/
    ##### Evaluate the model if the do_eval flag is on
    if args.do_eval:
        if args.model_type == 'ernie':
            if args.multi_task:
                device = torch.device("cuda:" + str(args.gpu))
                dev_vals = eval_plot.evaluate_bert_emrqa_ernie_multitask(
                    model_trained, data['dev'], args.eval_batch_size,
                    tokenizer, entity2id, logical2ix, device)
            else:
                dev_vals = eval_plot.evaluate_bert_emrqa_ernie(
                    model_trained, data['dev'], args.eval_batch_size,
                    tokenizer, entity2id, logical2ix)
        elif args.model_type == 'bert':
            dev_vals = eval_plot.evaluate_bert_emrqa(model_trained,
                                                     data['dev'],
                                                     args.eval_batch_size,
                                                     tokenizer)
        dict_ = {
            'start_accuracy': dev_vals[0],
            'end_accuracy': dev_vals[1],
            'actual_and_predicted_values': dev_vals[2]
        }
        file_name = '../results/' + model_config[
            'model_name'] + '_dev_results.pkl'
        pickle.dump(dict_, open(file_name, 'wb'))

    ##### Test the model
    if args.do_test:
        if args.model_type == 'ernie':
            if args.multi_task:
                device = torch.device("cuda:" + str(args.gpu))
                test_vals = eval_plot.evaluate_bert_emrqa_ernie_multitask(
                    model_trained, data['test'], args.eval_batch_size,
                    tokenizer, entity2id, logical2ix, device)
            else:
                test_vals = eval_plot.evaluate_bert_emrqa_ernie(
                    model_trained, data['test'], args.eval_batch_size,
                    tokenizer, entity2id, logical2ix)
        elif args.model_type == 'bert':
            test_vals = eval_plot.evaluate_bert_emrqa(model_trained,
                                                      data['dev'],
                                                      args.eval_batch_size,
                                                      tokenizer)
        dict_ = {
            'start_accuracy': test_vals[0],
            'end_accuracy': test_vals[1],
            'actual_and_predicted_values': test_vals[2]
        }
        file_name = '../results/' + model_config[
            'model_name'] + '_test_results.pkl'
        pickle.dump(dict_, open(file_name, 'wb'))
Example #11
0
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--train_file", default=None, type=str, required=True)
    parser.add_argument("--ernie_model",
                        default=None,
                        type=str,
                        required=True,
                        help="Ernie pre-trained model")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument("--ckpt", default='None', type=str)
    ## Other parameters
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        default=False,
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        default=False,
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--mean_pool', type=float, default=1)
    parser.add_argument("--bert_model", type=str, default='bert')

    args = parser.parse_args()
    logger.info(args)
    print(args)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size /
                                args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and args.do_train:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    processor = TypingProcessor()

    tokenizer_label = BertTokenizer_label.from_pretrained(
        args.ernie_model, do_lower_case=args.do_lower_case)
    tokenizer = BertTokenizer.from_pretrained(args.ernie_model,
                                              do_lower_case=args.do_lower_case)
    if os.path.exists('***path_to_your_roberta***'):
        load_path = '***path_to_your_roberta***'
    else:
        load_path = '***path_to_your_roberta***'
    roberta_tokenizer = RobertaTokenizer.from_pretrained(load_path)
    bert_tokenizer_cased = BertTokenizer_cased.from_pretrained(
        '***path_to_your_bert_tokenizer_cased***')

    train_examples = None
    num_train_steps = None
    train_examples, label_list, d = processor.get_train_examples(
        args.data_dir, args.train_file)
    label_list = sorted(label_list)
    #class_weight = [min(d[x], 100) for x in label_list]
    #logger.info(class_weight)
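    # Build the label-containment matrix S: S[i][j] = 1 when label_list[j] is
    # a substring of label_list[i], i.e. an ancestor type in a FIGER-style
    # hierarchy (e.g. /person for /person/artist).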
    S = []
    for l in label_list:
        s = []
        for ll in label_list:
            if ll in l:
                s.append(1.)
            else:
                s.append(0.)
        S.append(s)
    num_train_steps = int(
        len(train_examples) / args.train_batch_size /
        args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    if args.bert_model == 'bert' and args.do_lower_case:
        if os.path.exists('***path_to_your_bert_uncased***'):
            bert_model = BertModel.from_pretrained(
                '***path_to_your_bert_uncased***')
        else:
            bert_model = BertModel.from_pretrained(
                '***path_to_your_bert_uncased***')
        if args.ckpt != 'None':
            if os.path.exists('***path_to_your_bert_uncased***'):
                load_path = '***path_to_your_trained_checkpoint***' + args.ckpt
            else:
                load_path = '***path_to_your_trained_checkpoint***' + args.ckpt
            ckpt = torch.load(load_path)
            bert_model.load_state_dict(ckpt["bert-base"])
    elif args.bert_model == 'roberta':
        if os.path.exists('***path_to_your_roberta***'):
            bert_model = RobertaModel.from_pretrained(
                '***path_to_your_roberta***')
        else:
            bert_model = RobertaModel.from_pretrained(
                '***path_to_your_roberta***')
        if args.ckpt != 'None':
            if os.path.exists('***path_to_your_roberta***'):
                load_path = '***path_to_your_trained_checkpoint***' + args.ckpt
            else:
                load_path = '***path_to_your_trained_checkpoint***' + args.ckpt
            ckpt = torch.load(load_path)
            bert_model.load_state_dict(ckpt["bert-base"])
    else:
        bert_model = BertModel.from_pretrained(
            '***path_to_your_bert_model_cased***')
    model = BertForEntityTyping(bert_model, len(label_list))

    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_grad = [
        'bert.encoder.layer.11.output.dense_ent',
        'bert.encoder.layer.11.output.LayerNorm_ent'
    ]
    param_optimizer = [(n, p) for n, p in param_optimizer
                       if not any(nd in n for nd in no_grad)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)

    global_step = 0

    if args.do_train:
        if args.do_lower_case:
            if args.train_file == 'train.json' and os.path.exists(
                    'train_features_1.0'
            ) and 'FIGER' in args.data_dir and args.mean_pool == 1:
                train_features = torch.load('train_features_1.0')
            elif args.train_file == 'train.json' and os.path.exists(
                    'train_features_1.0_se'
            ) and 'FIGER' in args.data_dir and args.mean_pool == 0:
                train_features = torch.load('train_features_1.0_se')
            else:
                train_features = convert_examples_to_features(
                    train_examples, label_list, args.max_seq_length,
                    tokenizer_label, tokenizer, roberta_tokenizer,
                    bert_tokenizer_cased, args.mean_pool, args.bert_model,
                    args.do_lower_case)
        else:
            if args.train_file == 'train.json' and os.path.exists(
                    'train_features_1.0'
            ) and 'FIGER' in args.data_dir and args.mean_pool == 1:
                train_features = torch.load('train_features_cased')
            else:
                train_features = convert_examples_to_features(
                    train_examples, label_list, args.max_seq_length,
                    tokenizer_label, tokenizer, roberta_tokenizer,
                    bert_tokenizer_cased, args.mean_pool, args.bert_model,
                    args.do_lower_case)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_span_mask = torch.tensor([f.span_mask for f in train_features],
                                     dtype=torch.float)
        all_labels = torch.tensor([f.labels for f in train_features],
                                  dtype=torch.float)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_span_mask, all_labels)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        output_loss_file = os.path.join(args.output_dir, "loss")
        loss_fout = open(output_loss_file, 'w')
        model.train()
        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(train_dataloader):
                batch = tuple(
                    t.to(device) if i != 3 else t for i, t in enumerate(batch))
                input_ids, input_mask, segment_ids, span_mask, labels = batch
                loss = model(input_ids, args.bert_model, segment_ids,
                             input_mask, span_mask, labels.half())
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                loss_fout.write("{}\n".format(
                    loss.item() * args.gradient_accumulation_steps))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(
                        global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    if global_step % 150 == 0 and global_step > 0:
                        model_to_save = model.module if hasattr(
                            model, 'module') else model
                        output_model_file = os.path.join(
                            args.output_dir,
                            "pytorch_model.bin_{}".format(global_step))
                        torch.save(model_to_save.state_dict(),
                                   output_model_file)
            model_to_save = model.module if hasattr(model, 'module') else model
            output_model_file = os.path.join(
                args.output_dir, "pytorch_model.bin_{}".format(epoch))
            torch.save(model_to_save.state_dict(), output_model_file)
            x = "pytorch_model.bin_{}".format(epoch)
            for mark in [True, False]:
                if mark:
                    eval_examples = processor.get_dev_examples(args.data_dir)
                else:
                    eval_examples = processor.get_test_examples(args.data_dir)
                eval_features = convert_examples_to_features(
                    eval_examples, label_list, args.max_seq_length,
                    tokenizer_label, tokenizer, roberta_tokenizer,
                    bert_tokenizer_cased, args.mean_pool, args.bert_model,
                    args.do_lower_case)
                logger.info("***** Running evaluation *****")
                logger.info("  Num examples = %d", len(eval_examples))
                logger.info("  Batch size = %d", args.eval_batch_size)

                all_input_ids = torch.tensor(
                    [f.input_ids for f in eval_features], dtype=torch.long)
                all_input_mask = torch.tensor(
                    [f.input_mask for f in eval_features], dtype=torch.long)
                all_segment_ids = torch.tensor(
                    [f.segment_ids for f in eval_features], dtype=torch.long)
                all_span_mask = torch.tensor(
                    [f.span_mask for f in eval_features], dtype=torch.float)
                all_labels = torch.tensor([f.labels for f in eval_features],
                                          dtype=torch.float)
                eval_data = TensorDataset(all_input_ids, all_input_mask,
                                          all_segment_ids, all_span_mask,
                                          all_labels)

                eval_sampler = SequentialSampler(eval_data)
                eval_dataloader = DataLoader(eval_data,
                                             sampler=eval_sampler,
                                             batch_size=args.eval_batch_size)

                model.eval()
                eval_loss, eval_accuracy = 0, 0
                nb_eval_steps, nb_eval_examples = 0, 0
                pred = []
                true = []
                for input_ids, input_mask, segment_ids, span_mask, labels in eval_dataloader:
                    input_ids = input_ids.to(device)
                    input_mask = input_mask.to(device)
                    segment_ids = segment_ids.to(device)
                    span_mask = span_mask.to(device)
                    labels = labels.to(device)

                    with torch.no_grad():
                        tmp_eval_loss = model(input_ids, args.bert_model,
                                              segment_ids, input_mask,
                                              span_mask, labels)
                        logits = model(input_ids, args.bert_model, segment_ids,
                                       input_mask, span_mask)

                    logits = logits.detach().cpu().numpy()
                    labels = labels.to('cpu').numpy()
                    tmp_eval_accuracy, tmp_pred, tmp_true = accuracy(
                        logits, labels)
                    pred.extend(tmp_pred)
                    true.extend(tmp_true)

                    eval_loss += tmp_eval_loss.mean().item()
                    eval_accuracy += tmp_eval_accuracy

                    nb_eval_examples += input_ids.size(0)
                    nb_eval_steps += 1

                eval_loss = eval_loss / nb_eval_steps
                eval_accuracy = eval_accuracy / nb_eval_examples

                def f1(p, r):
                    if r == 0.:
                        return 0.
                    return 2 * p * r / float(p + r)

                def loose_macro(true, pred):
                    num_entities = len(true)
                    p = 0.
                    r = 0.
                    for true_labels, predicted_labels in zip(true, pred):
                        if len(predicted_labels) > 0:
                            p += len(
                                set(predicted_labels).intersection(
                                    set(true_labels))) / float(
                                        len(predicted_labels))
                        if len(true_labels):
                            r += len(
                                set(predicted_labels).intersection(
                                    set(true_labels))) / float(
                                        len(true_labels))
                    precision = p / num_entities
                    recall = r / num_entities
                    return precision, recall, f1(precision, recall)

                def loose_micro(true, pred):
                    num_predicted_labels = 0.
                    num_true_labels = 0.
                    num_correct_labels = 0.
                    for true_labels, predicted_labels in zip(true, pred):
                        num_predicted_labels += len(predicted_labels)
                        num_true_labels += len(true_labels)
                        num_correct_labels += len(
                            set(predicted_labels).intersection(
                                set(true_labels)))
                    if num_predicted_labels > 0:
                        precision = num_correct_labels / num_predicted_labels
                    else:
                        precision = 0.
                    recall = num_correct_labels / num_true_labels
                    return precision, recall, f1(precision, recall)
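                # Worked example: with true = [["/person"],
                # ["/person", "/person/artist"]] and pred = [["/person"],
                # ["/person"]], loose_micro gives precision 2/2, recall 2/3,
                # F1 = 0.8; loose_macro gives precision 1.0, recall 0.75,
                # F1 = 6/7.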

                result = {
                    'eval_loss': eval_loss,
                    'eval_accuracy': eval_accuracy,
                    'macro': loose_macro(true, pred),
                    'micro': loose_micro(true, pred)
                }

                if mark:
                    output_eval_file = os.path.join(
                        args.output_dir,
                        "eval_results_{}.txt".format(x.split("_")[-1]))
                else:
                    output_eval_file = os.path.join(
                        args.output_dir,
                        "test_results_{}.txt".format(x.split("_")[-1]))
                with open(output_eval_file, "w") as writer:
                    logger.info("***** Eval results *****")
                    for key in sorted(result.keys()):
                        logger.info("  %s = %s", key, str(result[key]))
                        writer.write("%s = %s\n" % (key, str(result[key])))

    exit(0)
def main():
    ## Required parameters
    path_to_ernie = ""
    data_dir = os.path.join(path_to_ernie, "pretrain_data/merge")
    bert_model = os.path.join(path_to_ernie, "ernie_base")
    task_name = "pretrain"
    output_dir = os.path.join(path_to_ernie, "pretrain_out")
    max_seq_length = 256
    do_train = True
    do_eval = False
    do_lower_case = False
    train_batch_size = 4
    eval_batch_size = 8
    learning_rate = 5e-5
    num_train_epochs = 3.0
    warmup_proportion = 0.1
    no_cuda = False
    local_rank = -1
    seed = 42
    gradient_accumulation_steps = 1
    fp16 = True
    loss_scale = 0.0

    if local_rank == -1 or no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(local_rank != -1), fp16))

    if gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            gradient_accumulation_steps))

    train_batch_size = int(train_batch_size / gradient_accumulation_steps)
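    # train_batch_size is now the per-forward micro-batch; the optimizer steps
    # once every gradient_accumulation_steps micro-batches, so the effective
    # batch size stays at the configured value.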

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)

    if not do_train and not do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(output_dir) and os.listdir(output_dir):
        import shutil
        shutil.rmtree(output_dir)
        # raise ValueError("Output directory ({}) already exists and is not empty.".format(output_dir))
    os.makedirs(output_dir, exist_ok=True)

    task_name = task_name.lower()

    vecs = []
    vecs.append([0] * 100)  # CLS
    with open(os.path.join(path_to_ernie, "kg_embed/entity2vec.vec"), 'r') as fin:
        for line in fin:
            # vec = line.strip().split('\t')
            vec = line.strip().split(' ')
            vec = [float(x) for x in vec]
            vecs.append(vec)
    embed = torch.FloatTensor(vecs)
    embed = torch.nn.Embedding.from_pretrained(embed)
    # embed = torch.nn.Embedding(5041175, 100)

    logger.info("Shape of entity embedding: " + str(embed.weight.size()))
    del vecs
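    # Indexing convention (inferred from the lookups below): entity id -1 means
    # "no entity", and every lookup uses embed(idx + 1), so the zero vector
    # appended at row 0 above doubles as the padding/CLS embedding while real
    # entities occupy rows 1..N.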

    train_data = None
    num_train_steps = None
    if do_train:
        # TODO
        import indexed_dataset
        from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, BatchSampler
        import iterators
        # train_data = indexed_dataset.IndexedCachedDataset(data_dir)
        train_data = indexed_dataset.IndexedDataset(data_dir, fix_lua_indexing=True)
        if local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_sampler = BatchSampler(train_sampler, train_batch_size, drop_last=True)

        def collate_fn(x):
            x = torch.LongTensor([xx for xx in x])
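            # Each packed example is one flat int vector; judging by the slices
            # below and the unpacking in the training loop, it holds blocks of
            # max_seq_length ints -- input_ids | input_mask | segment_ids |
            # masked_lm_labels | entity_ids | (a sixth, unused here) -- followed
            # by the next-sentence label(s) from position 6*max_seq_length on.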

            entity_idx = x[:, 4 * max_seq_length:5 * max_seq_length]
            # Build the candidate set: embeddings of every distinct entity id in
            # the batch (+1 shifts past the zero/padding row of `embed`)
            uniq_idx = np.unique(entity_idx.numpy())
            ent_candidate = embed(torch.LongTensor(uniq_idx + 1))
            # Replicate the candidates so each DataParallel replica sees the full set
            ent_candidate = ent_candidate.repeat([n_gpu, 1])
            # Build entity labels: d maps entity id -> position in the candidate
            # list, dd is the inverse (position -> entity id)
            d = {}
            dd = []
            for i, idx in enumerate(uniq_idx):
                d[idx] = i
                dd.append(idx)
            ent_size = len(uniq_idx) - 1

            # Corrupt entity mentions for the denoising objective: 5% replaced
            # with a random entity from the batch, 15% dropped (-1), 80% kept
            def mask_entity(x):
                if x == -1:
                    return -1
                rnd = random.uniform(0, 1)
                if rnd < 0.05:
                    # uniq_idx is sorted, so dd[0] is the -1 padding id; skip it
                    return dd[random.randint(1, ent_size)]
                elif rnd < 0.2:
                    return -1
                else:
                    return x

            # Labels for the entity prediction head: raw ids mapped to candidate
            # positions; -1 (no entity) stays -1
            ent_labels = entity_idx.clone()
            d[-1] = -1
            ent_labels = ent_labels.apply_(lambda x: d[x])

            # Corrupt the input entities in place, then look up their embeddings
            entity_idx.apply_(mask_entity)
            ent_emb = embed(entity_idx + 1)
            # Entity mask: 1 where an entity is present; position 0 is always on
            mask = entity_idx.clone()
            mask.apply_(lambda x: 0 if x == -1 else 1)
            mask[:, 0] = 1

            return (x[:, :max_seq_length],
                    x[:, max_seq_length:2 * max_seq_length],
                    x[:, 2 * max_seq_length:3 * max_seq_length],
                    x[:, 3 * max_seq_length:4 * max_seq_length],
                    ent_emb,
                    mask,
                    x[:, 6 * max_seq_length:],
                    ent_candidate,
                    ent_labels)

        train_iterator = iterators.EpochBatchIterator(train_data, collate_fn, train_sampler)
        num_train_steps = int(
            len(train_data) / train_batch_size / gradient_accumulation_steps * num_train_epochs)
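        # e.g. with hypothetical numbers: 1M examples, batch size 4, no
        # accumulation, 3 epochs -> 750k optimizer steps; this also sets
        # t_total for the warmup schedule below.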

    # Prepare model
    model, missing_keys = BertForPreTraining.from_pretrained(bert_model,
                                                             cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(
                                                                 local_rank))
    # if fp16:
    #     model.half()
    model.to(device)
    if local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_linear = ['layer.2.output.dense_ent', 'layer.2.intermediate.dense_1',
                 'bert.encoder.layer.2.intermediate.dense_1_ent', 'layer.2.output.LayerNorm_ent']
    no_linear = [x.replace('2', '11') for x in no_linear]
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in no_linear)]
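    # The '2' -> '11' substitution retargets these names at the last encoder
    # layer, so its entity-specific projections are excluded from the optimizer
    # (i.e. left frozen) during pretraining.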
    # param_optimizer = [(n, p) for n, p in param_optimizer if not any(nl in n for nl in missing_keys)]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'LayerNorm_ent.bias', 'LayerNorm_ent.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
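    # Standard BERT practice: biases and (entity) LayerNorm parameters are
    # exempt from weight decay; all other parameters use weight_decay=0.01.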
    t_total = num_train_steps
    if local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if fp16:
        try:
            # from apex.optimizers import FP16_Optimizer
            # from apex.optimizers import FusedAdam
            # from apex.contrib.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
            import apex.amp as amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        # optimizer = FusedAdam(optimizer_grouped_parameters,
        #                       lr=learning_rate,
        #                       bias_correction=False,
        #                       max_grad_norm=1.0)
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=learning_rate,
                              bias_correction=False)
        model, optimizer = amp.initialize(model, optimizer, opt_level="O3")
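        # Assuming apex's documented opt levels, "O3" selects pure fp16;
        # backprop goes through amp.scale_loss in the training loop below.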
        # if loss_scale == 0:
        #     optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        # else:
        #     optimizer = FP16_Optimizer(optimizer, static_loss_scale=loss_scale)
        # logger.info(dir(optimizer))
        # op_path = os.path.join(bert_model, "pytorch_op.bin")
        # optimizer.load_state_dict(torch.load(op_path))

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=learning_rate,
                             warmup=warmup_proportion,
                             t_total=t_total)
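    # Note: BertAdam applies linear warmup internally (via warmup/t_total),
    # whereas the FusedAdam path relies on the manual warmup_linear update to
    # param_group['lr'] in the training loop below.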

    global_step = 0
    if do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        model.train()
        import datetime
        fout = open(os.path.join(output_dir, "loss.{}".format(datetime.datetime.now())), 'w')
        for _ in trange(int(num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_iterator.next_epoch_itr(), desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)

                (input_ids, input_mask, segment_ids, masked_lm_labels, input_ent,
                 ent_mask, next_sentence_label, ent_candidate, ent_labels) = batch
                if fp16:
                    loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels, input_ent.half(),
                                                ent_mask, next_sentence_label, ent_candidate.half(), ent_labels)
                else:
                    loss, original_loss = model(input_ids, segment_ids, input_mask, masked_lm_labels, input_ent,
                                                ent_mask, next_sentence_label, ent_candidate, ent_labels)

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                    original_loss = original_loss.mean()
                if gradient_accumulation_steps > 1:
                    loss = loss / gradient_accumulation_steps

                if fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    # optimizer.backward(loss)
                else:
                    loss.backward()

                fout.write("{} {}\n".format(loss.item() * gradient_accumulation_steps, original_loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % gradient_accumulation_steps == 0:
                    # Modify the learning rate with the linear warmup schedule
                    # BERT uses: ramp up over the first warmup_proportion of
                    # t_total steps, then decay toward zero
                    lr_this_step = learning_rate * warmup_linear(global_step / t_total, warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    # if global_step % 1000 == 0:
                    #    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
                    #    output_model_file = os.path.join(output_dir, "pytorch_model.bin_{}".format(global_step))
                    #    torch.save(model_to_save.state_dict(), output_model_file)
        fout.close()

    # Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
    output_model_file = os.path.join(output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)