Example #1
import os
import re

import numpy as np
import tensorflow as tf
import torch

# BertConfig / BertForPreTraining come from the repo's own modeling module,
# as in the next example.
from modeling import BertConfig, BertForPreTraining


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file,
                                     pytorch_dump_path):
    config_path = os.path.abspath(bert_config_file)
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {} with config at {}".format(
        tf_path, config_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = BertForPreTraining(config)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used by AdamWeightDecayOptimizer to
        # calculate m and v; they are not needed when using the pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
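This converter is typically driven from the command line. A minimal sketch of that wiring, with illustrative flag names (the original script's argument names may differ):

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Flag names below are illustrative, not taken from the source script.
    parser.add_argument("--tf_checkpoint_path", type=str, required=True)
    parser.add_argument("--bert_config_file", type=str, required=True)
    parser.add_argument("--pytorch_dump_path", type=str, required=True)
    cli_args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(cli_args.tf_checkpoint_path,
                                     cli_args.bert_config_file,
                                     cli_args.pytorch_dump_path)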
Example #2
def convert_tmp_to_pytorch(bert_config_file, pytorch_dump_path):
    import re
    import pickle

    import numpy as np
    import torch
    from modeling import BertConfig, BertForPreTraining

    with open("tmp_names", "rb") as fp:  # Unpickling
        # names = pickle.load(fp, encoding='iso-8859-1')
        names = pickle.load(fp)
    with open("tmp_arrays", "rb") as fp:  # Unpickling
        # arrays = pickle.load(fp, encoding='iso-8859-1')
        arrays = pickle.load(fp)

    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = BertForPreTraining(config)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used by AdamWeightDecayOptimizer to
        # calculate m and v; they are not needed when using the pretrained model
        if name[-1] in ["adam_v", "adam_m", 'global_step']:
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
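Both converters walk the slash-separated TF variable name and use re.split to peel a trailing index off scoped names such as layer_11. A self-contained sketch of that splitting behavior:

import re

for m_name in ('layer_11', 'kernel'):
    if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
        parts = re.split(r'_(\d+)', m_name)
    else:
        parts = [m_name]
    print(m_name, '->', parts)
# layer_11 -> ['layer', '11', ''] (attribute name, submodule index, remainder)
# kernel   -> ['kernel']          (mapped to the module's .weight above)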
Example #3
    def prepare_model_and_optimizer(self):
        # Prepare model
        self.config = BertConfig.from_json_file(self.args.config_file)

        # Padding for divisibility by 8
        if self.config.vocab_size % 8 != 0:
            self.config.vocab_size += 8 - (self.config.vocab_size % 8)
        self.model = BertForPreTraining(self.config)
        self.another_model = BertForPreTraining(self.config)

        self.model.to(self.device)
        self.another_model.to(self.device)
        param_optimizer = list(self.model.named_parameters())
        no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']

        optimizer_grouped_parameters = []
        names = []

        for n, p in param_optimizer:
            if not any(nd in n for nd in no_decay):
                optimizer_grouped_parameters.append({
                    'params': [p],
                    'weight_decay': 0.01,
                    'name': n
                })
                names.append({'params': [n], 'weight_decay': 0.01})
            else:
                optimizer_grouped_parameters.append({
                    'params': [p],
                    'weight_decay': 0.00,
                    'name': n
                })
                names.append({'params': [n], 'weight_decay': 0.00})

        if self.args.phase2:
            max_steps = self.args.max_steps
            tmp = max_steps * 10
            r = self.args.phase1_end_step / tmp
            lr = self.args.learning_rate * (1 - r)
        else:
            max_steps = int(self.args.max_steps / 9 * 10)
            lr = self.args.learning_rate
        if self.args.optimizer == "lamb":
            self.optimizer = BertLAMB(optimizer_grouped_parameters,
                                      lr=lr,
                                      warmup=self.args.warmup_proportion
                                      if not self.args.phase2 else -1,
                                      t_total=max_steps)
        elif self.args.optimizer == "adam":
            self.optimizer = BertAdam(optimizer_grouped_parameters,
                                      lr=lr,
                                      warmup=self.args.warmup_proportion
                                      if not self.args.phase2 else -1,
                                      t_total=max_steps)
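The no_decay filter above is a plain substring test over parameter names, so it exempts every bias and LayerNorm parameter from weight decay. A short sketch with made-up parameter names:

no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
for n in ('encoder.layer.0.attention.self.query.weight',
          'encoder.layer.0.attention.self.query.bias',
          'encoder.layer.0.output.LayerNorm.weight'):
    weight_decay = 0.00 if any(nd in n for nd in no_decay) else 0.01
    print(n, '->', weight_decay)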
Example #4
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file,
                                     pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
Example #5
def prepare_model(args, device):

    # Prepare model
    config = BertConfig.from_json_file(args.bert_config_path)

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)
        print('padded vocab size to: {}'.format(config.vocab_size))

    # Set some options that the config file is expected to have (but don't need to be set properly
    # at this point)
    config.pad = False
    config.unpad = False
    config.dense_seq_output = False
    config.fused_mha = False
    config.fused_gelu_bias = False
    config.fuse_qkv = False
    config.fuse_scale = False
    config.fuse_mask = False
    config.fuse_dropout = False
    config.apex_softmax = False
    config.enable_stream = False
    if config.fuse_mask:
        config.apex_softmax = True
    if not config.pad:
        config.enable_stream = True
    if config.unpad:
        config.fused_mha = False

    # Load from TF checkpoint
    model = BertForPreTraining.from_pretrained(args.tf_checkpoint,
                                               from_tf=True,
                                               config=config)

    return model
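The vocab-size padding seen in several of these examples rounds up to the next multiple of 8, which keeps the embedding and output-projection GEMM dimensions friendly to mixed-precision hardware. A sketch of the arithmetic:

for vocab_size in (30522, 30528):
    if vocab_size % 8 != 0:
        vocab_size += 8 - (vocab_size % 8)
    print(vocab_size)   # 30522 pads to 30528; 30528 is already aligned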
Example #6
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file,
                                     pytorch_dump_path):

    # Load the model configuration
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))

    # Build the model
    model = BertForPreTraining(config)

    # Load the checkpoint parameters into the model
    # (Why does this work whether or not the returned model is captured?
    # Presumably the weights are loaded into the model in place.)
    load_tf_weights_in_bert(model, tf_checkpoint_path)
    print("Save PyTorch model to {}".format(pytorch_dump_path))

    # Save the PyTorch checkpoint
    torch.save(model.state_dict(), pytorch_dump_path)
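On the question raised in the comment above: load_tf_weights_in_bert assigns into the model's parameter tensors, so the model is updated in place and capturing the return value is optional. A toy stand-in (load_weights_inplace is a hypothetical helper, not the library function) makes the pattern visible:

import torch

def load_weights_inplace(model):
    with torch.no_grad():
        model.weight.fill_(1.0)   # mutate the parameter in place
    return model                   # returned only for convenience

layer = torch.nn.Linear(2, 2)
returned = load_weights_inplace(layer)
print(returned is layer)           # True: same object either way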
Example #7
def main():
    args = get_config()

    world_size = flow.env.get_world_size()
    if args.train_global_batch_size is None:
        args.train_global_batch_size = args.train_batch_size * world_size
    else:
        assert args.train_global_batch_size % args.train_batch_size == 0

    if args.val_global_batch_size is None:
        args.val_global_batch_size = args.val_batch_size * world_size
    else:
        assert args.val_global_batch_size % args.val_batch_size == 0

    flow.boxing.nccl.set_fusion_threshold_mbytes(args.nccl_fusion_threshold_mb)
    flow.boxing.nccl.set_fusion_max_ops_num(args.nccl_fusion_max_ops)

    if args.with_cuda:
        device = "cuda"
    else:
        device = "cpu"

    print("Device is: ", device)

    print("Creating Dataloader")
    train_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="train",
        dataset_size=args.train_dataset_size,
        batch_size=args.train_global_batch_size,
        data_part_num=args.train_data_part,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
        consistent=args.use_consistent,
    )

    test_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="test",
        dataset_size=1024,
        batch_size=args.val_global_batch_size,
        data_part_num=4,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
        consistent=args.use_consistent,
    )

    print("Building BERT Model")
    hidden_size = 64 * args.num_attention_heads
    intermediate_size = 4 * hidden_size
    bert_model = BertForPreTraining(
        args.vocab_size,
        args.seq_length,
        hidden_size,
        args.num_hidden_layers,
        args.num_attention_heads,
        intermediate_size,
        nn.GELU(),
        args.hidden_dropout_prob,
        args.attention_probs_dropout_prob,
        args.max_position_embeddings,
        args.type_vocab_size,
    )

    # Load the same initial parameters with lazy model.
    # from utils.compare_lazy_outputs import load_params_from_lazy
    # load_params_from_lazy(
    #     bert_model.state_dict(),
    #     "../../OneFlow-Benchmark/LanguageModeling/BERT/initial_model",
    # )

    assert id(bert_model.cls.predictions.decoder.weight) == id(
        bert_model.bert.embeddings.word_embeddings.weight
    )

    ns_criterion = nn.CrossEntropyLoss(reduction="mean")
    mlm_criterion = nn.CrossEntropyLoss(reduction="none")

    if args.use_consistent:
        placement = flow.env.all_device_placement("cuda")
        bert_model = bert_model.to_consistent(
            placement=placement, sbp=flow.sbp.broadcast
        )
    else:
        bert_model.to(device)
        ns_criterion.to(device)
        mlm_criterion.to(device)

    optimizer = build_optimizer(
        args.optim_name,
        bert_model,
        args.lr,
        args.weight_decay,
        weight_decay_excludes=["bias", "LayerNorm", "layer_norm"],
        clip_grad_max_norm=1,
        clip_grad_norm_type=2.0,
    )

    steps = args.epochs * len(train_data_loader)
    warmup_steps = int(steps * args.warmup_proportion)

    lr_scheduler = PolynomialLR(optimizer, steps=steps, end_learning_rate=0.0)

    lr_scheduler = flow.optim.lr_scheduler.WarmUpLR(
        lr_scheduler, warmup_factor=0, warmup_iters=warmup_steps, warmup_method="linear"
    )

    def get_masked_lm_loss(
        logit, masked_lm_labels, label_weights, max_predictions_per_seq,
    ):

        label_id = flow.reshape(masked_lm_labels, [-1])

        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        per_example_loss = mlm_criterion(logit, label_id)
        per_example_loss = flow.reshape(per_example_loss, [-1, max_predictions_per_seq])
        numerator = flow.sum(per_example_loss * label_weights)
        denominator = flow.sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss

    class BertGraph(nn.Graph):
        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self.ns_criterion = ns_criterion
            self.masked_lm_criterion = partial(
                get_masked_lm_loss, max_predictions_per_seq=args.max_predictions_per_seq
            )
            self.add_optimizer(optimizer, lr_sch=lr_scheduler)
            self._train_data_loader = train_data_loader
            if args.grad_acc_steps > 1:
                self.config.set_gradient_accumulation_steps(args.grad_acc_steps)
            if args.use_fp16:
                self.config.enable_amp(True)
                grad_scaler = flow.amp.GradScaler(
                    init_scale=2 ** 30,
                    growth_factor=2.0,
                    backoff_factor=0.5,
                    growth_interval=2000,
                )
                self.set_grad_scaler(grad_scaler)
            self.config.allow_fuse_add_to_output(True)
            self.config.allow_fuse_model_update_ops(True)

        def build(self):

            (
                input_ids,
                next_sentence_labels,
                input_mask,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._train_data_loader()
            input_ids = input_ids.to(device=device)
            input_mask = input_mask.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sentence_labels = next_sentence_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device=device)
            masked_lm_weights = masked_lm_weights.to(device=device)

            # 1. forward the next_sentence_prediction and masked_lm model
            prediction_scores, seq_relationship_scores = self.bert(
                input_ids, segment_ids, input_mask, masked_lm_positions
            )

            # 2-1. loss of is_next classification result
            next_sentence_loss = self.ns_criterion(
                seq_relationship_scores.reshape(-1, 2), next_sentence_labels.reshape(-1)
            )

            masked_lm_loss = self.masked_lm_criterion(
                prediction_scores, masked_lm_ids, masked_lm_weights
            )

            total_loss = masked_lm_loss + next_sentence_loss

            total_loss.backward()
            return (
                seq_relationship_scores,
                next_sentence_labels,
                total_loss,
                masked_lm_loss,
                next_sentence_loss,
            )

    bert_graph = BertGraph()

    class BertEvalGraph(nn.Graph):
        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self._test_data_loader = test_data_loader
            self.config.allow_fuse_add_to_output(True)

        def build(self):
            (
                input_ids,
                next_sent_labels,
                input_masks,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._test_data_loader()
            input_ids = input_ids.to(device=device)
            input_masks = input_masks.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sent_labels = next_sent_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device)

            with flow.no_grad():
                # 1. forward the next_sentence_prediction and masked_lm model
                _, seq_relationship_scores = self.bert(
                    input_ids, input_masks, segment_ids
                )

            return seq_relationship_scores, next_sent_labels

    bert_eval_graph = BertEvalGraph()

    train_total_losses = []

    for epoch in range(args.epochs):
        metric = Metric(
            desc="bert pretrain",
            print_steps=args.loss_print_every_n_iters,
            batch_size=args.train_global_batch_size * args.grad_acc_steps,
            keys=["total_loss", "mlm_loss", "nsp_loss", "pred_acc"],
        )

        # Train
        bert_model.train()

        for step in range(len(train_data_loader)):
            bert_outputs = pretrain(bert_graph, args.metric_local)

            if flow.env.get_rank() == 0:
                metric.metric_cb(step, epoch=epoch)(bert_outputs)

            train_total_losses.append(bert_outputs["total_loss"])

    # Eval
    bert_model.eval()
    val_acc = validation(
        epoch,
        len(test_data_loader),
        bert_eval_graph,
        args.val_print_every_n_iters,
        args.metric_local,
    )

    save_model(bert_model, args.checkpoint_path, epoch, val_acc, args.use_consistent)
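The get_masked_lm_loss helper defined inside main weights each per-position loss by label_weights, so zero-padded prediction slots contribute nothing to the mean. A numpy sketch of the same arithmetic with made-up numbers:

import numpy as np

per_example_loss = np.array([[2.0, 1.0, 0.7],
                             [1.5, 0.0, 0.0]])   # last two slots are padding
label_weights = np.array([[1.0, 1.0, 1.0],
                          [1.0, 0.0, 0.0]])      # 0.0 marks padded slots
loss = (per_example_loss * label_weights).sum() / (label_weights.sum() + 1e-5)
print(loss)   # mean over the four real predictions only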
Example #8
def main():

    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--input_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain .hdf5 files  for the task.")
    parser.add_argument("--config_file",
                        default="bert_config.json",
                        type=str,
                        required=False,
                        help="The BERT model config")
    ckpt_group = parser.add_mutually_exclusive_group(required=True)
    ckpt_group.add_argument("--ckpt_dir",
                            default=None,
                            type=str,
                            help="The ckpt directory, e.g. /results")
    ckpt_group.add_argument("--ckpt_path",
                            default=None,
                            type=str,
                            help="Path to the specific checkpoint")

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--eval', dest='do_eval', action='store_true')
    group.add_argument('--prediction', dest='do_eval', action='store_false')
    ## Other parameters
    parser.add_argument(
        "--bert_model",
        default="bert-large-uncased",
        type=str,
        required=False,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese."
    )
    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument(
        "--max_predictions_per_seq",
        default=80,
        type=int,
        help="The maximum total of masked tokens in input sequence")
    parser.add_argument("--ckpt_step",
                        default=-1,
                        type=int,
                        required=False,
                        help="The model checkpoint iteration, e.g. 1000")

    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=
        "Total number of eval  steps to perform, otherwise use full dataset")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--fp16',
        default=False,
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument("--log_path",
                        help="Out file for DLLogger",
                        default="/workspace/dllogger_inference.out",
                        type=str)

    args = parser.parse_args()

    if 'LOCAL_RANK' in os.environ:
        args.local_rank = int(os.environ['LOCAL_RANK'])

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")

    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')

    if is_main_process():
        dllogger.init(backends=[
            dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
                                       filename=args.log_path),
            dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE,
                                   step_format=format_step)
        ])
    else:
        dllogger.init(backends=[])

    n_gpu = torch.cuda.device_count()
    if n_gpu > 1:
        assert (args.local_rank != -1
                )  # only use torch.distributed for multi-gpu

    dllogger.log(
        step=
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16),
        data={})

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare model
    config = BertConfig.from_json_file(args.config_file)
    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)
    model = BertForPreTraining(config)

    if args.ckpt_dir:
        if args.ckpt_step == -1:
            # retrieve the latest model
            model_names = [
                f for f in os.listdir(args.ckpt_dir) if f.endswith(".pt")
            ]
            args.ckpt_step = max([
                int(x.split('.pt')[0].split('_')[1].strip())
                for x in model_names
            ])
            dllogger.log(step="load model saved at iteration",
                         data={"number": args.ckpt_step})
        model_file = os.path.join(args.ckpt_dir,
                                  "ckpt_" + str(args.ckpt_step) + ".pt")
    else:
        model_file = args.ckpt_path
    state_dict = torch.load(model_file, map_location="cpu")["model"]
    model.load_state_dict(state_dict, strict=False)

    if args.fp16:
        model.half(
        )  # all parameters and buffers are converted to half precision
    model.to(device)

    multi_gpu_training = args.local_rank != -1 and torch.distributed.is_initialized(
    )
    if multi_gpu_training:
        model = DDP(model)

    files = [
        os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)
        if os.path.isfile(os.path.join(args.input_dir, f)) and 'test' in f
    ]
    files.sort()

    dllogger.log(step="***** Running Inference *****", data={})
    dllogger.log(step="  Inference batch", data={"size": args.eval_batch_size})

    model.eval()

    nb_instances = 0
    max_steps = args.max_steps if args.max_steps > 0 else np.inf
    global_step = 0
    total_samples = 0

    begin_infer = time.time()
    with torch.no_grad():
        if args.do_eval:
            final_loss = 0.0
            for data_file in files:
                dllogger.log(step="Opening ", data={"file": data_file})
                dataset = pretraining_dataset(
                    input_file=data_file,
                    max_pred_length=args.max_predictions_per_seq)
                if not multi_gpu_training:
                    train_sampler = RandomSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)
                else:
                    train_sampler = DistributedSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)
                for step, batch in enumerate(
                        tqdm(datasetloader, desc="Iteration")):
                    if global_step > max_steps:
                        break
                    batch = [t.to(device) for t in batch]
                    input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
                    loss = model(input_ids=input_ids,
                                 token_type_ids=segment_ids,
                                 attention_mask=input_mask,
                                 masked_lm_labels=masked_lm_labels,
                                 next_sentence_label=next_sentence_labels)
                    final_loss += loss.item()

                    global_step += 1

                total_samples += len(datasetloader)
                torch.cuda.empty_cache()
                if global_step > max_steps:
                    break
            final_loss /= global_step
            if multi_gpu_training:
                final_loss = torch.tensor(final_loss, device=device)
                dist.all_reduce(final_loss)
                final_loss /= torch.distributed.get_world_size()
            if (not multi_gpu_training or
                (multi_gpu_training and torch.distributed.get_rank() == 0)):
                dllogger.log(step="Inference Loss",
                             data={"final_loss": final_loss.item()})

        else:  # inference
            # if multi_gpu_training:
            #     torch.distributed.barrier()
            # start_t0 = time.time()
            for data_file in files:
                dllogger.log(step="Opening ", data={"file": data_file})
                dataset = pretraining_dataset(
                    input_file=data_file,
                    max_pred_length=args.max_predictions_per_seq)
                if not multi_gpu_training:
                    train_sampler = RandomSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)
                else:
                    train_sampler = DistributedSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)

                for step, batch in enumerate(
                        tqdm(datasetloader, desc="Iteration")):
                    if global_step > max_steps:
                        break

                    batch = [t.to(device) for t in batch]
                    input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch

                    lm_logits, nsp_logits = model(input_ids=input_ids,
                                                  token_type_ids=segment_ids,
                                                  attention_mask=input_mask,
                                                  masked_lm_labels=None,
                                                  next_sentence_label=None)

                    nb_instances += input_ids.size(0)
                    global_step += 1

                total_samples += len(datasetloader)
                torch.cuda.empty_cache()
                if global_step > max_steps:
                    break
            # if multi_gpu_training:
            #     torch.distributed.barrier()
            if (not multi_gpu_training or
                (multi_gpu_training and torch.distributed.get_rank() == 0)):
                dllogger.log(step="Done Inferring on samples", data={})

    end_infer = time.time()
    dllogger.log(step="Inference perf",
                 data={
                     "inference_sequences_per_second":
                     total_samples * args.eval_batch_size /
                     (end_infer - begin_infer)
                 })
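The latest-checkpoint lookup above relies purely on the ckpt_<step>.pt naming convention. A small sketch of that parsing with sample file names:

model_names = ["ckpt_1000.pt", "ckpt_8000.pt", "ckpt_4000.pt"]  # sample names
ckpt_step = max(int(x.split('.pt')[0].split('_')[1].strip())
                for x in model_names)
print("ckpt_{}.pt".format(ckpt_step))   # ckpt_8000.pt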
Example #9
def prepare_model_and_optimizer(args, device):

    # Prepare model
    config = BertConfig.from_json_file(args.config_file)

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)
    model = BertForPreTraining(config)

    checkpoint = None
    if not args.resume_from_checkpoint:
        global_step = 0
    else:
        if args.resume_step == -1 and not args.init_checkpoint:
            model_names = [
                f for f in os.listdir(args.output_dir) if f.endswith(".pt")
            ]
            args.resume_step = max([
                int(x.split('.pt')[0].split('_')[1].strip())
                for x in model_names
            ])

        global_step = args.resume_step if not args.init_checkpoint else 0

        if not args.init_checkpoint:
            checkpoint = torch.load(os.path.join(
                args.output_dir, "ckpt_{}.pt".format(global_step)),
                                    map_location="cpu")
        else:
            checkpoint = torch.load(args.init_checkpoint, map_location="cpu")

        model.load_state_dict(checkpoint['model'], strict=False)
        if args.phase2:
            global_step -= args.phase1_end_step
        if is_main_process():
            print("resume step from ", args.resume_step)

    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']

    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    optimizer = FusedLAMB(optimizer_grouped_parameters, lr=args.learning_rate)
    lr_scheduler = PolyWarmUpScheduler(optimizer,
                                       warmup=args.warmup_proportion,
                                       total_steps=args.max_steps)
    if args.fp16:

        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level="O2",
                                              loss_scale="dynamic")
        else:
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level="O2",
                                              loss_scale=args.loss_scale)
        amp._amp_state.loss_scalers[0]._loss_scale = 2**20

    if args.resume_from_checkpoint:
        if args.phase2 or args.init_checkpoint:
            keys = list(checkpoint['optimizer']['state'].keys())
            # Override hyperparameters from the previous checkpoint
            for key in keys:
                checkpoint['optimizer']['state'][key]['step'] = global_step
            for group in checkpoint['optimizer']['param_groups']:
                group['step'] = global_step
                group['t_total'] = args.max_steps
                group['warmup'] = args.warmup_proportion
                group['lr'] = args.learning_rate
        optimizer.load_state_dict(checkpoint['optimizer'])

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint['optimizer'])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint['master params']):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(
                model,
                message_size=250000000,
                gradient_predivide_factor=torch.distributed.get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0, ))
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    return model, optimizer, lr_scheduler, checkpoint, global_step
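The two-group weight-decay split above is not specific to FusedLAMB; the same grouped-parameters list works with any torch optimizer. A minimal sketch with the stock torch.optim.AdamW and a toy model (the module names here are illustrative):

import torch

model = torch.nn.ModuleDict({
    'dense': torch.nn.Linear(4, 4),
    'LayerNorm': torch.nn.LayerNorm(4),
})
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
params = list(model.named_parameters())
optimizer = torch.optim.AdamW(
    [{'params': [p for n, p in params if not any(nd in n for nd in no_decay)],
      'weight_decay': 0.01},
     {'params': [p for n, p in params if any(nd in n for nd in no_decay)],
      'weight_decay': 0.0}],
    lr=1e-4)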
Example #10
def prepare_model_and_optimizer(args, device):

    # Prepare model
    config = BertConfig.from_json_file(args.config_file)

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)
    model = BertForPreTraining(config)

    checkpoint = None
    if not args.resume_from_checkpoint:
        global_step = 0
    else:
        if args.resume_step == -1:
            model_names = [
                f for f in os.listdir(args.output_dir) if f.endswith(".pt")
            ]
            args.resume_step = max([
                int(x.split(".pt")[0].split("_")[1].strip())
                for x in model_names
            ])
        global_step = args.resume_step

        checkpoint = torch.load(os.path.join(args.output_dir,
                                             "ckpt_{}.pt".format(global_step)),
                                map_location="cpu")
        model.load_state_dict(checkpoint["model"], strict=False)
        if args.phase2:
            global_step -= args.phase1_end_step
        if is_main_process():
            print("resume step from ", args.resume_step)

    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ["bias", "gamma", "beta", "LayerNorm"]

    optimizer_grouped_parameters = []
    names = []

    for n, p in param_optimizer:
        if not any(nd in n for nd in no_decay):
            optimizer_grouped_parameters.append({
                "params": [p],
                "weight_decay": 0.01,
                "name": n
            })
            names.append({"params": [n], "weight_decay": 0.01})
        else:
            optimizer_grouped_parameters.append({
                "params": [p],
                "weight_decay": 0.00,
                "name": n
            })
            names.append({"params": [n], "weight_decay": 0.00})

    optimizer = BertLAMB(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=args.max_steps)
    if args.fp16:

        if args.loss_scale == 0:
            # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            model, optimizer = amp.initialize(
                model,
                optimizer,
                opt_level="O2",
                loss_scale="dynamic",
                master_weights=False if args.accumulate_into_fp16 else True,
            )
        else:
            # optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            model, optimizer = amp.initialize(
                model,
                optimizer,
                opt_level="O2",
                loss_scale=args.loss_scale,
                master_weights=False if args.accumulate_into_fp16 else True,
            )
        amp._amp_state.loss_scalers[0]._loss_scale = 2**20

    if args.resume_from_checkpoint:
        if args.phase2:
            keys = list(checkpoint["optimizer"]["state"].keys())
            # Override hyperparameters from Phase 1
            for key in keys:
                checkpoint["optimizer"]["state"][key]["step"] = global_step
            for group in checkpoint["optimizer"]["param_groups"]:
                group["t_total"] = args.max_steps
                group["warmup"] = args.warmup_proportion
                group["lr"] = args.learning_rate
        optimizer.load_state_dict(checkpoint["optimizer"])  # , strict=False)

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint["optimizer"])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint["master params"]):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(
                model,
                message_size=250000000,
                gradient_predivide_factor=torch.distributed.get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0, ))
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    return model, optimizer, checkpoint, global_step
Example #11
def main():

    print("IN NEW MAIN XD\n")
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--input_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain .hdf5 files  for the task.")
    parser.add_argument("--config_file",
                        default="bert_config.json",
                        type=str,
                        required=False,
                        help="The BERT model config")
    parser.add_argument("--ckpt_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The ckpt directory, e.g. /results")

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--eval', dest='do_eval', action='store_true')
    group.add_argument('--prediction', dest='do_eval', action='store_false')
    ## Other parameters
    parser.add_argument(
        "--bert_model",
        default="bert-large-uncased",
        type=str,
        required=False,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese."
    )
    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument(
        "--max_predictions_per_seq",
        default=80,
        type=int,
        help="The maximum total of masked tokens in input sequence")
    parser.add_argument("--ckpt_step",
                        default=-1,
                        type=int,
                        required=False,
                        help="The model checkpoint iteration, e.g. 1000")

    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=
        "Total number of eval  steps to perform, otherwise use full dataset")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--fp16',
        default=False,
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")

    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
    n_gpu = torch.cuda.device_count()
    if n_gpu > 1:
        assert (args.local_rank != -1
                )  # only use torch.distributed for multi-gpu
    logger.info("device %s n_gpu %d distributed inference %r", device, n_gpu,
                bool(args.local_rank != -1))

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare model
    config = BertConfig.from_json_file(args.config_file)
    model = BertForPreTraining(config)

    if args.ckpt_step == -1:
        # retrieve the latest model
        model_names = [
            f for f in os.listdir(args.ckpt_dir) if f.endswith(".model")
        ]
        args.ckpt_step = max([
            int(x.split('.model')[0].split('_')[1].strip())
            for x in model_names
        ])
        print("load model saved at iteraton", args.ckpt_step)
    model_file = os.path.join(args.ckpt_dir,
                              "ckpt_" + str(args.ckpt_step) + ".model")
    state_dict = torch.load(model_file, map_location="cpu")
    model.load_state_dict(state_dict, strict=False)

    if args.fp16:
        model.half(
        )  # all parameters and buffers are converted to half precision
    model.to(device)

    multi_gpu_training = args.local_rank != -1 and torch.distributed.is_initialized(
    )
    if multi_gpu_training:
        model = DDP(model)

    files = [
        os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)
        if os.path.isfile(os.path.join(args.input_dir, f))
    ]
    files.sort()

    logger.info("***** Running evaluation *****")
    logger.info("  Batch size = %d", args.eval_batch_size)

    model.eval()
    print("Evaluation. . .")

    nb_instances = 0
    max_steps = args.max_steps if args.max_steps > 0 else np.inf
    global_step = 0

    with torch.no_grad():
        if args.do_eval:
            final_loss = 0.0
            for data_file in files:
                logger.info("file %s" % (data_file))
                dataset = pretraining_dataset(
                    input_file=data_file,
                    max_pred_length=args.max_predictions_per_seq)
                if not multi_gpu_training:
                    train_sampler = RandomSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)
                else:
                    train_sampler = DistributedSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)
                for step, batch in enumerate(
                        tqdm(datasetloader, desc="Iteration")):
                    if global_step > max_steps:
                        break

                    batch = [t.to(device) for t in batch]
                    input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
                    loss = model(input_ids=input_ids,
                                 token_type_ids=segment_ids,
                                 attention_mask=input_mask,
                                 masked_lm_labels=masked_lm_labels,
                                 next_sentence_label=next_sentence_labels)
                    final_loss += loss

                    global_step += 1

                torch.cuda.empty_cache()
                if global_step > max_steps:
                    break
            final_loss /= global_step
            if multi_gpu_training:
                final_loss /= torch.distributed.get_world_size()
                dist.all_reduce(final_loss)
            if (not multi_gpu_training or
                (multi_gpu_training and torch.distributed.get_rank() == 0)):
                logger.info("Finished: Final Loss = {}".format(final_loss))

        else:  # inference
            # if multi_gpu_training:
            #     torch.distributed.barrier()
            # start_t0 = time.time()
            for data_file in files:
                logger.info("file %s" % (data_file))
                dataset = pretraining_dataset(
                    input_file=data_file,
                    max_pred_length=args.max_predictions_per_seq)
                if not multi_gpu_training:
                    train_sampler = RandomSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)
                else:
                    train_sampler = DistributedSampler(dataset)
                    datasetloader = DataLoader(dataset,
                                               sampler=train_sampler,
                                               batch_size=args.eval_batch_size,
                                               num_workers=4,
                                               pin_memory=True)
                for step, batch in enumerate(
                        tqdm(datasetloader, desc="Iteration")):
                    if global_step > max_steps:
                        break

                    batch = [t.to(device) for t in batch]
                    input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch

                    lm_logits, nsp_logits = model(input_ids=input_ids,
                                                  token_type_ids=segment_ids,
                                                  attention_mask=input_mask,
                                                  masked_lm_labels=None,
                                                  next_sentence_label=None)

                    nb_instances += input_ids.size(0)

                    global_step += 1
                torch.cuda.empty_cache()
                if global_step > max_steps:
                    break
            # if multi_gpu_training:
            #     torch.distributed.barrier()
            if (not multi_gpu_training or
                (multi_gpu_training and torch.distributed.get_rank() == 0)):
                logger.info("Finished")
Example #12
def inference(args):
    start_t = time.time()
    bert_module = BertForPreTraining(
        args.vocab_size,
        args.seq_length,
        args.hidden_size,
        args.num_hidden_layers,
        args.num_attention_heads,
        args.intermediate_size,
        nn.GELU(),
        args.hidden_dropout_prob,
        args.attention_probs_dropout_prob,
        args.max_position_embeddings,
        args.type_vocab_size,
        args.vocab_size,
    )
    end_t = time.time()
    print("Initialize model using time: {:.3f}s".format(end_t - start_t))

    start_t = time.time()
    if args.use_lazy_model:
        from utils.compare_lazy_outputs import load_params_from_lazy

        load_params_from_lazy(
            bert_module.state_dict(),
            args.model_path,
        )
    else:
        bert_module.load_state_dict(flow.load(args.model_path))
    end_t = time.time()
    print("Loading parameters using time: {:.3f}s".format(end_t - start_t))

    bert_module.eval()
    bert_module.to(args.device)

    class BertEvalGraph(nn.Graph):
        def __init__(self):
            super().__init__()
            self.bert = bert_module

        def build(self, input_ids, input_masks, segment_ids):
            input_ids = input_ids.to(device=args.device)
            input_masks = input_masks.to(device=args.device)
            segment_ids = segment_ids.to(device=args.device)

            with flow.no_grad():
                # 1. forward the next_sentence_prediction and masked_lm model
                _, seq_relationship_scores = self.bert(input_ids, input_masks,
                                                       segment_ids)

            return seq_relationship_scores

    bert_eval_graph = BertEvalGraph()

    start_t = time.time()
    inputs = [np.random.randint(0, 20, size=args.seq_length)]
    inputs = flow.Tensor(inputs,
                         dtype=flow.int64,
                         device=flow.device(args.device))
    mask = flow.cast(inputs > 0, dtype=flow.int64)

    segment_info = flow.zeros_like(inputs)
    prediction = bert_eval_graph(inputs, mask, segment_info)
    print(prediction.numpy())
    end_t = time.time()
    print("Inference using time: {:.3f}".format(end_t - start_t))
Example #13
def prepare_model_and_optimizer(args, device):
    global_step = 0
    args.resume_step = 0
    checkpoint = None

    config = BertConfig.from_json_file(args.bert_config_path)
    config.fused_mha = args.fused_mha
    config.fused_gelu_bias = args.fused_gelu_bias
    config.dense_seq_output = args.dense_seq_output
    config.unpad = args.unpad
    config.pad = args.pad
    config.fuse_qkv = not args.disable_fuse_qkv
    config.fuse_scale = not args.disable_fuse_scale
    config.fuse_mask = not args.disable_fuse_mask
    config.fuse_dropout = args.enable_fuse_dropout
    config.apex_softmax = not args.disable_apex_softmax
    config.enable_stream = args.enable_stream
    if config.fuse_mask:
        config.apex_softmax = True
    if not config.pad:
        config.enable_stream = True
    if config.unpad:
        config.fused_mha = False

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)

    # Load from Pyt checkpoint - either given as init_checkpoint, or picked up from output_dir if found
    if args.init_checkpoint is not None or found_resume_checkpoint(args):
        # Prepare model

        model = BertForPreTraining(config)
        if args.init_checkpoint is None: # finding checkpoint in output_dir
            checkpoint_str = "phase2_ckpt_*.pt" if args.phase2 else "phase1_ckpt_*.pt"
            model_names = [f for f in glob.glob(os.path.join(args.output_dir, checkpoint_str))]
            global_step = max([int(x.split('.pt')[0].split('_')[-1].strip()) for x in model_names])
            args.resume_step = global_step #used for throughput computation

            resume_init_checkpoint = os.path.join(args.output_dir, checkpoint_str.replace("*", str(global_step)))
            print("Setting init checkpoint to %s - which is the latest in %s" %(resume_init_checkpoint, args.output_dir))
            checkpoint=torch.load(resume_init_checkpoint, map_location="cpu")
        else:
            checkpoint=torch.load(args.init_checkpoint, map_location="cpu")["model"]

        # Fused MHA requires a remapping of checkpoint parameters
        if config.fused_mha:
            checkpoint_remapped = remap_attn_parameters(checkpoint)
            model.load_state_dict(checkpoint_remapped, strict=False)
        else:
            model.load_state_dict(checkpoint, strict=True)
    else: #Load from TF Checkpoint
        model = BertForPreTraining.from_pretrained(args.init_tf_checkpoint, from_tf=True, config=config)


    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']

    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay_rate},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]

    mlperf_logger.log_event(key=mlperf_logger.constants.OPT_BASE_LR,
                            value=args.learning_rate, sync=False)
    optimizer = FusedLAMB(optimizer_grouped_parameters,
                          lr=args.learning_rate,
                          betas=(args.opt_lamb_beta_1, args.opt_lamb_beta_2))
    mlperf_logger.log_event(key='opt_epsilon', value=optimizer.defaults['eps'],
                            sync=False)
    b1, b2 = optimizer.defaults['betas']
    mlperf_logger.log_event(key='opt_lamb_beta_1', value=b1, sync=False)
    mlperf_logger.log_event(key='opt_lamb_beta_2', value=b2, sync=False)
    mlperf_logger.log_event(key='opt_lamb_weight_decay_rate',
                            value=optimizer.defaults['weight_decay'],
                            sync=False)

    if args.warmup_steps == 0:
        warmup_steps = int(args.max_steps * args.warmup_proportion)
        warmup_start = 0
    else:
        warmup_steps = args.warmup_steps
        warmup_start = args.start_warmup_step
    lr_scheduler = LinearWarmupPolyDecayScheduler(optimizer, start_warmup_steps=warmup_start, warmup_steps=warmup_steps,
                                                  total_steps=args.max_steps, end_learning_rate=0.0, degree=1.0)
    if args.fp16:

        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic")
        else:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale)
        amp._amp_state.loss_scalers[0]._loss_scale = float(os.getenv("INIT_LOSS_SCALE", 2**20))

    if found_resume_checkpoint(args):
        optimizer.load_state_dict(checkpoint['optimizer']) #restores m,v states (only if resuming checkpoint, not for init_checkpoint and init_tf_checkpoint for now)

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint['optimizer'])
            for param, saved_param in zip(amp.master_params(optimizer), checkpoint['master params']):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(model, message_size=250000000, gradient_predivide_factor=torch.distributed.get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()], torch.distributed.broadcast, (0,) )

    return model, optimizer, lr_scheduler, checkpoint, global_step
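
Side note: the resume logic above simply globs for phase checkpoints and keeps the highest step. A minimal standalone sketch of that selection (same "phase*_ckpt_<step>.pt" naming as above; the helper name latest_checkpoint is ours):

import glob
import os

def latest_checkpoint(output_dir, phase2=False):
    """Return (path, step) of the newest phase checkpoint, or (None, 0)."""
    pattern = "phase2_ckpt_*.pt" if phase2 else "phase1_ckpt_*.pt"
    paths = glob.glob(os.path.join(output_dir, pattern))
    if not paths:
        return None, 0

    def step_of(p):
        return int(p.split(".pt")[0].split("_")[-1])

    newest = max(paths, key=step_of)
    return newest, step_of(newest)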
Example no. 14
print(f'input_ids: {input_ids.shape}')
print(f'segment_ids: {segment_ids.shape}')
print(f'input_mask: {input_mask.shape}')
print(f'masked_lm_labels: {masked_lm_labels.shape}')
print(f'next_sentence_labels: {next_sentence_labels.shape}')

# Load model
config = BertConfig.from_json_file(args.config_file)

# We skip padding for consistency with the HuggingFace repository
# if config.vocab_size % 8 != 0:
#     config.vocab_size += 8 - (config.vocab_size % 8)

# noinspection PyUnresolvedReferences
model = BertForPreTraining(config).cuda()

loss = model(input_ids=input_ids,
             token_type_ids=segment_ids,
             attention_mask=input_mask,
             masked_lm_labels=masked_lm_labels,
             next_sentence_label=next_sentence_labels,
             checkpoint_activations=args.checkpoint_activations)

flops, params = clever_format(
    profile(
        model,
        inputs=batch,
        custom_ops={
            Embedding: None,  # TODO: custom operator: Embedding
            FusedLayerNorm: None,  # TODO: custom operator: FusedLayerNorm
        },
    ))
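
For context, thop's profile returns raw MAC and parameter counts, and clever_format renders them as human-readable strings; a toy sketch of the same pattern (assumes thop is installed; the Linear module is just a stand-in):

import torch
from thop import clever_format, profile

toy = torch.nn.Linear(16, 4)
macs, params = profile(toy, inputs=(torch.randn(1, 16),))
macs, params = clever_format([macs, params], "%.3f")
print(macs, params)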
Example no. 15
class Trainer:
    def is_main_process(self):
        return self.team_rank == 0

    def parse_arguments(self):
        parser = argparse.ArgumentParser()

        # Required parameters
        parser.add_argument("--input_file",
                            default=None,
                            type=str,
                            required=True,
                            help="The input data file. Should be zip file "
                            "containing .hdf5 files for the task.")

        parser.add_argument("--config_file",
                            default=None,
                            type=str,
                            required=True,
                            help="The BERT model config")

        parser.add_argument("--bert_model",
                            default="bert-large-uncased",
                            type=str,
                            help="Bert pre-trained model selected in the "
                            "list: bert-base-uncased, bert-large-uncased, "
                            "bert-base-cased, bert-base-multilingual, "
                            "bert-base-chinese.")

        parser.add_argument("--output_dir",
                            default=None,
                            type=str,
                            required=True,
                            help="The output directory where the model "
                            "checkpoints will be written.")

        # Other parameters
        parser.add_argument("--max_seq_length",
                            default=512,
                            type=int,
                            help="The maximum total input sequence length "
                            "after WordPiece tokenization. \n"
                            "Sequences longer than this will be truncated, "
                            "and sequences shorter \n"
                            "than this will be padded.")
        parser.add_argument("--max_predictions_per_seq",
                            default=80,
                            type=int,
                            help="The maximum total of masked tokens in input "
                            "sequence")
        parser.add_argument("--train_batch_size",
                            default=32,
                            type=int,
                            help="Total batch size for training.")
        parser.add_argument("--learning_rate",
                            default=5e-5,
                            type=float,
                            help="The initial learning rate for Adam.")
        parser.add_argument("--max_steps",
                            default=1000,
                            type=float,
                            help="Total number of training steps to perform.")
        parser.add_argument("--warmup_proportion",
                            default=0.01,
                            type=float,
                            help="Proportion of training to perform linear "
                            "learning rate warmup for. "
                            "E.g., 0.1 = 10%% of training.")
        parser.add_argument("--local_rank",
                            type=int,
                            default=-1,
                            help="local_rank for distributed training on gpus")
        parser.add_argument('--seed',
                            type=int,
                            default=42,
                            help="random seed for initialization")
        parser.add_argument('--log_freq',
                            type=float,
                            default=50.0,
                            help='frequency of logging loss.')
        parser.add_argument('--checkpoint_activations',
                            default=False,
                            action='store_true',
                            help="Whether to use gradient checkpointing")
        parser.add_argument("--resume_from_checkpoint",
                            default=False,
                            action='store_true',
                            help="Whether to resume training from checkpoint.")
        parser.add_argument('--resume_step',
                            type=int,
                            default=-1,
                            help="Step to resume training from.")
        parser.add_argument('--num_steps_per_checkpoint',
                            type=int,
                            default=100,
                            help="Number of update steps until a model "
                            "checkpoint is saved to disk.")
        parser.add_argument('--phase2',
                            default=False,
                            action='store_true',
                            help="Whether to train with seq len 512")
        parser.add_argument('--phase1_end_step',
                            type=int,
                            default=7038,
                            help="Number of training steps in Phase1 - "
                            "seq len 128")
        parser.add_argument('--online_distillation',
                            type=str,
                            default="none",
                            choices=["none", "original", "overlap", "logit"],
                            help="Settings for online distillation")
        parser.add_argument('--burnin_steps', type=int, default=0)
        parser.add_argument('--distillation_weight', type=float, default=1)
        parser.add_argument('--distillation_loss',
                            type=str,
                            default="kl_divergence",
                            choices=["cross_entropy", "kl_divergence"])
        parser.add_argument('--distillation_steps', type=int, default=50)
        parser.add_argument('--optimizer',
                            type=str,
                            default="lamb",
                            choices=["lamb", "adam"])
        self.args = parser.parse_args()

    def setup_training(self):
        assert (torch.cuda.is_available())

        torch.cuda.set_device(self.args.local_rank)
        self.device = torch.device("cuda", self.args.local_rank)
        # Initializes the distributed backend which will take care of
        # synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')

        self.rank = torch.distributed.get_rank()
        self.size = torch.distributed.get_world_size()
        if self.args.online_distillation == "none":
            self.team = 0
            self.team_masters = [0]
            self.team_master = 0
            self.local_group = torch.distributed.new_group(
                ranks=list(range(0, self.size)))
            self.team_rank = torch.distributed.get_rank()
            self.team_size = torch.distributed.get_world_size()
        else:
            assert self.size % 2 == 0, \
                'with distillation, world size must be a multiple of 2'
            self.team = self.rank // (self.size // 2)
            self.team_masters = [0, (self.size // 2)]
            self.team_master = self.team_masters[self.team]
            self.is_team_master = (self.rank % (self.size // 2) == 0)
            local_group0 = torch.distributed.new_group(
                ranks=list(range(0, self.size // 2)))
            local_group1 = torch.distributed.new_group(
                ranks=list(range(self.size // 2, self.size)))
            self.local_groups = [local_group0, local_group1]
            self.local_group = self.local_groups[self.team]

            self.team_rank = self.rank % (self.size // 2)
            self.team_size = self.size // 2
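            # Example with size=8: team 0 = ranks 0..3, team 1 = ranks 4..7;
            # team_rank is the position within the team (rank % 4) and
            # team_size is 4 for both teams.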

            comm_model_group_rank0 = \
                [0] + list(range(self.team_size, self.team_size * 2))
            comm_model_group_rank1 = \
                [self.team_size] + list(range(0, self.team_size))
            self.comm_model_group_ranks = [
                comm_model_group_rank0, comm_model_group_rank1
            ]

            if self.args.online_distillation == "logit":
                for i in range(0, self.size // 2):
                    ranks = [i, i + self.size // 2]
                    grp = torch.distributed.new_group(ranks=ranks)
                    if self.rank in ranks:
                        self.equalize_data_group = grp
                # use different seeds in different teams
                self.args.data_seed = 12345
                self.args.seed += self.team * 12345
            else:
                # use different seeds in different teams
                self.args.seed += self.team * 12345

        self.args.train_batch_size //= self.team_size

        if not self.args.resume_from_checkpoint:
            chio.makedirs(self.args.output_dir, exist_ok=True)

    def prepare_model_and_optimizer(self):
        # Prepare model
        self.config = BertConfig.from_json_file(self.args.config_file)

        # Padding for divisibility by 8
        if self.config.vocab_size % 8 != 0:
            self.config.vocab_size += 8 - (self.config.vocab_size % 8)
        self.model = BertForPreTraining(self.config)
        self.another_model = BertForPreTraining(self.config)

        self.model.to(self.device)
        self.another_model.to(self.device)
        param_optimizer = list(self.model.named_parameters())
        no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']

        optimizer_grouped_parameters = []
        names = []

        for n, p in param_optimizer:
            if not any(nd in n for nd in no_decay):
                optimizer_grouped_parameters.append({
                    'params': [p],
                    'weight_decay': 0.01,
                    'name': n
                })
                names.append({'params': [n], 'weight_decay': 0.01})
            else:
                optimizer_grouped_parameters.append({
                    'params': [p],
                    'weight_decay': 0.00,
                    'name': n
                })
                names.append({'params': [n], 'weight_decay': 0.00})

        if self.args.phase2:
            max_steps = self.args.max_steps
            tmp = max_steps * 10
            r = self.args.phase1_end_step / tmp
            lr = self.args.learning_rate * (1 - r)
        else:
            max_steps = int(self.args.max_steps / 9 * 10)
            lr = self.args.learning_rate
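        # Worked example with the parser defaults (--phase2, max_steps=1000,
        # phase1_end_step=7038, learning_rate=5e-5):
        #   r  = 7038 / (1000 * 10) = 0.7038
        #   lr = 5e-5 * (1 - 0.7038) ≈ 1.48e-5
        # so phase 2 starts roughly where the phase-1 decay left off.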
        if self.args.optimizer == "lamb":
            self.optimizer = BertLAMB(optimizer_grouped_parameters,
                                      lr=lr,
                                      warmup=self.args.warmup_proportion
                                      if not self.args.phase2 else -1,
                                      t_total=max_steps)
        elif self.args.optimizer == "adam":
            self.optimizer = BertAdam(optimizer_grouped_parameters,
                                      lr=lr,
                                      warmup=self.args.warmup_proportion
                                      if not self.args.phase2 else -1,
                                      t_total=max_steps)

    def prepare_snapshot(self):
        self.snapshot = Snapshot(self.args, self.model, self.another_model,
                                 self.optimizer, self.team)
        flat_dist_call([param.data for param in self.model.parameters()],
                       torch.distributed.broadcast,
                       (self.team_master, self.local_group))

    def forward(self, model, batch, calc_loss=True):
        input_ids, segment_ids, input_mask, \
            masked_lm_labels, next_sentence_labels = batch
        if calc_loss:
            return model(
                input_ids=input_ids,
                token_type_ids=segment_ids,
                attention_mask=input_mask,
                masked_lm_labels=masked_lm_labels,
                next_sentence_label=next_sentence_labels,
                checkpoint_activations=self.args.checkpoint_activations)
        else:
            return model(
                input_ids=input_ids,
                token_type_ids=segment_ids,
                attention_mask=input_mask,
                masked_lm_labels=None,
                next_sentence_label=None,
                checkpoint_activations=self.args.checkpoint_activations)

    def backward(self, loss):
        loss.backward()

    def comm_model(self):
        for i in range(2):
            root = self.comm_model_group_ranks[i][0]
            teams = set(range(root, root + self.team_size))
            if self.rank in teams:
                flat_dist_call(
                    [param.data for param in self.model.parameters()],
                    torch.distributed.broadcast, (i * self.team_size, ))
            else:
                flat_dist_call(
                    [param.data for param in self.another_model.parameters()],
                    torch.distributed.broadcast, (i * self.team_size, ))

    def all_reduce(self, overflow_buf, accum=1):
        scaler = amp.scaler.LossScaler(1.0)

        # 1. allocate an uninitialized buffer for flattened gradient
        master_grads = [
            p.grad for p in amp.master_params(self.optimizer)
            if p.grad is not None
        ]
        flat_grad_size = sum(p.numel() for p in master_grads)
        allreduce_dtype = torch.float32
        flat_raw = torch.empty(flat_grad_size,
                               device='cuda',
                               dtype=allreduce_dtype)
        # 2. combine unflattening and predivision of unscaled 'raw' gradient
        allreduced_views = apex_C.unflatten(flat_raw, master_grads)
        overflow_buf.zero_()
        amp_C.multi_tensor_scale(
            65536, overflow_buf, [master_grads, allreduced_views],
            scaler.loss_scale() / (self.team_size * accum))
        # 3. sum gradient across ranks. Because of the predivision,
        #    this averages the gradient
        torch.distributed.all_reduce(flat_raw, group=self.local_group)
        # 4. combine unscaling and unflattening of allreduced gradient
        overflow_buf.zero_()
        amp_C.multi_tensor_scale(65536, overflow_buf,
                                 [allreduced_views, master_grads],
                                 1. / scaler.loss_scale())
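        # Dividing by (team_size * accum) *before* the all-reduce keeps the
        # summed values in range; step 4 only removes the loss scale again,
        # so the net effect is a mean over ranks and accumulation steps.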

    def take_optimizer_step(self, global_step):
        # 1. call optimizer step function
        self.optimizer.step()
        global_step += 1
        for param in self.model.parameters():
            param.grad = None

        return global_step

    def init_dataloader(self, epoch, pool, rng=None):
        rng = rng or random
        if not self.args.resume_from_checkpoint or epoch > 0 or \
                self.args.phase2:
            with chio.open_as_container(self.args.input_file) as input_file:
                files = [f for f in input_file.list() if "training" in f]
            files.sort()
            num_files = len(files)
            rng.shuffle(files)
            f_start_id = 0
        else:
            f_start_id = self.snapshot.f_id
            files = self.snapshot.files
            self.args.resume_from_checkpoint = False
            num_files = len(files)

        if torch.distributed.is_initialized() and \
                self.team_size > num_files:
            remainder = self.team_size % num_files
            data_file = files[(f_start_id * self.team_size + self.team_rank +
                               remainder * f_start_id) % num_files]
        else:
            data_file = files[(f_start_id * self.team_size + self.team_rank) %
                              len(files)]

        return pool.submit(create_pretraining_dataset, self.args.input_file,
                           data_file, self.args.max_predictions_per_seq,
                           self.args), f_start_id, files, data_file

    def update_dataloader(self, pool, f_id, files):
        if self.team_size > len(files):
            remainder = self.team_size % len(files)
            data_file = files[(f_id * self.team_size + self.team_rank +
                               remainder * f_id) % len(files)]
        else:
            data_file = files[(f_id * self.team_size + self.team_rank) %
                              len(files)]

        dataset_future = pool.submit(create_pretraining_dataset,
                                     self.args.input_file, data_file,
                                     self.args.max_predictions_per_seq,
                                     self.args)
        return dataset_future, data_file

    def loss(self, prediction_scores, seq_relationship_score, batch):
        _, _, _, masked_lm_labels, next_sentence_labels = batch
        loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(
            prediction_scores.view(-1, self.config.vocab_size),
            masked_lm_labels.view(-1))
        next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2),
                                      next_sentence_labels.view(-1))
        return masked_lm_loss + next_sentence_loss
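        # Both heads share one CrossEntropyLoss with ignore_index=-1, so padded
        # MLM positions (label -1) drop out of the masked-LM term automatically.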

    def compute_distillation_loss(self, output, another_output, target=None):
        c = output.shape[-1]
        output = output.view(-1, c)
        another_output = another_output.view(-1, c)
        with torch.no_grad():
            if target is None:
                mask = torch.ones(len(output),
                                  1,
                                  device=output.device,
                                  dtype=output.dtype)
            else:
                mask = (target != -1).long().view(-1, 1)
        if self.args.distillation_loss == 'cross_entropy':
            other_distr = torch.softmax(another_output, dim=1)
            return -torch.sum(
                mask *
                (torch.log_softmax(output, dim=1) * other_distr)) / mask.sum()
        elif self.args.distillation_loss == 'kl_divergence':
            return torch.sum(
                mask *
                (torch.softmax(output, dim=1) *
                 (torch.log_softmax(output, dim=1) -
                  torch.log_softmax(another_output, dim=1)))) / mask.sum()
        else:
            raise ValueError('unknown distillation loss: {}'.format(
                self.args.distillation_loss))
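        # In symbols, with p = softmax(output) and q = softmax(another_output),
        # the KL branch computes sum(mask * p * (log p - log q)) / sum(mask):
        # a masked mean of KL(p || q) over token positions.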

    def train_simple(self):
        global_step = self.snapshot.global_step or 0
        if self.args.phase2:
            self.args.accum = self.args.train_batch_size // 8
            self.args.train_batch_size = 8
        else:
            self.args.accum = 1

        if self.is_main_process():
            print("SEED {}".format(self.args.seed))
            logger.info("***** Running training *****")
            # logger.info("  Num examples = %d", len(train_data))
            logger.info("  Batch size = %d", self.args.train_batch_size)
            logger.info("  Accum = %d", self.args.accum)
            print("  LR = ", self.args.learning_rate)
            print("Training. . .")

        self.model.train()
        average_loss = 0.0  # averaged loss every self.args.log_freq steps
        epoch = 0

        # Note: We loop infinitely over epochs, termination is handled via
        #       iteration count
        begin = None
        with ThreadPoolExecutor(1) as pool:
            while True:
                dataset_future, f_start_id, files, data_file = \
                    self.init_dataloader(epoch, pool)
                previous_file = data_file
                train_dataloader, _ = dataset_future.result(timeout=None)

                overflow_buf = torch.cuda.IntTensor([0])

                for f_id in range(f_start_id + 1, len(files)):
                    logger.info("file no %s file %s" % (f_id, previous_file))
                    dataset_future, data_file = \
                        self.update_dataloader(pool, f_id, files)
                    previous_file = data_file

                    it = 0
                    for batch in train_dataloader:
                        if begin is None:
                            begin = time.time()
                        it += 1
                        batch = [t.to(self.device) for t in batch]
                        loss = self.forward(self.model, batch)
                        self.backward(loss)
                        average_loss += loss.item()

                        if it % self.args.accum == 0:
                            self.all_reduce(overflow_buf, self.args.accum)
                            global_step = self.take_optimizer_step(global_step)
                            it = 0

                            if global_step % self.args.log_freq == 0:
                                divisor = self.args.log_freq * self.args.accum
                                if self.is_main_process():
                                    print(
                                        "Team: {} Step:{} Average Loss = {} ".
                                        format(self.team, global_step,
                                               average_loss / divisor))
                                average_loss = 0

                            if global_step >= self.args.max_steps or \
                                (global_step %
                                 self.args.num_steps_per_checkpoint) == 0:
                                if self.team_rank == 0:
                                    # Save a trained model
                                    logger.info("** ** Saving model ** **")
                                    self.snapshot.save(global_step, f_id,
                                                       files)

                            if global_step >= self.args.max_steps:
                                del train_dataloader
                                torch.distributed.barrier()
                                if torch.distributed.get_rank() == 0:
                                    print("Total time taken {}".format(
                                        time.time() - begin))
                                return self.args

                    del train_dataloader
                    # Make sure pool has finished and switch train_dataloader
                    # NOTE: Will block until complete
                    train_dataloader, data_file = dataset_future.result(
                        timeout=None)

                epoch += 1

    def train_online_distillation_original(self):
        global_step = self.snapshot.global_step or 0

        if self.is_main_process():
            print("SEED {}".format(self.args.seed))
            logger.info("***** Running training *****")
            # logger.info("  Num examples = %d", len(train_data))
            logger.info("  Batch size = %d", self.args.train_batch_size)
            print("  LR = ", self.args.learning_rate)
            print("  Online Distillation")
            print("Training. . .")

        self.model.train()
        average_loss = 0.0  # averaged loss every self.args.log_freq steps
        average_dloss_0 = 0.0  # averaged loss every self.args.log_freq steps
        average_dloss_1 = 0.0
        epoch = 0
        begin = None

        # Note: We loop infinitely over epochs, termination is handled via
        #       iteration count
        with ThreadPoolExecutor(1) as pool:
            while True:
                dataset_future, f_start_id, files, data_file = \
                    self.init_dataloader(epoch, pool)
                previous_file = data_file
                train_dataloader, _ = dataset_future.result(timeout=None)

                overflow_buf = torch.cuda.IntTensor([0])

                for f_id in range(f_start_id + 1, len(files)):
                    logger.info("file no %s file %s" % (f_id, previous_file))
                    dataset_future, data_file = \
                        self.update_dataloader(pool, f_id, files)
                    previous_file = data_file

                    for batch in train_dataloader:
                        if begin is None:
                            begin = time.time()
                        step = global_step
                        if self.args.phase2:
                            step += self.args.phase1_end_step
                        if step >= self.args.burnin_steps and \
                                (step % self.args.distillation_steps) == 0:
                            self.comm_model()

                        batch = [t.to(self.device) for t in batch]
                        _, _, _, masked_lm_labels, _ = batch
                        if step < self.args.burnin_steps:
                            loss = self.forward(self.model, batch)
                            dloss0 = torch.zeros(())
                            dloss1 = torch.zeros(())
                        else:
                            out0, out1 = self.forward(self.model,
                                                      batch,
                                                      calc_loss=False)
                            with torch.no_grad():
                                aout0, aout1 = self.forward(self.another_model,
                                                            batch,
                                                            calc_loss=False)
                            loss = self.loss(out0, out1, batch)
                            dloss0 = \
                                self.compute_distillation_loss(
                                    out0, aout0, masked_lm_labels.view(-1))
                            dloss1 = \
                                self.compute_distillation_loss(out1, aout1)
                            dloss = dloss0 + dloss1
                            loss = loss + \
                                self.args.distillation_weight * dloss
                        self.backward(loss)
                        self.all_reduce(overflow_buf)
                        global_step = self.take_optimizer_step(global_step)
                        average_loss += loss.item()
                        average_dloss_0 += dloss0.item()
                        average_dloss_1 += dloss1.item()

                        if global_step % self.args.log_freq == 0:
                            divisor = self.args.log_freq
                            if self.is_main_process():
                                print(
                                    "Team: {} Step:{} Average Loss = {} Average dLoss = {} {}"
                                    .format(self.team, global_step,
                                            average_loss / divisor,
                                            average_dloss_0 / divisor,
                                            average_dloss_1 / divisor))
                            average_loss = 0
                            average_dloss_0 = 0
                            average_dloss_1 = 0

                        if global_step >= self.args.max_steps or \
                            (global_step %
                             self.args.num_steps_per_checkpoint) == 0:
                            if self.team_rank == 0:
                                # Save a trained model
                                logger.info("** ** Saving model ** **")
                                self.snapshot.save(global_step, f_id, files)

                            if global_step >= self.args.max_steps:
                                del train_dataloader
                                torch.distributed.barrier()
                                if torch.distributed.get_rank() == 0:
                                    print("Total time taken {}".format(
                                        time.time() - begin))
                                return self.args

                    del train_dataloader
                    # Make sure pool has finished and switch train_dataloader
                    # NOTE: Will block until complete
                    train_dataloader, data_file = dataset_future.result(
                        timeout=None)

                epoch += 1

    def train_online_distillation_overlap(self):
        global_step = self.snapshot.global_step or 0

        main_stream = torch.cuda.Stream()
        another_model_fwd_stream = torch.cuda.Stream()
        all_reduce_stream = torch.cuda.Stream()
        distillation_stream = torch.cuda.Stream()

        fwd_event = torch.cuda.Event()
        bwd_event = torch.cuda.Event()
        another_model_fwd_event = torch.cuda.Event()
        all_reduce_event = torch.cuda.Event()
        distillation_event = torch.cuda.Event()

        if self.is_main_process():
            print("SEED {}".format(self.args.seed))
            logger.info("***** Running training *****")
            # logger.info("  Num examples = %d", len(train_data))
            logger.info("  Batch size = %d", self.args.train_batch_size)
            print("  LR = ", self.args.learning_rate)
            print("  Online Distillation")
            print("Training. . .")

        self.model.train()
        average_loss = 0.0  # averaged loss every self.args.log_freq steps
        average_dloss_0 = 0
        average_dloss_1 = 0
        epoch = 0
        begin = None

        # Note: We loop infinitely over epochs, termination is handled via
        #       iteration count
        batch = None
        another_output = None
        with ThreadPoolExecutor(1) as pool:
            while True:
                dataset_future, f_start_id, files, data_file = \
                    self.init_dataloader(epoch, pool)
                previous_file = data_file
                train_dataloader, _ = dataset_future.result(timeout=None)

                overflow_buf = torch.cuda.IntTensor([0])

                for f_id in range(f_start_id + 1, len(files)):
                    logger.info("file no %s file %s" % (f_id, previous_file))
                    dataset_future, data_file = \
                        self.update_dataloader(pool, f_id, files)
                    previous_file = data_file

                    for next_batch in train_dataloader:
                        next_batch = [t.to(self.device) for t in next_batch]
                        if batch is None:
                            batch = next_batch
                            continue
                        if begin is None:
                            begin = time.time()

                        step = global_step
                        if self.args.phase2:
                            step += self.args.phase1_end_step

                        _, _, _, masked_lm_labels, _ = batch
                        fwd_event.record()
                        distillation_event.record()
                        if step >= self.args.burnin_steps:
                            with torch.cuda.stream(distillation_stream):
                                distillation_event.wait()
                                if (step % self.args.distillation_steps) \
                                        == 0:
                                    self.comm_model()
                                distillation_event.record()

                        with torch.cuda.stream(main_stream):
                            fwd_event.wait()
                            if another_output is None:
                                loss = self.forward(self.model, batch)
                                dloss0 = torch.zeros(())
                                dloss1 = torch.zeros(())
                            else:
                                out0, out1 = self.forward(self.model,
                                                          batch,
                                                          calc_loss=False)
                                aout0, aout1 = another_output
                                loss = self.loss(out0, out1, batch)
                                dloss0 = \
                                    self.compute_distillation_loss(
                                        out0, aout0,
                                        masked_lm_labels.view(-1))
                                dloss1 = \
                                    self.compute_distillation_loss(out1,
                                                                   aout1)
                                dloss = dloss0 + dloss1

                                loss = loss + \
                                    self.args.distillation_weight * dloss
                            fwd_event.record()
                        fwd_event.wait()

                        bwd_event.record()
                        with torch.cuda.stream(main_stream):
                            bwd_event.wait()
                            self.backward(loss)
                            bwd_event.record()
                        bwd_event.wait()
                        distillation_event.wait()

                        all_reduce_event.record()
                        another_model_fwd_event.record()
                        with torch.cuda.stream(all_reduce_stream):
                            all_reduce_event.wait()
                            self.all_reduce(overflow_buf)
                            all_reduce_event.record()

                        if step >= self.args.burnin_steps:
                            with torch.cuda.stream(another_model_fwd_stream):
                                another_model_fwd_event.wait()
                                with torch.no_grad():
                                    another_output = self.forward(
                                        self.another_model,
                                        next_batch,
                                        calc_loss=False)
                                another_model_fwd_event.record()
                        all_reduce_event.wait()
                        another_model_fwd_event.wait()

                        global_step = self.take_optimizer_step(global_step)

                        average_loss += loss.item()
                        average_dloss_0 += dloss0.item()
                        average_dloss_1 += dloss1.item()
                        if global_step % self.args.log_freq == 0:
                            divisor = self.args.log_freq
                            if self.is_main_process():
                                print(
                                    "Team: {} Step:{} Average Loss = {} Average dLoss = {} {}"
                                    .format(self.team, global_step,
                                            average_loss / divisor,
                                            average_dloss_0 / divisor,
                                            average_dloss_1 / divisor))
                            average_loss = 0
                            average_dloss_0 = 0
                            average_dloss_1 = 0

                        if global_step >= self.args.max_steps or \
                            (global_step %
                             self.args.num_steps_per_checkpoint) == 0:
                            if self.team_rank == 0:
                                # Save a trained model
                                logger.info("** ** Saving model ** **")
                                self.snapshot.save(global_step, f_id, files)

                        if global_step >= self.args.max_steps:
                            del train_dataloader
                            torch.distributed.barrier()
                            if torch.distributed.get_rank() == 0:
                                print(
                                    "Total time taken {}".format(time.time() -
                                                                 begin))
                            return self.args
                        batch = next_batch

                    del train_dataloader
                    # Make sure pool has finished and switch train_dataloader
                    # NOTE: Will block until complete
                    train_dataloader, data_file = dataset_future.result(
                        timeout=None)

                epoch += 1

    def train_online_distillation_logit(self):
        global_step = self.snapshot.global_step or 0

        if self.is_main_process():
            print("SEED {}".format(self.args.seed))
            logger.info("***** Running training *****")
            # logger.info("  Num examples = %d", len(train_data))
            logger.info("  Batch size = %d", self.args.train_batch_size)
            print("  LR = ", self.args.learning_rate)
            print("  Online Distillation")
            print("Training. . .")

        self.model.train()
        average_loss = 0.0  # averaged loss every self.args.log_freq steps
        average_dloss_0 = 0.0
        average_dloss_1 = 0.0
        epoch = 0
        begin = None

        # Note: We loop infinitely over epochs, termination is handled via
        #       iteration count
        rng = random.Random(self.args.data_seed)
        cnt = 0
        with ThreadPoolExecutor(1) as pool:
            while True:
                cnt += 1

                step = global_step
                if self.args.phase2:
                    step += self.args.phase1_end_step
                if step < self.args.burnin_steps:
                    dataset_future, f_start_id, files, data_file = \
                        self.init_dataloader(epoch, pool)
                    use_same_data = False
                else:
                    torch.manual_seed(self.args.data_seed + cnt)
                    dataset_future, f_start_id, files, data_file = \
                        self.init_dataloader(epoch, pool, rng)
                    use_same_data = True
                previous_file = data_file
                train_dataloader, _ = dataset_future.result(timeout=None)

                overflow_buf = torch.cuda.IntTensor([0])

                for f_id in range(f_start_id + 1, len(files)):
                    logger.info("file no %s file %s" % (f_id, previous_file))
                    dataset_future, data_file = \
                        self.update_dataloader(pool, f_id, files)
                    previous_file = data_file

                    for batch in train_dataloader:
                        if begin is None:
                            begin = time.time()
                        step = global_step
                        if self.args.phase2:
                            step += self.args.phase1_end_step
                        if step == self.args.burnin_steps and \
                                not use_same_data:
                            break

                        batch = [t.to(self.device) for t in batch]
                        _, _, _, masked_lm_labels, _ = batch

                        aout0 = None
                        aout1 = None
                        if step < self.args.burnin_steps:
                            loss = self.forward(self.model, batch)
                            dloss0 = torch.zeros(())
                            dloss1 = torch.zeros(())
                        else:
                            out0, out1 = self.forward(self.model,
                                                      batch,
                                                      calc_loss=False)
                            mask = masked_lm_labels.view(-1)

                            c = out0.shape[-1]
                            # Send the logits that are not masked
                            dout0 = out0.view(-1, c)
                            dout0 = dout0[mask != -1]
                            with torch.no_grad():
                                aout0 = dout0.detach().clone()
                                aout1 = out1.detach().clone()
                                flat_dist_call([aout0, aout1],
                                               torch.distributed.all_reduce,
                                               (torch.distributed.ReduceOp.SUM,
                                                self.equalize_data_group))
                                aout0 = aout0 * self.size - dout0
                                aout1 = aout1 * self.size - out1
                            loss = self.loss(out0, out1, batch)
                            dloss0 = \
                                self.compute_distillation_loss(dout0, aout0)
                            dloss1 = \
                                self.compute_distillation_loss(out1, aout1)
                            dloss = dloss0 + dloss1
                            loss = loss + \
                                self.args.distillation_weight * dloss
                        self.backward(loss)

                        self.all_reduce(overflow_buf)
                        global_step = self.take_optimizer_step(global_step)

                        average_loss += loss.item()
                        average_dloss_0 += dloss0.item()
                        average_dloss_1 += dloss1.item()
                        if global_step % self.args.log_freq == 0:
                            divisor = self.args.log_freq
                            if self.is_main_process():
                                print(
                                    "Team: {} Step:{} Average Loss = {} Average dLoss = {} {}"
                                    .format(self.team, global_step,
                                            average_loss / divisor,
                                            average_dloss_0 / divisor,
                                            average_dloss_1 / divisor))
                            average_loss = 0
                            average_dloss_0 = 0
                            average_dloss_1 = 0

                        if global_step >= self.args.max_steps or \
                            (global_step %
                             self.args.num_steps_per_checkpoint) == 0:
                            if self.team_rank == 0:
                                # Save a trained model
                                logger.info("** ** Saving model ** **")
                                self.snapshot.save(global_step, f_id, files)

                        if global_step >= self.args.max_steps:
                            del train_dataloader
                            torch.distributed.barrier()
                            if torch.distributed.get_rank() == 0:
                                print(
                                    "Total time taken {}".format(time.time() -
                                                                 begin))
                            return self.args

                    del train_dataloader
                    # Make sure pool has finished and switch train_dataloader
                    # NOTE: Will block until complete
                    train_dataloader, data_file = dataset_future.result(
                        timeout=None)

                    if step == self.args.burnin_steps and not use_same_data:
                        break

                epoch += 1
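
As a quick sanity check on compute_distillation_loss above: the kl_divergence branch vanishes when both models emit identical logits and is never negative. A self-contained check in plain torch (no Trainer state needed):

import torch

def kl_distillation(output, another_output, mask):
    # same formula as the kl_divergence branch above
    p = torch.softmax(output, dim=1)
    p_log = torch.log_softmax(output, dim=1)
    q_log = torch.log_softmax(another_output, dim=1)
    return torch.sum(mask * (p * (p_log - q_log))) / mask.sum()

logits = torch.randn(4, 10)
mask = torch.ones(4, 1)
print(kl_distillation(logits, logits.clone(), mask))            # ~0: identical models
print(kl_distillation(logits, torch.randn(4, 10), mask) >= 0)   # KL >= 0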
Example no. 16
def main():

    args = get_config()

    if args.with_cuda:
        device = flow.device("cuda")
    else:
        device = flow.device("cpu")

    print("Creating Dataloader")
    train_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="train",
        dataset_size=args.train_dataset_size,
        batch_size=args.train_batch_size,
        data_part_num=args.train_data_part,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
        consistent=False,
    )

    test_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="test",
        dataset_size=1024,
        batch_size=args.val_batch_size,
        data_part_num=4,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
        consistent=False,
    )

    print("Building BERT Model")
    hidden_size = 64 * args.num_attention_heads
    intermediate_size = 4 * hidden_size
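    # 64 is the per-head dimension used by the original BERT, so
    # hidden_size = 64 * heads and the FFN width is the usual 4 * hidden_size.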
    bert_model = BertForPreTraining(
        args.vocab_size,
        args.seq_length,
        hidden_size,
        args.num_hidden_layers,
        args.num_attention_heads,
        intermediate_size,
        nn.GELU(),
        args.hidden_dropout_prob,
        args.attention_probs_dropout_prob,
        args.max_position_embeddings,
        args.type_vocab_size,
    )

    # Load the same initial parameters with lazy model.
    # from utils.compare_lazy_outputs import load_params_from_lazy
    # load_params_from_lazy(
    #     bert_model.state_dict(),
    #     "../../OneFlow-Benchmark/LanguageModeling/BERT/initial_model",
    # )

    bert_model = bert_model.to(device)
    if args.use_ddp:
        bert_model = ddp(bert_model)

    optimizer = build_optimizer(
        args.optim_name,
        bert_model,
        args.lr,
        args.weight_decay,
        weight_decay_excludes=["bias", "LayerNorm", "layer_norm"],
        clip_grad_max_norm=1,
        clip_grad_norm_type=2.0,
    )

    steps = args.epochs * len(train_data_loader)
    warmup_steps = int(steps * args.warmup_proportion)

    lr_scheduler = PolynomialLR(optimizer, steps=steps, end_learning_rate=0.0)

    lr_scheduler = flow.optim.lr_scheduler.WarmUpLR(lr_scheduler,
                                                    warmup_factor=0,
                                                    warmup_iters=warmup_steps,
                                                    warmup_method="linear")
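    # The WarmUpLR wrapper ramps the LR linearly from 0 (warmup_factor=0) to
    # the base LR over warmup_steps; after that the wrapped PolynomialLR
    # decays it toward end_learning_rate=0.0 over the remaining steps.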

    ns_criterion = nn.CrossEntropyLoss(reduction="mean")
    mlm_criterion = nn.CrossEntropyLoss(reduction="none")

    def get_masked_lm_loss(
        logit_blob,
        masked_lm_positions,
        masked_lm_labels,
        label_weights,
        max_prediction_per_seq,
    ):
        # gather valid position indices
        logit_blob = flow.gather(
            logit_blob,
            index=masked_lm_positions.unsqueeze(2).repeat(
                1, 1, args.vocab_size),
            dim=1,
        )

        logit_blob = flow.reshape(logit_blob, [-1, args.vocab_size])
        label_id_blob = flow.reshape(masked_lm_labels, [-1])

        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        pre_example_loss = mlm_criterion(logit_blob, label_id_blob)
        pre_example_loss = flow.reshape(pre_example_loss,
                                        [-1, max_prediction_per_seq])
        numerator = flow.sum(pre_example_loss * label_weights)
        denominator = flow.sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss
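    # e.g. with max_prediction_per_seq=3 and label_weights=[[1., 1., 0.]] the
    # loss averages over the two real predictions; the padded third slot
    # contributes nothing to numerator or denominator.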

    train_total_losses = []
    for epoch in range(args.epochs):
        metric = Metric(
            desc="bert pretrain",
            print_steps=args.loss_print_every_n_iters,
            batch_size=args.train_batch_size,
            keys=["total_loss", "mlm_loss", "nsp_loss", "pred_acc"],
        )

        # Train
        bert_model.train()

        for step in range(len(train_data_loader)):
            bert_outputs = pretrain(
                train_data_loader,
                bert_model,
                ns_criterion,
                partial(
                    get_masked_lm_loss,
                    max_prediction_per_seq=args.max_predictions_per_seq,
                ),
                optimizer,
                lr_scheduler,
            )

            if flow.env.get_rank() == 0:
                metric.metric_cb(step, epoch=epoch)(bert_outputs)

            train_total_losses.append(bert_outputs["total_loss"])

        # Eval
        bert_model.eval()
        val_acc = validation(epoch, test_data_loader, bert_model,
                             args.val_print_every_n_iters)

        save_model(bert_model, args.checkpoint_path, epoch, val_acc, False)
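Example no. 17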
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--bert_model",
        default='bert-base-multilingual-cased',
        type=str,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    # parser.add_argument("--do_eval",
    #                     action='store_true',
    #                     help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size",
                        default=2,
                        type=int,
                        help="Total batch size for training.")
    #     parser.add_argument("--eval_batch_size",
    #                         default=2,
    #                         type=int,
    #                         help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on GPUs")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumualte before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--visdom',
                        action='store_true',
                        help='Use visdom for loss visualization')
    parser.add_argument('--check_saved_model',
                        action='store_true',
                        help='Resume from the most recent per-epoch checkpoint '
                        'found in output_dir, if any')
    parser.add_argument('--last_final_epoch',
                        type=int,
                        default=-1,
                        help="Use this when the final training run already finished and you\n"
                        "want to continue from it: if train_epoch was originally set to 3,\n"
                        "enter 3, not 2.")

    args = parser.parse_args()
    print(args)

    if args.visdom:
        import visdom
        viz = visdom.Visdom()
        # visualize the loss with visdom

    os.makedirs(args.output_dir, exist_ok=True)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train:
        raise ValueError(
            "Training is currently the only implemented execution option. Please set `do_train`."
        )

    # Refuse to clobber an existing run unless the user explicitly asked to
    # resume (the makedirs above already guarantees the directory exists).
    if (args.last_final_epoch == -1 and not args.check_saved_model
            and os.listdir(args.output_dir)):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))

    tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                              do_lower_case=False)

    processor = DataProcessor()
    label_list = processor.get_labels()

    num_train_optimization_steps = None
    if args.do_train:
        print("Loading Train Dataset", args.data_dir)

        train_examples = processor.get_train_examples(args.data_dir)
        train_dataset = LazyDataset(train_examples, args.max_seq_length,
                                    tokenizer)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            train_sampler = DistributedSampler(train_dataset)

        num_train_optimization_steps = int(
            len(train_dataset) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    # Prepare model
    loaded_epoch = -1
    saved_model_path = -1

    if args.last_final_epoch != -1:
        last_model = os.path.join(args.output_dir, WEIGHTS_NAME)
        if os.path.exists(last_model):
            saved_model_path = last_model
            loaded_epoch = args.last_final_epoch - 1

    elif args.check_saved_model:
        for epoch in range(int(args.num_train_epochs)):
            tmp = os.path.join(args.output_dir,
                               (f"weight_on_ep{epoch}_" + WEIGHTS_NAME))
            if os.path.exists(tmp):
                saved_model_path = tmp
                loaded_epoch = epoch

    if saved_model_path != -1:
        logger.info(f"Loading on saved model {saved_model_path}")
        config_file = os.path.join(args.output_dir, CONFIG_NAME)
        config = BertConfig(config_file)
        logger.info("Model config {}".format(config))
        model = BertForPreTraining(config)
        model.load_state_dict(torch.load(saved_model_path))
    else:
        loaded_epoch = -1
        model = BertForPreTraining.from_pretrained(args.bert_model)

    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    if args.visdom:
        # First define the base visdom figures
        vis_title = f'Baseline on {len(train_dataset)} dataset'
        vis_legend = ['LM Loss', 'Click Loss', 'Total Loss']
        iter_plot = create_vis_plot(viz, 'Iteration', 'Loss', vis_title,
                                    vis_legend)
        epoch_plot = create_vis_plot(viz, 'Epoch', 'Loss', vis_title,
                                     vis_legend)

    # if args.do_eval:
    #     eval_examples = processor.get_dev_examples(args.data_dir)
    #
    #     logger.info("***** Running evaluation *****")
    #     logger.info("  Num examples = %d", len(eval_examples))
    #     logger.info("  Batch size = %d", args.eval_batch_size)
    #
    #     eval_data = LazyDatasetClassifier(eval_examples, label_list, args.max_seq_length, tokenizer)
    #     # Run prediction for full data
    #     """
    #     cur_tensors = (torch.tensor(f.input_ids),
    #            torch.tensor(f.input_mask),
    #            torch.tensor(f.segment_ids),
    #            torch.tensor(f.lm_label_ids),
    #            torch.tensor(f.label))
    #     """
    #     eval_sampler = SequentialSampler(eval_data)
    #     eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    #     save_eval_loss = []

    global_step = 0
    if args.do_train:

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)
        """
        cur_tensors = (torch.tensor(f.input_ids),
               torch.tensor(f.input_mask),
               torch.tensor(f.segment_ids),
               torch.tensor(f.lm_label_ids),
               torch.tensor(f.label))
        """

        save_loss = []
        save_epoch_loss = []
        save_step = max(1, len(train_dataloader) // 5)  # avoid modulo-by-zero on tiny datasets

        for epoch in trange((loaded_epoch + 1),
                            int(args.num_train_epochs),
                            desc="Epoch"):

            #     if args.do_eval and loaded_epoch != -1:
            #         model.eval()
            #         eval_loss, eval_accuracy = 0, 0
            #         nb_eval_steps, nb_eval_examples = 0, 0
            #
            #         for batch in tqdm(eval_dataloader, desc="Evaluating"):
            #             batch = tuple(t.to(device) for t in batch)
            #             input_ids, input_mask, segment_ids, label_ids = batch
            #
            #             with torch.no_grad():
            #                 tmp_eval_loss = model(input_ids, segment_ids, input_mask, None, label_ids)
            #                 prediction_scores, logits = model(input_ids, segment_ids, input_mask)
            #
            #             if n_gpu > 1:
            #                 tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu.
            #
            #             logits = logits.detach().cpu().numpy()
            #             label_ids = label_ids.to('cpu').numpy()
            #             tmp_eval_accuracy = accuracy(logits, label_ids)
            #
            #             eval_loss += tmp_eval_loss.mean().item()
            #             eval_accuracy += tmp_eval_accuracy
            #
            #             nb_eval_examples += input_ids.size(0)
            #             nb_eval_steps += 1
            #
            #         eval_loss = eval_loss / nb_eval_steps
            #         eval_accuracy = eval_accuracy / nb_eval_examples
            #         result = {'eval_loss': eval_loss,
            #                   'eval_accuracy': eval_accuracy,
            #                   'global_step': global_step}
            #
            #         save_eval_loss.append(eval_loss)
            #
            #         output_eval_file = os.path.join(args.output_dir, f"Epoch_{epoch}_eval_results.txt")
            #         with open(output_eval_file, "w") as writer:
            #             logger.info(f"***** Eval results on Epoch {epoch} *****")
            #             for key in sorted(result.keys()):
            #                 logger.info("  %s = %s", key, str(result[key]))
            #                 writer.write("%s = %s\n" % (key, str(result[key])))

            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            tr_loss_ml = 0
            tr_loss_click = 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, label = batch
                # if global_step == 0:
                #     print(input_ids.shape, input_mask.shape, segment_ids.shape, lm_label_ids.shape, label.shape)
                loss, loss_ml, loss_click = model(input_ids, segment_ids,
                                                  input_mask, lm_label_ids,
                                                  label)

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                    loss_ml = loss_ml.mean()
                    loss_click = loss_click.mean()

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                    loss_ml = loss_ml / args.gradient_accumulation_steps
                    loss_click = loss_click / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                tr_loss_ml += loss_ml.item()
                tr_loss_click += loss_click.item()

                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(
                            global_step / num_train_optimization_steps,
                            args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                if global_step != 0 and global_step % save_step == 0:
                    # Save five times per epoch
                    logger.info(f'Saving state, iter: {global_step}')
                    model_to_save = model.module if hasattr(
                        model, 'module') else model
                    # Only save the model itself
                    model_name = f"weight_on_{global_step}_" + WEIGHTS_NAME
                    output_model_file = os.path.join(args.output_dir,
                                                     model_name)
                    torch.save(model_to_save.state_dict(), output_model_file)
                    output_config_file = os.path.join(args.output_dir,
                                                      CONFIG_NAME)
                    with open(output_config_file, 'w') as f:
                        f.write(model_to_save.config.to_json_string())
                    print("Loss at ", global_step, loss_ml.item(),
                          loss_click.item(), loss.item())

                save_loss.append(
                    [loss_ml.item(),
                     loss_click.item(),
                     loss.item()])

                if args.visdom:
                    update_vis_plot(viz, global_step, loss_ml.item(),
                                    loss_click.item(), iter_plot, epoch_plot,
                                    'append')

            if epoch != (int(args.num_train_epochs) - 1):
                # Save at the end of every epoch
                logger.info(f'Saving state, epoch: {epoch}')
                model_to_save = model.module if hasattr(model,
                                                        'module') else model
                # Only save the model itself
                model_name = f"weight_on_ep{epoch}_" + WEIGHTS_NAME
                output_model_file = os.path.join(args.output_dir, model_name)
                torch.save(model_to_save.state_dict(), output_model_file)
                output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
                with open(output_config_file, 'w') as f:
                    f.write(model_to_save.config.to_json_string())
                print("Loss at epoch", epoch, tr_loss_ml, tr_loss_click,
                      tr_loss)

            save_epoch_loss.append([tr_loss_ml, tr_loss_click, tr_loss])
            if args.visdom:
                update_vis_plot(viz, epoch, tr_loss_ml, tr_loss_click,
                                epoch_plot, None, 'append',
                                len(train_dataset) // args.train_batch_size)

            # if args.do_eval and loaded_epoch == -1:
            #
            #     model.eval()
            #     eval_loss, eval_accuracy = 0, 0
            #     nb_eval_steps, nb_eval_examples = 0, 0
            #
            #     for batch in tqdm(eval_dataloader, desc="Evaluating"):
            #         batch = tuple(t.to(device) for t in batch)
            #         input_ids, input_mask, segment_ids, label_ids = batch
            #
            #         with torch.no_grad():
            #             tmp_eval_loss = model(input_ids, segment_ids, input_mask, None, label_ids)
            #             prediction_scores, logits = model(input_ids, segment_ids, input_mask)
            #
            #         if n_gpu > 1:
            #             tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu.
            #
            #         logits = logits.detach().cpu().numpy()
            #         label_ids = label_ids.to('cpu').numpy()
            #         tmp_eval_accuracy = accuracy(logits, label_ids)
            #
            #         eval_loss += tmp_eval_loss.mean().item()
            #         eval_accuracy += tmp_eval_accuracy
            #
            #         nb_eval_examples += input_ids.size(0)
            #         nb_eval_steps += 1
            #
            #     eval_loss = eval_loss / nb_eval_steps
            #     eval_accuracy = eval_accuracy / nb_eval_examples
            #     result = {'eval_loss': eval_loss,
            #               'eval_accuracy': eval_accuracy,
            #               'global_step': global_step}
            #
            #     save_eval_loss.append(eval_loss)
            #
            #     output_eval_file = os.path.join(args.output_dir, f"Epoch_{epoch}_eval_results.txt")
            #     with open(output_eval_file, "w") as writer:
            #         logger.info(f"***** Eval results on Epoch {epoch} *****")
            #         for key in sorted(result.keys()):
            #             logger.info("  %s = %s", key, str(result[key]))
            #             writer.write("%s = %s\n" % (key, str(result[key])))

        # Save a trained model
        logger.info("** ** * Saving fine - tuned model ** ** * ")
        # model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        # output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        # if args.do_train:
        #     torch.save(model_to_save.state_dict(), output_model_file)

        save_loss = np.array(save_loss)
        save_epoch_loss = np.array(save_epoch_loss)
        np.save(os.path.join(args.output_dir, "save_loss.npy"), save_loss)
        np.save(os.path.join(args.output_dir, "save_epoch_loss.npy"),
                save_epoch_loss)

        # if args.do_eval:
        #     save_eval_loss = np.array(save_eval_loss)
        #     np.save(os.path.join(args.output_dir, "save_eval_loss.npy"), save_eval_loss)

        model_to_save = model.module if hasattr(model, 'module') else model
        # Only save the model itself
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        with open(output_config_file, 'w') as f:
            f.write(model_to_save.config.to_json_string())
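
For reference, the `warmup_linear` helper used in the fp16 branch above is imported but not shown in this listing. A minimal sketch, assuming it matches the classic pytorch_pretrained_bert definition (linear warmup to the peak learning rate, then linear decay to zero):

def warmup_linear(x, warmup=0.002):
    # x is the fraction of training completed (global_step / total steps).
    if x < warmup:
        return x / warmup
    return 1.0 - x
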
Esempio n. 18
0
corpus = load_lm_data(args.entity_dict, args.data, args.output_dir,
                      args.dataset, tokenizer)
## Training Dataset
train_iter = corpus.get_iterator('train',
                                 args.batch_size,
                                 args.max_seq_length,
                                 args.max_doc_length,
                                 device=device)

## total batch numbers and optim updating steps
total_train_steps = int(train_iter.batch_steps * args.num_train_epochs)

########################################################################################################################
# Building the model
########################################################################################################################
model = BertForPreTraining.from_pretrained(args.bert_model,
                                           entity_num=train_iter.entity_num)

args.n_all_param = sum([p.nelement() for p in model.bert.parameters()])
args.n_nonemb_param = sum(
    [p.nelement() for p in model.bert.encoder.parameters()])

logger.info('=' * 100)
for k, v in args.__dict__.items():
    logger.info('    - {} : {}'.format(k, v))
logger.info('=' * 100)
logger.info('#params = {}'.format(args.n_all_param))
logger.info('#non emb params = {}'.format(args.n_nonemb_param))

if args.fp16:
    model = model.half()
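
The two counters above separate total from non-embedding parameters; their difference covers the embedding (and pooler) parameters. A quick sanity check, sketched with `model` standing in for the `BertForPreTraining` instance built above:

n_all = sum(p.nelement() for p in model.bert.parameters())
n_nonemb = sum(p.nelement() for p in model.bert.encoder.parameters())
print("embedding (and pooler) params:", n_all - n_nonemb)
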
Esempio n. 19
0
            lazy_state_dict["bert-embeddings-token_type_embeddings"])
    eager_state_dict["bert.embeddings.position_embeddings.weight"].data.copy_(
        flow.tensor(lazy_state_dict["bert-embeddings-position_embeddings"].
                    numpy().squeeze(0)))


if __name__ == "__main__":
    lazy_model_path = "./of_bert_1000000_model_log/snapshot_snapshot_1000000"

    bert_module = BertForPreTraining(
        30522,
        128,
        768,
        12,
        12,
        3072,
        nn.GELU(),
        0.0,
        0.0,
        512,
        2,
    )

    load_params_from_lazy(bert_module.state_dict(), lazy_model_path)

    assert id(bert_module.cls.predictions.decoder.weight) == id(
        bert_module.bert.embeddings.word_embeddings.weight)

    with open(
            "../../OneFlow-Benchmark/LanguageModeling/BERT/lazy_input_output_1.pickle",
            "rb") as handle:
Esempio n. 20
0
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The input train corpus.")
    parser.add_argument(
        "--bert_model",
        default=None,
        type=str,
        required=True,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese."
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument(
        "--on_memory",
        action='store_true',
        help="Whether to load train samples into memory or use disk")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumualte before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                              do_lower_case=args.do_lower_case)

    #train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        print("Loading Train Dataset", args.train_file)
        train_dataset = BERTDataset(args.train_file,
                                    tokenizer,
                                    seq_len=args.max_seq_length,
                                    corpus_lines=None,
                                    on_memory=args.on_memory)
        num_train_optimization_steps = int(
            len(train_dataset) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    # Prepare model
    model = BertForPreTraining.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            #TODO: check if this works with current data generator from disk that relies on next(file)
            # (it doesn't return item back by index)
            train_sampler = DistributedSampler(train_dataset)
        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids,
                             is_next)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(
                            global_step / num_train_optimization_steps,
                            args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

        # Save a trained model
        logger.info("** ** * Saving fine - tuned model ** ** * ")
        model_to_save = model.module if hasattr(
            model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        if args.do_train:
            torch.save(model_to_save.state_dict(), output_model_file)
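
Note how the script above keeps the effective batch size constant under gradient accumulation: the per-step batch size is divided by `gradient_accumulation_steps`, each micro-batch loss is scaled down by the same factor, and the optimizer steps only once per accumulation window. A minimal standalone sketch of the same pattern, using hypothetical toy data:

import torch
import torch.nn.functional as F

accum_steps = 4
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for step in range(100):
    x, y = torch.randn(8, 10), torch.randn(8, 1)
    loss = F.mse_loss(model(x), y)
    # Scale so the summed gradients match one large batch of 8 * accum_steps.
    (loss / accum_steps).backward()
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
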
Esempio n. 21
0
def main():

    print("IN NEW MAIN XD\n")
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--input_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain .hdf5 files  for the task.")

    parser.add_argument("--config_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The BERT model config")

    parser.add_argument(
        "--bert_model",
        default="bert-large-uncased",
        type=str,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese."
    )

    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument(
        "--max_predictions_per_seq",
        default=80,
        type=int,
        help="The maximum total of masked tokens in input sequence")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps",
                        default=1000,
                        type=float,
                        help="Total number of training steps to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.01,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumualte before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        default=False,
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0.0,
        help=
        'Loss scaling; positive powers of 2 can improve fp16 convergence.'
    )
    parser.add_argument('--log_freq',
                        type=float,
                        default=50.0,
                        help='frequency of logging loss.')
    parser.add_argument('--checkpoint_activations',
                        default=False,
                        action='store_true',
                        help="Whether to use gradient checkpointing")
    parser.add_argument("--resume_from_checkpoint",
                        default=False,
                        action='store_true',
                        help="Whether to resume training from checkpoint.")
    parser.add_argument('--resume_step',
                        type=int,
                        default=-1,
                        help="Step to resume training from.")
    parser.add_argument(
        '--num_steps_per_checkpoint',
        type=int,
        default=2000,
        help="Number of update steps until a model checkpoint is saved to disk."
    )

    args = parser.parse_args()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    assert (torch.cuda.is_available())

    if args.local_rank == -1:
        device = torch.device("cuda")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')

    logger.info("device %s n_gpu %d distributed training %r", device, n_gpu,
                bool(args.local_rank != -1))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))
    if args.train_batch_size % args.gradient_accumulation_steps != 0:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible"
            .format(args.gradient_accumulation_steps, args.train_batch_size))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    if not args.resume_from_checkpoint and os.path.exists(
            args.output_dir) and (os.listdir(args.output_dir) and os.listdir(
                args.output_dir) != ['logfile.txt']):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))

    if not args.resume_from_checkpoint:
        os.makedirs(args.output_dir, exist_ok=True)

    # Prepare model
    config = BertConfig.from_json_file(args.config_file)
    model = BertForPreTraining(config)

    if not args.resume_from_checkpoint:
        global_step = 0
    else:
        if args.resume_step == -1:
            model_names = [
                f for f in os.listdir(args.output_dir) if f.endswith(".pt")
            ]
            args.resume_step = max([
                int(x.split('.pt')[0].split('_')[1].strip())
                for x in model_names
            ])

        global_step = args.resume_step

        checkpoint = torch.load(os.path.join(args.output_dir,
                                             "ckpt_{}.pt".format(global_step)),
                                map_location="cpu")
        model.load_state_dict(checkpoint['model'], strict=False)

        print("resume step from ", args.resume_step)

    model.to(device)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    if args.fp16:

        optimizer = FusedAdam(
            optimizer_grouped_parameters,
            lr=args.learning_rate,
            #warmup=args.warmup_proportion,
            #t_total=args.max_steps,
            bias_correction=False,
            weight_decay=0.01,
            max_grad_norm=1.0)

        if args.loss_scale == 0:
            # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level="O2",
                                              keep_batchnorm_fp32=False,
                                              loss_scale="dynamic")
        else:
            # optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level="O2",
                                              keep_batchnorm_fp32=False,
                                              loss_scale=args.loss_scale)

        scheduler = LinearWarmUpScheduler(optimizer,
                                          warmup=args.warmup_proportion,
                                          total_steps=args.max_steps)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=args.max_steps)

    if args.resume_from_checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])

    if args.local_rank != -1:
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    files = [
        os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)
        if os.path.isfile(os.path.join(args.input_dir, f))
    ]
    files.sort()

    num_files = len(files)

    logger.info("***** Running training *****")
    # logger.info("  Num examples = %d", len(train_data))
    logger.info("  Batch size = %d", args.train_batch_size)
    print("  LR = ", args.learning_rate)

    model.train()
    print("Training. . .")

    most_recent_ckpts_paths = []

    print("Training. . .")
    tr_loss = 0.0  # total added training loss
    average_loss = 0.0  # averaged loss every args.log_freq steps
    epoch = 0
    training_steps = 0
    while True:
        if not args.resume_from_checkpoint:
            random.shuffle(files)
            f_start_id = 0
        else:
            f_start_id = checkpoint['files'][0]
            files = checkpoint['files'][1:]
            args.resume_from_checkpoint = False
        for f_id in range(f_start_id, len(files)):
            data_file = files[f_id]
            logger.info("file no %s file %s" % (f_id, data_file))
            train_data = pretraining_dataset(
                input_file=data_file,
                max_pred_length=args.max_predictions_per_seq)

            if args.local_rank == -1:
                train_sampler = RandomSampler(train_data)
                train_dataloader = DataLoader(
                    train_data,
                    sampler=train_sampler,
                    batch_size=args.train_batch_size * n_gpu,
                    num_workers=4,
                    pin_memory=True)
            else:
                train_sampler = DistributedSampler(train_data)
                train_dataloader = DataLoader(train_data,
                                              sampler=train_sampler,
                                              batch_size=args.train_batch_size,
                                              num_workers=4,
                                              pin_memory=True)

            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="File Iteration")):

                training_steps += 1
                batch = [t.to(device) for t in batch]
                input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
                loss = model(
                    input_ids=input_ids,
                    token_type_ids=segment_ids,
                    attention_mask=input_mask,
                    masked_lm_labels=masked_lm_labels,
                    next_sentence_label=next_sentence_labels,
                    checkpoint_activations=args.checkpoint_activations)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    #   optimizer.backward(loss)
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                tr_loss += loss
                average_loss += loss.item()

                if training_steps % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        scheduler.step()
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                if training_steps == 1 * args.gradient_accumulation_steps:
                    logger.info(
                        "Step:{} Average Loss = {} Step Loss = {} LR {}".
                        format(global_step, average_loss, loss.item(),
                               optimizer.param_groups[0]['lr']))

                if training_steps % (args.log_freq *
                                     args.gradient_accumulation_steps) == 0:
                    logger.info(
                        "Step:{} Average Loss = {} Step Loss = {} LR {}".
                        format(global_step, average_loss / args.log_freq,
                               loss.item(), optimizer.param_groups[0]['lr']))
                    average_loss = 0

                if global_step >= args.max_steps or training_steps % (
                        args.num_steps_per_checkpoint *
                        args.gradient_accumulation_steps) == 0:

                    if (not torch.distributed.is_initialized()
                            or (torch.distributed.is_initialized()
                                and torch.distributed.get_rank() == 0)):
                        # Save a trained model
                        logger.info(
                            "***** Saving fine-tuned model *****")
                        model_to_save = model.module if hasattr(
                            model,
                            'module') else model  # Only save the model itself
                        output_save_file = os.path.join(
                            args.output_dir, "ckpt_{}.pt".format(global_step))

                        torch.save(
                            {
                                'model': model_to_save.state_dict(),
                                'optimizer': optimizer.state_dict(),
                                'files': [f_id] + files
                            }, output_save_file)

                        most_recent_ckpts_paths.append(output_save_file)
                        if len(most_recent_ckpts_paths) > 3:
                            ckpt_to_be_removed = most_recent_ckpts_paths.pop(0)
                            os.remove(ckpt_to_be_removed)

                    if global_step >= args.max_steps:
                        tr_loss = tr_loss * args.gradient_accumulation_steps / training_steps
                        if (torch.distributed.is_initialized()):
                            tr_loss /= torch.distributed.get_world_size()
                            torch.distributed.all_reduce(tr_loss)
                        logger.info("Total Steps:{} Final Loss = {}".format(
                            training_steps, tr_loss.item()))
                        return
            del train_dataloader
            del train_sampler
            del train_data
            #for obj in gc.get_objects():
            #  if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
            #    del obj

            torch.cuda.empty_cache()
        epoch += 1
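
The resume path above recovers the latest step by parsing checkpoint filenames of the form ckpt_<step>.pt, mirroring how they are saved with "ckpt_{}.pt".format(global_step). The same parsing in isolation, as a small sketch:

import os

def latest_resume_step(output_dir):
    # Highest <step> among files named ckpt_<step>.pt, or -1 if none exist.
    steps = [
        int(f.split('.pt')[0].split('_')[1])
        for f in os.listdir(output_dir)
        if f.startswith('ckpt_') and f.endswith('.pt')
    ]
    return max(steps) if steps else -1
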
Esempio n. 22
0
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--input_dir",
                        type=str,
                        required=True)
    parser.add_argument("--teacher_model",
                        default=None,
                        type=str,
                        required=True)
    parser.add_argument("--student_model",
                        default=None,
                        type=str,
                        required=True)
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True)
    parser.add_argument('--vocab_file',
                        type=str,
                        default=None,
                        required=True,
                        help="Vocabulary mapping/file BERT was pretrainined on")

    # Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--reduce_memory",
                        action="store_true",
                        help="Store training data as on-disc memmaps to massively reduce memory usage")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-4,
                        type=float, metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--steps_per_epoch',
                        type=int,
                        default=-1,
                        help="Number of updates steps to in one epoch.")
    parser.add_argument('--max_steps',
                        type=int,
                        default=-1,
                        help="Number of training steps.")
    parser.add_argument('--amp',
                        action='store_true',
                        default=False,
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--continue_train',
                        action='store_true',
                        default=False,
                        help='Whether to train from checkpoints')
    parser.add_argument('--disable_progress_bar',
                        default=False,
                        action='store_true',
                        help='Disable tqdm progress bar')
    parser.add_argument('--max_grad_norm',
                        type=float,
                        default=1.,
                        help="Gradient Clipping threshold")

    # Additional arguments
    parser.add_argument('--eval_step',
                        type=int,
                        default=1000)

    # This is used for running on Huawei Cloud.
    parser.add_argument('--data_url',
                        type=str,
                        default="")

    #Distillation specific
    parser.add_argument('--value_state_loss',
                        action='store_true',
                        default=False)
    parser.add_argument('--hidden_state_loss',
                        action='store_true',
                        default=False)
    parser.add_argument('--use_last_layer',
                        action='store_true',
                        default=False)
    parser.add_argument('--use_kld',
                        action='store_true',
                        default=False)
    parser.add_argument('--use_cosine',
                        action='store_true',
                        default=False)
    parser.add_argument('--distill_config',
                        default="distillation_config.json",
                        type=str,
                        help="path the distillation config")
    parser.add_argument('--num_workers',
                        type=int,
                        default=4,
                        help='number of DataLoader worker processes per rank')

    args = parser.parse_args()
    logger.info('args:{}'.format(args))

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
                        stream=sys.stdout)

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.amp))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    # Reference params
    author_gbs = 256
    author_steps_per_epoch = 22872
    author_epochs = 3
    author_max_steps = author_steps_per_epoch * author_epochs
    # Compute present run params
    if args.max_steps == -1 or args.steps_per_epoch == -1:
        args.steps_per_epoch = author_steps_per_epoch * author_gbs // (args.train_batch_size * get_world_size() * args.gradient_accumulation_steps)
        args.max_steps = author_max_steps * author_gbs // (args.train_batch_size * get_world_size() * args.gradient_accumulation_steps)

    #Set seed
    set_seed(args.seed, n_gpu)

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir) and is_main_process():
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.teacher_model, do_lower_case=args.do_lower_case)

    teacher_model, teacher_config = BertModel.from_pretrained(args.teacher_model,
                                              distill_config=args.distill_config)

    # Required to make sure the model's fwd doesn't return anything; this is
    # needed for DDP, since a fwd output not used in the loss computation crashes DDP.
    teacher_model.make_teacher()

    if args.continue_train:
        student_model, student_config = BertForPreTraining.from_pretrained(args.student_model,
                                                           distill_config=args.distill_config)
    else:
        student_model, student_config = BertForPreTraining.from_scratch(args.student_model, 
                                                        distill_config=args.distill_config)

    # We need a projection layer since teacher.hidden_size != student.hidden_size
    use_projection = student_config.hidden_size != teacher_config.hidden_size
    if use_projection:
        project = Project(student_config, teacher_config)
        if args.continue_train:
            project_model_file = os.path.join(args.student_model, "project.bin")
            project_ckpt = torch.load(project_model_file, map_location="cpu")
            project.load_state_dict(project_ckpt)

    distill_config = {"nn_module_names": []} #Empty list since we don't want to use nn module hooks here
    distill_hooks_student, distill_hooks_teacher = DistillHooks(distill_config), DistillHooks(distill_config)

    student_model.register_forward_hook(distill_hooks_student.child_to_main_hook)
    teacher_model.register_forward_hook(distill_hooks_teacher.child_to_main_hook)

    ## Register hooks on nn.Modules
    # student_fwd_pre_hook = student_model.register_forward_pre_hook(distill_hooks_student.register_nn_module_hook)
    # teacher_fwd_pre_hook = teacher_model.register_forward_pre_hook(distill_hooks_teacher.register_nn_module_hook)

    student_model.to(device)
    teacher_model.to(device)
    if use_projection:
        project.to(device)
    if args.local_rank != -1:
        teacher_model = torch.nn.parallel.DistributedDataParallel(
               teacher_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False
           )
        student_model = torch.nn.parallel.DistributedDataParallel(
               student_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False
           )
        if use_projection:
            project = torch.nn.parallel.DistributedDataParallel(
                   project, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False
               )
    size = 0
    for n, p in student_model.named_parameters():
        logger.info('n: {}'.format(n))
        logger.info('p: {}'.format(p.nelement()))
        size += p.nelement()

    logger.info('Total parameters: {}'.format(size))

    # Prepare optimizer
    param_optimizer = list(student_model.named_parameters())
    if use_projection:
        param_optimizer += list(project.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
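    # The grouping is by substring match on parameter names: a name such as
    # 'bert.encoder.layer.0.attention.output.LayerNorm.weight' (illustrative)
    # lands in the no-decay group, while ordinary weight matrices keep
    # weight_decay = 0.01.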

    optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False)
    scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=args.max_steps)    
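    # Assuming the usual semantics of LinearWarmUpScheduler, the learning rate
    # ramps linearly up to args.learning_rate over the first
    # warmup_proportion * max_steps updates, then decays linearly towards 0;
    # e.g. warmup_proportion=0.1 with max_steps=1000 peaks at step 100.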

    global_step = 0
    logging.info("***** Running training *****")
    logging.info("  Num examples = {}".format(args.train_batch_size * args.max_steps))
    logging.info("  Batch size = %d", args.train_batch_size)
    logging.info("  Num steps = %d", args.max_steps)

    # Prepare the data loader.
    if is_main_process():
        tic = time.perf_counter()
    train_dataloader = lddl.torch.get_bert_pretrain_data_loader(
        args.input_dir,
        local_rank=args.local_rank,
        vocab_file=args.vocab_file,
        data_loader_kwargs={
            'batch_size': args.train_batch_size * n_gpu,
            'num_workers': args.num_workers,
            'pin_memory': True,
        },
        base_seed=args.seed,
        log_dir=None if args.output_dir is None else os.path.join(args.output_dir, 'lddl_log'),
        log_level=logging.WARNING,
        start_epoch=0,
    )
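    # The lddl loader yields dict batches; the keys consumed below are
    # 'input_ids', 'token_type_ids', 'attention_mask', 'labels' and
    # 'next_sentence_labels'.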
    if is_main_process():
        print('get_bert_pretrain_data_loader took {} s!'.format(time.perf_counter() - tic))
    train_dataloader = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader

    tr_loss, tr_att_loss, tr_rep_loss, tr_value_loss = 0., 0., 0., 0.
    nb_tr_examples, local_step = 0, 0

    student_model.train()
    scaler = torch.cuda.amp.GradScaler()
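    # GradScaler implements dynamic loss scaling for mixed precision: the loss
    # is multiplied by a scale factor before backward() so small fp16 gradients
    # don't flush to zero, and gradients are unscaled again before the
    # optimizer step below.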

    transformer_losses = TransformerLosses(student_config, teacher_config, device, args)
    iter_start = time.time()
    while global_step < args.max_steps:
        for batch in train_dataloader:
            if global_step >= args.max_steps:
                break

            # Remove the forward_pre_hook after one forward pass: its only
            # purpose is to register forward_hooks on the nn_module_names
            # provided in the config.
            # if idx == 1:
            #     student_fwd_pre_hook.remove()
            #     teacher_fwd_pre_hook.remove()
            #     # return

            # Initialize loss metrics
            if global_step % args.steps_per_epoch == 0:
                tr_loss, tr_att_loss, tr_rep_loss, tr_value_loss = 0., 0., 0., 0.
                mean_loss, mean_att_loss, mean_rep_loss, mean_value_loss = 0., 0., 0., 0.

            batch = {k: v.to(device) for k, v in batch.items()}
            input_ids, segment_ids, input_mask, lm_label_ids, is_next = batch['input_ids'], batch['token_type_ids'], batch['attention_mask'], batch['labels'], batch['next_sentence_labels']

            att_loss = 0.
            rep_loss = 0.
            value_loss = 0.
            with torch.cuda.amp.autocast(enabled=args.amp):
                student_model(input_ids, segment_ids, input_mask, None)

                # Gather student states extracted by hooks
                temp_model = unwrap_ddp(student_model)
                student_atts = flatten_states(temp_model.distill_states_dict, "attention_scores")
                student_reps = flatten_states(temp_model.distill_states_dict, "hidden_states")
                student_values = flatten_states(temp_model.distill_states_dict, "value_states")
                student_embeddings = flatten_states(temp_model.distill_states_dict, "embedding_states")
                bsz, attn_heads, seq_len, _ = student_atts[0].shape
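                # Attention scores are [batch, num_heads, seq_len, seq_len];
                # e.g. a 4-head student on 128-token sequences with batch size
                # 64 yields tensors of shape [64, 4, 128, 128].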

                # No gradients for the teacher: it is used for inference only
                with torch.no_grad():
                    teacher_model(input_ids, segment_ids, input_mask)

                # Gather teacher states extracted by hooks
                temp_model = unwrap_ddp(teacher_model)
                teacher_atts = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "attention_scores")]
                teacher_reps = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "hidden_states")]
                teacher_values = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "value_states")]
                teacher_embeddings = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "embedding_states")]

                teacher_layer_num = len(teacher_atts)
                student_layer_num = len(student_atts)

                # MiniLM-style distillation: match only the last transformer
                # layer of teacher and student
                if student_config.distillation_config["student_teacher_layer_mapping"] == "last_layer":
                    if student_config.distillation_config["use_attention_scores"]:
                        student_atts = [student_atts[-1]]
                        new_teacher_atts = [teacher_atts[-1]]

                    if student_config.distillation_config["use_value_states"]:
                        student_values = [student_values[-1]]
                        new_teacher_values = [teacher_values[-1]]

                    if student_config.distillation_config["use_hidden_states"]:
                        new_teacher_reps = [teacher_reps[-1]]
                        new_student_reps = [student_reps[-1]]
                else:
                    assert teacher_layer_num % student_layer_num == 0

                    layers_per_block = int(teacher_layer_num / student_layer_num)
                    if student_config.distillation_config["use_attention_scores"]:
                        new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1]
                                            for i in range(student_layer_num)]

                    if student_config.distillation_config["use_value_states"]:
                        new_teacher_values = [teacher_values[i * layers_per_block + layers_per_block - 1]
                                    for i in range(student_layer_num)]

                    if student_config.distillation_config["use_hidden_states"]:
                        new_teacher_reps = [teacher_reps[i * layers_per_block + layers_per_block - 1]
                                    for i in range(student_layer_num)]
                        new_student_reps = student_reps
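                    # Worked example of the uniform mapping: a 12-layer teacher
                    # distilled into a 4-layer student gives layers_per_block = 3,
                    # so the student layers are matched against teacher layers
                    # 2, 5, 8 and 11 (0-indexed).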

                if student_config.distillation_config["use_attention_scores"]:
                    att_loss = transformer_losses.compute_loss(student_atts, new_teacher_atts, loss_name="attention_loss")

                if student_config.distillation_config["use_hidden_states"]:
                    if use_projection:
                        rep_loss = transformer_losses.compute_loss(project(new_student_reps), new_teacher_reps, loss_name="hidden_state_loss")
                    else:
                        rep_loss = transformer_losses.compute_loss(new_student_reps, new_teacher_reps, loss_name="hidden_state_loss")

                if student_config.distillation_config["use_embedding_states"]:
                    if use_projection:
                        rep_loss += transformer_losses.compute_loss(project(student_embeddings), teacher_embeddings, loss_name="embedding_state_loss")
                    else:
                        rep_loss += transformer_losses.compute_loss(student_embeddings, teacher_embeddings, loss_name="embedding_state_loss")

                if student_config.distillation_config["use_value_states"]:
                    value_loss = transformer_losses.compute_loss(student_values, new_teacher_values, loss_name="value_state_loss")

                loss = att_loss + rep_loss + value_loss

            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            # att_loss is a plain float (0.) when attention distillation is
            # disabled, so guard the .item() call like the other terms.
            if student_config.distillation_config["use_attention_scores"]:
                tr_att_loss += att_loss.item() / args.gradient_accumulation_steps
            if student_config.distillation_config["use_hidden_states"]:
                tr_rep_loss += rep_loss.item() / args.gradient_accumulation_steps
            if student_config.distillation_config["use_value_states"]:
                tr_value_loss += value_loss.item() / args.gradient_accumulation_steps
            if args.amp:
                scaler.scale(loss).backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            local_step += 1

            if local_step % args.gradient_accumulation_steps == 0:
                # Unscale exactly once per optimizer step (a second call to
                # scaler.unscale_ between updates raises a RuntimeError) and
                # clip the true, accumulated gradients rather than the scaled
                # per-micro-batch ones.
                if args.amp:
                    scaler.unscale_(optimizer)
                if use_projection:
                    torch.nn.utils.clip_grad_norm_(chain(student_model.parameters(), project.parameters()), args.max_grad_norm, error_if_nonfinite=False)
                else:
                    torch.nn.utils.clip_grad_norm_(student_model.parameters(), args.max_grad_norm, error_if_nonfinite=False)

                scheduler.step()
                if args.amp:
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    optimizer.step()

                optimizer.zero_grad()
                # FusedAdam exposes its update count as "step"; fall back to a
                # local counter so global_step still advances without it.
                global_step = optimizer.param_groups[0]["step"] if "step" in optimizer.param_groups[0] else global_step + 1

                if (global_step % args.steps_per_epoch) > 0:
                    mean_loss = tr_loss / (global_step % args.steps_per_epoch)
                    mean_att_loss = tr_att_loss / (global_step % args.steps_per_epoch)
                    mean_rep_loss = tr_rep_loss / (global_step % args.steps_per_epoch)
                    mean_value_loss = tr_value_loss / (global_step % args.steps_per_epoch)

                if (global_step + 1) % args.eval_step == 0 and is_main_process():
                    result = {}
                    result['global_step'] = global_step
                    result['lr'] = optimizer.param_groups[0]["lr"]
                    result['loss'] = mean_loss
                    result['att_loss'] = mean_att_loss
                    result['rep_loss'] = mean_rep_loss
                    result['value_loss'] = mean_value_loss
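                    # Throughput in sequences per second, aggregated over all
                    # ranks: total sequences processed so far divided by the
                    # wall-clock time since the training loop started.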
                    result['perf'] = (global_step + 1) * get_world_size() * args.train_batch_size * args.gradient_accumulation_steps / (time.time() - iter_start)
                    output_eval_file = os.path.join(args.output_dir, "log.txt")
                    if is_main_process():
                        with open(output_eval_file, "a") as writer:
                            logger.info("***** Eval results *****")
                            for key in sorted(result.keys()):
                                logger.info("  %s = %s", key, str(result[key]))
                                writer.write("%s = %s\n" % (key, str(result[key])))

                        # Save a checkpoint of the trained model
                        model_name = WEIGHTS_NAME

                        logging.info("** ** * Saving fine-tuned model ** ** * ")
                        # Only save the model itself (unwrap the DDP container)
                        model_to_save = student_model.module if hasattr(student_model, 'module') else student_model
                        if use_projection:
                            project_to_save = project.module if hasattr(project, 'module') else project

                        output_model_file = os.path.join(args.output_dir, model_name)
                        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
                        output_project_file = os.path.join(args.output_dir, "project.bin")
                        torch.save(model_to_save.state_dict(), output_model_file)
                        if use_projection:
                            torch.save(project_to_save.state_dict(), output_project_file)
                        model_to_save.config.to_json_file(output_config_file)
                        tokenizer.save_vocabulary(args.output_dir)

                        if oncloud:
                            logging.info(mox.file.list_directory(args.output_dir, recursive=True))
                            logging.info(mox.file.list_directory('.', recursive=True))
                            mox.file.copy_parallel(args.output_dir, args.data_url)
                            mox.file.copy_parallel('.', args.data_url)

    model_name = WEIGHTS_NAME
    logging.info("** ** * Saving fine-tuned model ** ** * ")
    model_to_save = student_model.module if hasattr(student_model, 'module') else student_model

    if use_projection:
        project_to_save = project.module if hasattr(project, 'module') else project
        output_project_file = os.path.join(args.output_dir, "project.bin")
        if is_main_process():
            torch.save(project_to_save.state_dict(), output_project_file)

    output_model_file = os.path.join(args.output_dir, model_name)
    output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

    if is_main_process():
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
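    # 'oncloud' and 'mox' presumably refer to Huawei ModelArts' MoXing file API
    # (set up elsewhere in the full script); on-cloud runs mirror the output
    # directory to args.data_url.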

    if oncloud:
        logging.info(mox.file.list_directory(args.output_dir, recursive=True))
        logging.info(mox.file.list_directory('.', recursive=True))
        mox.file.copy_parallel(args.output_dir, args.data_url)
        mox.file.copy_parallel('.', args.data_url)