    def build(self, lr=None):
        self.prev_global_step = 0
        if self.args.distill_rep_attn and not self.args.distill_logit:
            self.stage = 'kd_stage1'
        elif self.args.distill_logit and not self.args.distill_rep_attn:
            self.stage = 'kd_stage2'
        elif self.args.distill_logit and self.args.distill_rep_attn:
            self.stage = 'kd_joint'
        else:
            self.stage = 'nokd'
        self.output_dir = os.path.join(self.args.output_dir, self.stage)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        param_optimizer = list(self.student_model.named_parameters())
        self.clip_params = {}
        for k, v in param_optimizer:
            if 'clip_' in k:
                self.clip_params[k] = v

        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
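        # Three parameter groups: (1) regular weights with weight decay,
        # (2) biases and LayerNorm parameters with no decay, and (3) the
        # 'clip_' parameters (presumably learned quantization clip values,
        # given the quantized-module checks below) with their own lr/wd.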
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in param_optimizer
                    if (not any(nd in n
                                for nd in no_decay) and 'clip_' not in n)
                ],
                'weight_decay':
                self.args.weight_decay
            },
            {
                'params': [
                    p for n, p in param_optimizer
                    if (any(nd in n for nd in no_decay) and 'clip_' not in n)
                ],
                'weight_decay':
                0.0
            },
            {
                'params': list(self.clip_params.values()),
                'lr': self.args.clip_lr,
                'weight_decay': self.args.clip_wd
            },
        ]

        schedule = 'warmup_linear'
        learning_rate = lr if lr is not None else self.args.learning_rate
        self.optimizer = BertAdam(optimizer_grouped_parameters,
                                  schedule=schedule,
                                  lr=learning_rate,
                                  warmup=self.args.warmup_proportion,
                                  t_total=self.num_train_optimization_steps)
        logging.info("Optimizer prepared.")
        self._check_quantized_modules()
        self._setup_grad_scale_stats()
Example #2
    def set_model(self):
        print('[Runner] - Initializing Transformer model...')
        
        # build the Transformer model with speech prediction head
        model_config = TransformerConfig(self.config)
        self.dr = model_config.downsample_rate

        self.model = TransformerForMaskedAcousticModel(model_config, self.input_dim, self.output_dim).to(self.device)
        self.model.train()

        if self.args.multi_gpu:
            self.model = torch.nn.DataParallel(self.model)
            print('[Runner] - Multi-GPU training Enabled: ' + str(torch.cuda.device_count()))
        print('[Runner] - Number of parameters: ' + str(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))

        # Setup optimizer
        param_optimizer = list(self.model.named_parameters())

        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]

        if self.apex:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                    lr=self.learning_rate,
                                    bias_correction=False,
                                    max_grad_norm=1.0)
            if self.config['optimizer']['loss_scale'] == 0:
                self.optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                self.optimizer = FP16_Optimizer(optimizer, static_loss_scale=self.config['optimizer']['loss_scale'])
            self.warmup_linear = WarmupLinearSchedule(warmup=self.warmup_proportion,
                                                      t_total=self.total_steps)
        else:
            self.optimizer = BertAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      warmup=self.warmup_proportion,
                                      t_total=self.total_steps)
Example #3
def get_optimizer(params, lr, warmup_proportion, training_steps):
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in params if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in params if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=lr,
                         warmup=warmup_proportion,
                         t_total=training_steps)
    return optimizer
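
# Usage sketch for get_optimizer() above (illustrative only; the model, dataloader
# and loss computation are stand-ins and not part of the original snippet).
def run_toy_training(model, train_dataloader, compute_loss, total_steps=1000):
    optimizer = get_optimizer(params=list(model.named_parameters()),
                              lr=5e-5,
                              warmup_proportion=0.1,
                              training_steps=total_steps)
    model.train()
    for step, batch in enumerate(train_dataloader):
        loss = compute_loss(model, batch)  # caller-supplied scalar loss
        loss.backward()
        optimizer.step()  # BertAdam applies its warmup-linear schedule internally
        optimizer.zero_grad()
        if step + 1 >= total_steps:
            break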
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files or the task.")
    parser.add_argument("--teacher_model",
                        default=None,
                        type=str,
                        help="The teacher model dir.")
    parser.add_argument("--student_model",
                        default=None,
                        type=str,
                        required=True,
                        help="The student model dir.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where model checkpoints will be written.")
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where to store the pre-trained models downloaded from S3.")
    parser.add_argument("--max_seq_len",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length ")
    parser.add_argument("--num_labels",
                        default=2,
                        type=int,
                        required=True,
                        help="")
    parser.add_argument("--task_mode",
                        default='classification',
                        type=str,
                        required=False,
                        help="task type")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run train on the train set.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate")

    # added arguments
    parser.add_argument('--aug_train', action='store_true')
    parser.add_argument('--eval_step', type=int, default=50)
    parser.add_argument('--pred_distill', action='store_true')
    parser.add_argument('--data_url', type=str, default="")
    parser.add_argument('--temperature', type=float, default=1.)

    args = parser.parse_args()
    logger.info('The args: {}'.format(args))

    # Prepare devices
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    logger.info("device: {} n_gpu: {}".format(device, n_gpu))

    # Prepare seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare task settings
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    num_labels = args.num_labels

    tokenizer = BertTokenizer.from_pretrained(args.student_model,
                                              do_lower_case=args.do_lower_case)

    if args.do_train:
        train_path = os.path.join(args.data_dir, 'train.txt')
        eval_path = os.path.join(args.data_dir, 'eval.txt')
        train_examples = read_examples(train_path)
        eval_examples = read_examples(eval_path)
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        train_features = convert_examples_to_features(train_examples,
                                                      tokenizer,
                                                      args.max_seq_len)
        eval_features = convert_examples_to_features(eval_examples, tokenizer,
                                                     args.max_seq_len)

        train_features = MyDataLoader(train_features)
        eval_features = MyDataLoader(eval_features)

        train_dataloader = DataLoader(train_features,
                                      shuffle=True,
                                      batch_size=args.train_batch_size)
        # eval_dataloader = DataLoader(eval_features, shuffle=False, batch_size=args.eval_batch_size)

        teacher_model = TinyBertForSequenceClassification.from_pretrained(
            args.teacher_model, num_labels=num_labels)
        teacher_model.to(device)

    student_model = TinyBertForSequenceClassification.from_pretrained(
        args.student_model, num_labels=num_labels)
    student_model.to(device)
    # prediction only: the student model is loaded even when not training
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        if n_gpu > 1:
            student_model = torch.nn.DataParallel(student_model)
            teacher_model = torch.nn.DataParallel(teacher_model)
        # Prepare optimizer
        param_optimizer = list(student_model.named_parameters())
        size = 0
        for n, p in student_model.named_parameters():
            logger.info('n: {}'.format(n))
            size += p.nelement()

        logger.info('Total parameters: {}'.format(size))
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]
        schedule = 'warmup_linear'
        if not args.pred_distill:
            schedule = 'none'
        optimizer = BertAdam(optimizer_grouped_parameters,
                             schedule=schedule,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)
        # Prepare loss functions
        loss_mse = MSELoss()

        def soft_cross_entropy(predicts, targets):
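            # Soft-target distillation loss: -softmax(teacher) * log_softmax(student),
            # averaged over both the batch and class dimensions; per example it is
            # proportional to the cross-entropy H(teacher, student), i.e. the KL
            # divergence KL(teacher || student) plus the teacher entropy.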
            student_likelihood = torch.nn.functional.log_softmax(predicts,
                                                                 dim=-1)
            targets_prob = torch.nn.functional.softmax(targets, dim=-1)
            return (-targets_prob * student_likelihood).mean()

        # Train and evaluate
        global_step = 0
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")

        for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0.
            tr_att_loss = 0.
            tr_rep_loss = 0.
            tr_cls_loss = 0.

            student_model.train()
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration", ascii=True)):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch
                if input_ids.size()[0] != args.train_batch_size:
                    continue

                att_loss = 0.
                rep_loss = 0.
                cls_loss = 0.

                student_logits, student_atts, student_reps = student_model(
                    input_ids, segment_ids, input_mask, is_student=True)

                with torch.no_grad():
                    teacher_logits, teacher_atts, teacher_reps = teacher_model(
                        input_ids, segment_ids, input_mask)

                # Stage 1: intermediate-layer distillation (attention maps and hidden representations)
                if not args.pred_distill:
                    teacher_layer_num = len(teacher_atts)
                    student_layer_num = len(student_atts)
                    assert teacher_layer_num % student_layer_num == 0
                    layers_per_block = int(teacher_layer_num /
                                           student_layer_num)
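                    # Map each student layer to the last teacher layer of its block,
                    # e.g. a 12-layer teacher and a 4-layer student give
                    # layers_per_block = 3 and teacher attention indices 2, 5, 8, 11.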
                    new_teacher_atts = [
                        teacher_atts[i * layers_per_block + layers_per_block -
                                     1] for i in range(student_layer_num)
                    ]

                    for student_att, teacher_att in zip(
                            student_atts, new_teacher_atts):
                        student_att = torch.where(
                            student_att <= -1e2,
                            torch.zeros_like(student_att).to(device),
                            student_att)
                        teacher_att = torch.where(
                            teacher_att <= -1e2,
                            torch.zeros_like(teacher_att).to(device),
                            teacher_att)

                        tmp_loss = loss_mse(student_att, teacher_att)
                        att_loss += tmp_loss

                    new_teacher_reps = [
                        teacher_reps[i * layers_per_block]
                        for i in range(student_layer_num + 1)
                    ]
                    new_student_reps = student_reps
                    for student_rep, teacher_rep in zip(
                            new_student_reps, new_teacher_reps):
                        tmp_loss = loss_mse(student_rep, teacher_rep)
                        rep_loss += tmp_loss

                    loss = rep_loss + att_loss
                    tr_att_loss += att_loss.item()
                    tr_rep_loss += rep_loss.item()
                # Stage 2: prediction-layer distillation (logits)
                else:
                    if args.task_mode == "classification":
                        cls_loss = soft_cross_entropy(
                            student_logits / args.temperature,
                            teacher_logits / args.temperature)
                    elif args.task_mode == "regression":
                        loss_mse = MSELoss()
                        cls_loss = loss_mse(student_logits.view(-1),
                                            label_ids.view(-1))
                    loss = cls_loss
                    tr_cls_loss += cls_loss.item()

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += label_ids.size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                if (global_step + 1) % args.eval_step == 0:
                    logger.info("***** Running evaluation *****")
                    logger.info("  Epoch = {} iter {} step".format(
                        epoch_, global_step))
                    logger.info("  Num examples = %d", len(eval_examples))
                    logger.info("  Batch size = %d", args.eval_batch_size)

                    student_model.eval()

                    loss = tr_loss / (step + 1)
                    cls_loss = tr_cls_loss / (step + 1)
                    att_loss = tr_att_loss / (step + 1)
                    rep_loss = tr_rep_loss / (step + 1)

                    result = {}
                    result['global_step'] = global_step
                    result['cls_loss'] = cls_loss
                    result['att_loss'] = att_loss
                    result['rep_loss'] = rep_loss
                    result['loss'] = loss

                    result_to_file(result, output_eval_file)

            logger.info("***** Save model *****")
            model_to_save = student_model.module if hasattr(
                student_model, 'module') else student_model
            model_name = f'{epoch_}_{WEIGHTS_NAME}'
            output_model_file = os.path.join(args.output_dir, model_name)
            output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            tokenizer.save_vocabulary(args.output_dir)
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--teacher_model",
                        default=None,
                        type=str,
                        help="The teacher model dir.")
    parser.add_argument("--student_model",
                        default=None,
                        type=str,
                        required=True,
                        help="The student model dir.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where to store the pre-trained models downloaded from S3.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )

    # added arguments
    parser.add_argument('--aug_train', action='store_true')
    parser.add_argument('--eval_step', type=int, default=50)
    parser.add_argument('--pred_distill', action='store_true')
    parser.add_argument('--data_url', type=str, default="")
    parser.add_argument('--temperature', type=float, default=1.)
    parser.add_argument('--local_rank', type=int, default=-1)

    args = parser.parse_args()
    logger.info('The args: {}'.format(args))

    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mnli-mm": MnliMismatchedProcessor,
        "mrpc": MrpcProcessor,
        "sst-2": Sst2Processor,
        "sts-b": StsbProcessor,
        "qqp": QqpProcessor,
        "qnli": QnliProcessor,
        "rte": RteProcessor,
        "wnli": WnliProcessor
    }

    output_modes = {
        "cola": "classification",
        "mnli": "classification",
        "mrpc": "classification",
        "sst-2": "classification",
        "sts-b": "regression",
        "qqp": "classification",
        "qnli": "classification",
        "rte": "classification",
        "wnli": "classification"
    }

    # intermediate distillation default parameters
    default_params = {
        "cola": {
            "num_train_epochs": 50,
            "max_seq_length": 64
        },
        "mnli": {
            "num_train_epochs": 5,
            "max_seq_length": 128
        },
        "mrpc": {
            "num_train_epochs": 20,
            "max_seq_length": 128
        },
        "sst-2": {
            "num_train_epochs": 10,
            "max_seq_length": 64
        },
        "sts-b": {
            "num_train_epochs": 20,
            "max_seq_length": 128
        },
        "qqp": {
            "num_train_epochs": 5,
            "max_seq_length": 128
        },
        "qnli": {
            "num_train_epochs": 10,
            "max_seq_length": 128
        },
        "rte": {
            "num_train_epochs": 20,
            "max_seq_length": 128
        }
    }

    acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
    corr_tasks = ["sts-b"]
    mcc_tasks = ["cola"]

    # Prepare devices

    n_gpu = torch.cuda.device_count()

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))

    # Prepare seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare task settings
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name in default_params:
        args.max_seq_length = default_params[task_name]["max_seq_length"]

    if not args.pred_distill and not args.do_eval:
        if task_name in default_params:
            args.num_train_epochs = default_params[task_name][
                "num_train_epochs"]

    if task_name not in processors:
        raise ValueError("Task not found: %s" % task_name)

    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer.from_pretrained(args.student_model,
                                              do_lower_case=args.do_lower_case)

    if not args.do_eval:
        #if not args.aug_train:
        #    train_examples = processor.get_train_examples(args.data_dir)
        #else:
        #    train_examples = processor.get_aug_examples(args.data_dir)
        if args.gradient_accumulation_steps < 1:
            raise ValueError(
                "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
                .format(args.gradient_accumulation_steps))

        args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

        # rewrite data processing here
        assert args.task_name == "MNLI", "the script currently supports MNLI only"
        mnli_datasets = load_dataset("text",
                                     data_files=os.path.join(
                                         args.data_dir, "train_aug.tsv"))
        label_classes = processor.get_labels()
        label_map = {label: i for i, label in enumerate(label_classes)}

        def preprocess_func(examples, max_seq_length=args.max_seq_length):
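            # Builds BERT-style paired inputs "[CLS] sent1 [SEP] sent2 [SEP]" from
            # tab-separated MNLI rows (columns 8 and 9 hold the two sentences, the
            # last column the label), truncated and zero-padded to max_seq_length.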
            splits = [e.split('\t') for e in examples['text']]
            # tokenize for sent1 & sent2
            tokens_s1 = [tokenizer.tokenize(e[8]) for e in splits]
            tokens_s2 = [tokenizer.tokenize(e[9]) for e in splits]
            for t1, t2 in zip(tokens_s1, tokens_s2):
                truncate_seq_pair(t1, t2, max_length=max_seq_length - 3)
            input_ids_list = []
            input_mask_list = []
            segment_ids_list = []
            seq_length_list = []
            labels_list = []
            labels = [e[-1] for e in splits]
            # print(len(labels))
            for token_a, token_b, l in zip(
                    tokens_s1, tokens_s2,
                    labels):  # zip(tokens_as, tokens_bs):
                tokens = ["[CLS]"] + token_a + ["[SEP]"]
                segment_ids = [0] * len(tokens)
                tokens += token_b + ["[SEP]"]
                segment_ids += [1] * (len(token_b) + 1)
                input_ids = tokenizer.convert_tokens_to_ids(
                    tokens)  # tokenize to id
                input_mask = [1] * len(input_ids)
                seq_length = len(input_ids)
                padding = [0] * (max_seq_length - len(input_ids))
                input_ids += padding
                input_mask += padding
                segment_ids += padding
                assert len(input_ids) == max_seq_length
                assert len(input_mask) == max_seq_length
                assert len(segment_ids) == max_seq_length
                input_ids_list.append(input_ids)
                input_mask_list.append(input_mask)
                segment_ids_list.append(segment_ids)
                seq_length_list.append(seq_length)
                labels_list.append(label_map[l])

            results = {
                "input_ids": input_ids_list,
                "input_mask": input_mask_list,
                "segment_ids": segment_ids_list,
                "seq_length": seq_length_list,
                "label_ids": labels_list
            }

            return results

        mnli_datasets = mnli_datasets.map(preprocess_func, batched=True)

        # train_features = convert_examples_to_features(train_examples, label_list,
        #                                               args.max_seq_length, tokenizer, output_mode, logger)
        train_data = mnli_datasets['train'].remove_columns('text')

        print(train_data[0])
        # train_data, _ = get_tensor_data(output_mode, train_features)
        num_train_optimization_steps = int(
            len(train_data) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        logger.info("Initializing Distributed Environment")
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend="nccl")
        train_sampler = torch.utils.data.DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

    eval_examples = processor.get_dev_examples(args.data_dir)
    eval_features = convert_examples_to_features(eval_examples, label_list,
                                                 args.max_seq_length,
                                                 tokenizer, output_mode,
                                                 logger)
    eval_data, eval_labels = get_tensor_data(output_mode, eval_features)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

    # DDP setting
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)

    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    student_model = TinyBertForSequenceClassification.from_pretrained(
        args.student_model, num_labels=num_labels).to(device)

    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        student_model.eval()
        result = do_eval(student_model, task_name, eval_dataloader, device,
                         output_mode, eval_labels, num_labels)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
    else:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        teacher_model = TinyBertForSequenceClassification.from_pretrained(
            args.teacher_model, num_labels=num_labels).to(device)
        student_model = DDP(student_model,
                            device_ids=[local_rank],
                            output_device=local_rank)
        teacher_model = DDP(teacher_model,
                            device_ids=[local_rank],
                            output_device=local_rank)
        # Prepare optimizer
        param_optimizer = list(student_model.named_parameters())
        size = 0
        for n, p in student_model.named_parameters():
            logger.info('n: {}'.format(n))
            size += p.nelement()

        logger.info('Total parameters: {}'.format(size))
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]
        schedule = 'warmup_linear'
        if not args.pred_distill:
            schedule = 'none'
        optimizer = BertAdam(optimizer_grouped_parameters,
                             schedule=schedule,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)
        scaler = torch.cuda.amp.GradScaler()
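        # Mixed-precision setup: the forward passes below run under autocast(),
        # scaler.scale(loss).backward() scales the gradients, scaler.step(optimizer)
        # unscales them and skips the update if they contain inf/NaN, and
        # scaler.update() adjusts the loss scale for the next iteration.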

        # Prepare loss functions
        loss_mse = MSELoss()

        def soft_cross_entropy(predicts, targets):
            student_likelihood = torch.nn.functional.log_softmax(predicts,
                                                                 dim=-1)
            targets_prob = torch.nn.functional.softmax(targets, dim=-1)
            return (-targets_prob * student_likelihood).mean()

        # Train and evaluate
        global_step = 0
        best_dev_acc = 0.0
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")

        for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0.
            tr_att_loss = 0.
            tr_rep_loss = 0.
            tr_cls_loss = 0.

            student_model.train()
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration", ascii=True)):
                # optimizer.zero_grad()
                #batch = tuple(torch.tensor(t, dtype=torch.long).to(device) for t in batch)
                # print(batch)
                inputs = {}
                for k, v in batch.items():
                    if isinstance(v, torch.Tensor):
                        inputs[k] = v.to(device)
                    elif isinstance(v, list):
                        inputs[k] = torch.stack(v, dim=1).to(device)

                # inputs = {k: torch.tensor(v, dtype=torch.long).to(device) for k, v in batch.items()}
                # input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch
                # print([(k, inputs[k].size()) for k in inputs])
                if inputs['input_ids'].size()[0] != args.train_batch_size:
                    continue

                att_loss = 0.
                rep_loss = 0.
                cls_loss = 0.
                with autocast():
                    student_logits, student_atts, student_reps = student_model(
                        inputs['input_ids'],
                        inputs['segment_ids'],
                        inputs['input_mask'],
                        is_student=True)
                    with torch.no_grad():
                        teacher_logits, teacher_atts, teacher_reps = teacher_model(
                            inputs['input_ids'], inputs['segment_ids'],
                            inputs['input_mask'])

                    if not args.pred_distill:
                        teacher_layer_num = len(teacher_atts)
                        student_layer_num = len(student_atts)
                        assert teacher_layer_num % student_layer_num == 0
                        layers_per_block = int(teacher_layer_num /
                                               student_layer_num)
                        new_teacher_atts = [
                            teacher_atts[i * layers_per_block +
                                         layers_per_block - 1]
                            for i in range(student_layer_num)
                        ]

                        for student_att, teacher_att in zip(
                                student_atts, new_teacher_atts):
                            student_att = torch.where(
                                student_att <= -1e2,
                                torch.zeros_like(student_att).to(device),
                                student_att)
                            teacher_att = torch.where(
                                teacher_att <= -1e2,
                                torch.zeros_like(teacher_att).to(device),
                                teacher_att)

                            tmp_loss = loss_mse(student_att, teacher_att)
                            att_loss += tmp_loss

                        new_teacher_reps = [
                            teacher_reps[i * layers_per_block]
                            for i in range(student_layer_num + 1)
                        ]
                        new_student_reps = student_reps
                        for student_rep, teacher_rep in zip(
                                new_student_reps, new_teacher_reps):
                            tmp_loss = loss_mse(student_rep, teacher_rep)
                            rep_loss += tmp_loss
                        # add this term for amp detection
                        loss = rep_loss + att_loss + 0 * soft_cross_entropy(
                            student_logits / args.temperature,
                            teacher_logits / args.temperature)
                        tr_att_loss += att_loss.item()
                        tr_rep_loss += rep_loss.item()
                    else:
                        if output_mode == "classification":
                            cls_loss = soft_cross_entropy(
                                student_logits / args.temperature,
                                teacher_logits / args.temperature)
                        elif output_mode == "regression":
                            loss_mse = MSELoss()
                            cls_loss = loss_mse(student_logits.view(-1),
                                                inputs['label_ids'].view(-1))

                        loss = cls_loss + 0 * loss_mse(
                            student_atts[0], teacher_atts[0]) + 0 * loss_mse(
                                teacher_reps[0], student_reps[0])
                        tr_cls_loss += cls_loss.item()

                # if n_gpu > 1:
                #     loss = loss.mean()  # mean() to average on multi-gpu.
                # if args.gradient_accumulation_steps > 1:
                #     loss = loss / args.gradient_accumulation_steps
                scaler.scale(loss).backward()
                # loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += inputs['label_ids'].size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # optimizer.step()
                    scaler.step(optimizer)
                    scaler.update()
                    optimizer.zero_grad()
                    global_step += 1

                if (global_step +
                        1) % args.eval_step == 0 and args.local_rank == 0:
                    logger.info("***** Running evaluation *****")
                    logger.info("  Epoch = {} iter {} step".format(
                        epoch_, global_step))
                    logger.info("  Num examples = %d", len(eval_examples))
                    logger.info("  Batch size = %d", args.eval_batch_size)

                    student_model.eval()

                    loss = tr_loss / (step + 1)
                    cls_loss = tr_cls_loss / (step + 1)
                    att_loss = tr_att_loss / (step + 1)
                    rep_loss = tr_rep_loss / (step + 1)

                    result = {}
                    if args.pred_distill:
                        result = do_eval(student_model, task_name,
                                         eval_dataloader, device, output_mode,
                                         eval_labels, num_labels)
                    result['global_step'] = global_step
                    result['cls_loss'] = cls_loss
                    result['att_loss'] = att_loss
                    result['rep_loss'] = rep_loss
                    result['loss'] = loss

                    result_to_file(result, output_eval_file)
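                    # Checkpointing policy for the block below: in the intermediate
                    # (rep/attn) distillation stage every eval step saves the student;
                    # in the prediction-distillation stage the student is saved only
                    # when the dev metric (acc / corr / mcc per task) improves.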

                    if not args.pred_distill:
                        save_model = True
                    else:
                        save_model = False

                        if task_name in acc_tasks and result[
                                'acc'] > best_dev_acc:
                            best_dev_acc = result['acc']
                            save_model = True

                        if task_name in corr_tasks and result[
                                'corr'] > best_dev_acc:
                            best_dev_acc = result['corr']
                            save_model = True

                        if task_name in mcc_tasks and result[
                                'mcc'] > best_dev_acc:
                            best_dev_acc = result['mcc']
                            save_model = True

                    if save_model and args.local_rank == 0:
                        logger.info("***** Save model *****")

                        model_to_save = student_model.module if hasattr(
                            student_model, 'module') else student_model

                        model_name = WEIGHTS_NAME
                        # if not args.pred_distill:
                        #     model_name = "step_{}_{}".format(global_step, WEIGHTS_NAME)
                        output_model_file = os.path.join(
                            args.output_dir, model_name)
                        output_config_file = os.path.join(
                            args.output_dir, CONFIG_NAME)

                        torch.save(model_to_save.state_dict(),
                                   output_model_file)
                        model_to_save.config.to_json_file(output_config_file)
                        tokenizer.save_vocabulary(args.output_dir)

                        # Test mnli-mm
                        if args.pred_distill and task_name == "mnli":
                            task_name = "mnli-mm"
                            processor = processors[task_name]()
                            if not os.path.exists(args.output_dir + '-MM'):
                                os.makedirs(args.output_dir + '-MM')

                            eval_examples = processor.get_dev_examples(
                                args.data_dir)

                            eval_features = convert_examples_to_features(
                                eval_examples, label_list, args.max_seq_length,
                                tokenizer, output_mode, logger)
                            eval_data, eval_labels = get_tensor_data(
                                output_mode, eval_features)

                            logger.info("***** Running mm evaluation *****")
                            logger.info("  Num examples = %d",
                                        len(eval_examples))
                            logger.info("  Batch size = %d",
                                        args.eval_batch_size)

                            eval_sampler = SequentialSampler(eval_data)
                            eval_dataloader = DataLoader(
                                eval_data,
                                sampler=eval_sampler,
                                batch_size=args.eval_batch_size)

                            result = do_eval(student_model, task_name,
                                             eval_dataloader, device,
                                             output_mode, eval_labels,
                                             num_labels)

                            result['global_step'] = global_step

                            tmp_output_eval_file = os.path.join(
                                args.output_dir + '-MM', "eval_results.txt")
                            result_to_file(result, tmp_output_eval_file)

                            task_name = 'mnli'

                        if oncloud:
                            logging.info(
                                mox.file.list_directory(args.output_dir,
                                                        recursive=True))
                            logging.info(
                                mox.file.list_directory('.', recursive=True))
                            mox.file.copy_parallel(args.output_dir,
                                                   args.data_url)
                            mox.file.copy_parallel('.', args.data_url)

                    student_model.train()
Example #6
class Runner():
    ''' Handler for complete pre-training progress of upstream models '''
    def __init__(self, args, config, dataloader, ckpdir):

        self.device = torch.device('cuda') if (
            args.gpu and torch.cuda.is_available()) else torch.device('cpu')
        if torch.cuda.is_available(): print('[Runner] - CUDA is available!')
        self.model_kept = []
        self.global_step = 1
        self.log = SummaryWriter(ckpdir)

        self.args = args
        self.config = config
        self.dataloader = dataloader
        self.ckpdir = ckpdir

        # optimizer
        self.learning_rate = float(config['optimizer']['learning_rate'])
        self.warmup_proportion = config['optimizer']['warmup_proportion']
        self.gradient_accumulation_steps = config['optimizer'][
            'gradient_accumulation_steps']
        self.gradient_clipping = config['optimizer']['gradient_clipping']

        # Training details
        self.apex = config['runner']['apex']
        self.total_steps = config['runner']['total_steps']
        self.log_step = config['runner']['log_step']
        self.save_step = config['runner']['save_step']
        self.duo_feature = config['runner']['duo_feature']
        self.max_keep = config['runner']['max_keep']

        # model
        self.transformer_config = config['transformer']
        self.input_dim = self.transformer_config['input_dim']
        self.output_dim = 1025 if self.duo_feature else None  # None: the model falls back to the input dim (output dim == input dim when not using duo features)

    def set_model(self):
        print('[Runner] - Initializing Transformer model...')

        # build the Transformer model with speech prediction head
        model_config = TransformerConfig(self.config)
        self.dr = model_config.downsample_rate
        self.hidden_size = model_config.hidden_size

        self.model = TransformerForMaskedAcousticModel(
            model_config, self.input_dim, self.output_dim).to(self.device)
        self.model.train()

        if self.args.multi_gpu:
            self.model = torch.nn.DataParallel(self.model)
            print('[Runner] - Multi-GPU training Enabled: ' +
                  str(torch.cuda.device_count()))
        print('[Runner] - Number of parameters: ' + str(
            sum(p.numel()
                for p in self.model.parameters() if p.requires_grad)))

        # Setup optimizer
        param_optimizer = list(self.model.named_parameters())

        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]

        if self.apex:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
                )

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=self.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if self.config['optimizer']['loss_scale'] == 0:
                self.optimizer = FP16_Optimizer(optimizer,
                                                dynamic_loss_scale=True)
            else:
                self.optimizer = FP16_Optimizer(
                    optimizer,
                    static_loss_scale=self.config['optimizer']['loss_scale'])
            self.warmup_linear = WarmupLinearSchedule(
                warmup=self.warmup_proportion, t_total=self.total_steps)
        else:
            self.optimizer = BertAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      warmup=self.warmup_proportion,
                                      t_total=self.total_steps)

    def save_model(self, name='states', to_path=None):
        all_states = {
            'SpecHead':
            self.model.SpecHead.state_dict() if not self.args.multi_gpu else
            self.model.module.SpecHead.state_dict(),
            'Transformer':
            self.model.Transformer.state_dict() if not self.args.multi_gpu else
            self.model.module.Transformer.state_dict(),
            'Optimizer':
            self.optimizer.state_dict(),
            'Global_step':
            self.global_step,
            'Settings': {
                'Config': self.config,
                'Paras': self.args,
            },
        }

        if to_path is None:
            new_model_path = '{}/{}-{}.ckpt'.format(self.ckpdir, name,
                                                    self.global_step)
        else:
            new_model_path = to_path

        torch.save(all_states, new_model_path)
        self.model_kept.append(new_model_path)

        if len(self.model_kept) >= self.max_keep:
            os.remove(self.model_kept[0])
            self.model_kept.pop(0)

    def up_sample_frames(self, spec, return_first=False):
        if len(spec.shape) != 3:
            spec = spec.unsqueeze(0)
            assert (len(spec.shape) == 3
                    ), 'Input should have acoustic feature of shape BxTxD'
        # spec shape: [batch_size, sequence_length // downsample_rate, output_dim * downsample_rate]
        spec_flatten = spec.view(spec.shape[0], spec.shape[1] * self.dr,
                                 spec.shape[2] // self.dr)
        if return_first: return spec_flatten[0]
        return spec_flatten  # spec_flatten shape: [batch_size, sequence_length * downsample_rate, output_dim // downsample_rate]

    def down_sample_frames(self, spec):
        left_over = spec.shape[1] % self.dr
        if left_over != 0: spec = spec[:, :-left_over, :]
        spec_stacked = spec.view(spec.shape[0], spec.shape[1] // self.dr,
                                 spec.shape[2] * self.dr)
        return spec_stacked
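
    # Shape sketch for the two reshape helpers above (illustrative numbers):
    # with downsample_rate dr = 3 and a spec of shape [B, T, D] = [8, 300, 80],
    # down_sample_frames gives [8, 100, 240] (time folded into the feature dim)
    # and up_sample_frames on that result gives [8, 300, 80] back, so the two
    # views are exact inverses whenever T is a multiple of dr.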

    def process_data(self, spec):
        """Process training data for the masked acoustic model"""
        with torch.no_grad():

            assert (
                len(spec) == 5
            ), 'dataloader should return (spec_masked, pos_enc, mask_label, attn_mask, spec_stacked)'
            # Unpack and hack the bucket: bucketing should give acoustic features of shape 1xBxTxD
            spec_masked = spec[0].squeeze(0)
            pos_enc = spec[1].squeeze(0)
            mask_label = spec[2].squeeze(0)
            attn_mask = spec[3].squeeze(0)
            spec_stacked = spec[4].squeeze(0)

            spec_masked = spec_masked.to(device=self.device)
            if pos_enc.dim() == 3:
                # pos_enc: (batch_size, seq_len, hidden_size)
                # GPU memory need (batch_size * seq_len * hidden_size)
                pos_enc = torch.FloatTensor(pos_enc).to(device=self.device)
            elif pos_enc.dim() == 2:
                # pos_enc: (seq_len, hidden_size)
                # GPU memory only need (seq_len * hidden_size) even after expanded
                pos_enc = torch.FloatTensor(pos_enc).to(
                    device=self.device).expand(spec_masked.size(0),
                                               *pos_enc.size())
            mask_label = torch.ByteTensor(mask_label).to(device=self.device)
            attn_mask = torch.FloatTensor(attn_mask).to(device=self.device)
            spec_stacked = spec_stacked.to(device=self.device)

        return spec_masked, pos_enc, mask_label, attn_mask, spec_stacked  # (x, pos_enc, mask_label, attention_mask, y)

    def train(self):
        ''' Self-Supervised Pre-Training of Transformer Model'''

        pbar = tqdm(total=self.total_steps)
        while self.global_step <= self.total_steps:

            progress = tqdm(self.dataloader, desc="Iteration")

            step = 0
            loss_val = 0
            for batch_is_valid, *batch in progress:
                try:
                    if self.global_step > self.total_steps: break
                    if not batch_is_valid: continue
                    step += 1

                    spec_masked, pos_enc, mask_label, attn_mask, spec_stacked = self.process_data(
                        batch)
                    loss, pred_spec = self.model(spec_masked, pos_enc,
                                                 mask_label, attn_mask,
                                                 spec_stacked)

                    # Accumulate Loss
                    if self.gradient_accumulation_steps > 1:
                        loss = loss / self.gradient_accumulation_steps
                    if self.apex and self.args.multi_gpu:
                        raise NotImplementedError
                    elif self.apex:
                        self.optimizer.backward(loss)
                    elif self.args.multi_gpu:
                        loss = loss.sum()
                        loss.backward()
                    else:
                        loss.backward()
                    loss_val += loss.item()

                    # Update
                    if (step + 1) % self.gradient_accumulation_steps == 0:
                        if self.apex:
                            # modify learning rate with special warm up BERT uses
                            # if config.apex is False, BertAdam is used and handles this automatically
                            lr_this_step = self.learning_rate * self.warmup_linear.get_lr(
                                self.global_step, self.warmup_proportion)
                            for param_group in self.optimizer.param_groups:
                                param_group['lr'] = lr_this_step

                        # Step
                        grad_norm = torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), self.gradient_clipping)
                        if math.isnan(grad_norm):
                            print(
                                '[Runner] - Error : grad norm is NaN @ step ' +
                                str(self.global_step))
                        else:
                            self.optimizer.step()
                        self.optimizer.zero_grad()

                        if self.global_step % self.log_step == 0:
                            # Log
                            self.log.add_scalar('lr',
                                                self.optimizer.get_lr()[0],
                                                self.global_step)
                            self.log.add_scalar('loss', loss_val,
                                                self.global_step)
                            self.log.add_scalar('gradient norm', grad_norm,
                                                self.global_step)
                            progress.set_description("Loss %.4f" % (loss_val))

                        if self.global_step % self.save_step == 0:
                            self.save_model('states')
                            mask_spec = self.up_sample_frames(
                                spec_masked[0], return_first=True)
                            pred_spec = self.up_sample_frames(
                                pred_spec[0], return_first=True)
                            true_spec = self.up_sample_frames(
                                spec_stacked[0], return_first=True)
                            mask_spec = plot_spectrogram_to_numpy(
                                mask_spec.data.cpu().numpy())
                            pred_spec = plot_spectrogram_to_numpy(
                                pred_spec.data.cpu().numpy())
                            true_spec = plot_spectrogram_to_numpy(
                                true_spec.data.cpu().numpy())
                            self.log.add_image('mask_spec', mask_spec,
                                               self.global_step)
                            self.log.add_image('pred_spec', pred_spec,
                                               self.global_step)
                            self.log.add_image('true_spec', true_spec,
                                               self.global_step)

                        loss_val = 0
                        pbar.update(1)
                        self.global_step += 1

                except RuntimeError as e:
                    if 'CUDA out of memory' in str(e):
                        print('CUDA out of memory at step: ', self.global_step)
                        torch.cuda.empty_cache()
                        self.optimizer.zero_grad()
                    else:
                        raise

        pbar.close()
        self.log.close()
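
# A minimal standalone sketch (assumed, not code taken from the example above) of the
# linear warmup-then-decay multiplier that the apex branch applies by hand through
# warmup_linear.get_lr(); BertAdam's built-in 'warmup_linear' schedule follows the
# same general shape internally.
def warmup_linear_multiplier(progress, warmup=0.1):
    """progress = global_step / total_steps, warmup = warmup proportion."""
    if progress < warmup:
        return progress / warmup  # linear ramp from 0 to 1
    return max((1.0 - progress) / (1.0 - warmup), 0.0)  # linear decay back to 0

# e.g. lr_this_step = learning_rate * warmup_linear_multiplier(global_step / total_steps)
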
class KDLearner(object):
    def __init__(self,
                 args,
                 device,
                 student_model,
                 teacher_model=None,
                 num_train_optimization_steps=None):
        self.args = args
        self.device = device
        self.n_gpu = torch.cuda.device_count()
        self.student_model = student_model
        self.teacher_model = teacher_model
        self.num_train_optimization_steps = num_train_optimization_steps
        self._check_params()
        self.name = 'kd_'  # learner suffix for saving

    def build(self, lr=None):
        self.prev_global_step = 0
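        # Select the KD stage from the two distillation flags:
        #   rep/attn only -> kd_stage1, logits only -> kd_stage2, both -> kd_joint, neither -> nokd.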
        if self.args.distill_rep_attn and not self.args.distill_logit:
            self.stage = 'kd_stage1'
        elif self.args.distill_logit and not self.args.distill_rep_attn:
            self.stage = 'kd_stage2'
        elif self.args.distill_logit and self.args.distill_rep_attn:
            self.stage = 'kd_joint'
        else:
            self.stage = 'nokd'
        self.output_dir = os.path.join(self.args.output_dir, self.stage)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        param_optimizer = list(self.student_model.named_parameters())
        self.clip_params = {}
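        # Collect learned clipping / step-size parameters (names containing 'clip_');
        # they get their own learning rate and weight decay in the parameter groups below.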
        for k, v in param_optimizer:
            if 'clip_' in k:
                self.clip_params[k] = v

        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay) and 'clip_' not in n
                ],
                'weight_decay':
                self.args.weight_decay
            },
            {
                'params': [
                    p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay) and 'clip_' not in n
                ],
                'weight_decay':
                0.0
            },
            {
                'params': list(self.clip_params.values()),
                'lr': self.args.clip_lr,
                'weight_decay': self.args.clip_wd
            },
        ]

        schedule = 'warmup_linear'
        learning_rate = self.args.learning_rate if not lr else lr
        self.optimizer = BertAdam(optimizer_grouped_parameters,
                                  schedule=schedule,
                                  lr=learning_rate,
                                  warmup=self.args.warmup_proportion,
                                  t_total=self.num_train_optimization_steps)
        logging.info("Optimizer prepared.")
        self._check_quantized_modules()
        self._setup_grad_scale_stats()

    def eval(self, model, dataloader, features, examples, dataset):
        all_results = []
        for _, batch_ in tqdm(enumerate(dataloader)):
            batch_ = tuple(t.to(self.device) for t in batch_)
            input_ids, input_mask, segment_ids, example_indices = batch_
            with torch.no_grad():
                (batch_start_logits,
                 batch_end_logits), _, _ = model(input_ids, segment_ids,
                                                 input_mask)
            for i, example_index in enumerate(example_indices):
                start_logits = batch_start_logits[i].detach().cpu().tolist()
                end_logits = batch_end_logits[i].detach().cpu().tolist()
                eval_feature = features[example_index.item()]
                unique_id = int(eval_feature.unique_id)
                all_results.append(
                    RawResult(unique_id=unique_id,
                              start_logits=start_logits,
                              end_logits=end_logits))

        return write_predictions(examples, features, all_results,
                                 self.args.n_best_size,
                                 self.args.max_answer_length, True, False,
                                 self.args.version_2_with_negative,
                                 self.args.null_score_diff_threshold, dataset)

    def train(self, train_dataloader, eval_dataloader, eval_features,
              eval_examples, dev_dataset):
        """ quant-aware pretraining + KD """

        # Prepare loss functions
        loss_mse = MSELoss()
        self.teacher_model.eval()
        teacher_results = self.eval(self.teacher_model, eval_dataloader,
                                    eval_features, eval_examples, dev_dataset)
        logging.info("Teacher network evaluation")
        for key in sorted(teacher_results.keys()):
            logging.info("  %s = %s", key, str(teacher_results[key]))

        # self.teacher_model.train()  # switch to train mode to supervise students

        # Train and evaluate
        # num_layers = self.student_model.config.num_hidden_layers + 1
        global_step = 0
        best_dev_f1 = 0.0
        output_eval_file = os.path.join(self.output_dir, "eval_results.txt")

        logging.info(" Distill rep attn: %d, Distill logit: %d" %
                     (self.args.distill_rep_attn, self.args.distill_logit))
        logging.info("  Batch size = %d", self.args.batch_size)
        logging.info("  Num steps = %d", self.num_train_optimization_steps)

        global_tr_loss = 0  # record global average training loss to plot

        for epoch_ in range(int(self.args.num_train_epochs)):

            tr_loss = 0.
            tr_att_loss = 0.
            tr_rep_loss = 0.
            tr_cls_loss = 0.

            for step, batch in enumerate(train_dataloader):

                self.student_model.train()
                batch = tuple(t.to(self.device) for t in batch)

                input_ids, input_mask, segment_ids, start_positions, end_positions = batch

                att_loss = 0.
                rep_loss = 0.
                cls_loss = 0.
                rep_loss_layerwise = []
                att_loss_layerwise = []
                loss = 0.
                if self.args.distill_logit or self.args.distill_rep_attn:
                    # use distillation
                    student_logits, student_atts, student_reps = self.student_model(
                        input_ids, segment_ids, input_mask)
                    with torch.no_grad():
                        teacher_logits, teacher_atts, teacher_reps = self.teacher_model(
                            input_ids, segment_ids, input_mask)

                    # NOTE: config loss according to stage
                    if self.args.distill_logit:
                        soft_start_ce_loss = soft_cross_entropy(
                            student_logits[0], teacher_logits[0])
                        soft_end_ce_loss = soft_cross_entropy(
                            student_logits[1], teacher_logits[1])
                        cls_loss = soft_start_ce_loss + soft_end_ce_loss
                        loss += cls_loss
                        tr_cls_loss += cls_loss.item()

                    if self.args.distill_rep_attn:
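                        # Attention tensors still carry the additive mask (large negative values
                        # at padded positions); zero those entries so padding does not dominate the MSE.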
                        for student_att, teacher_att in zip(
                                student_atts, teacher_atts):
                            student_att = torch.where(
                                student_att <= -1e2,
                                torch.zeros_like(student_att).to(self.device),
                                student_att)
                            teacher_att = torch.where(
                                teacher_att <= -1e2,
                                torch.zeros_like(teacher_att).to(self.device),
                                teacher_att)

                            tmp_loss = loss_mse(student_att, teacher_att)
                            att_loss += tmp_loss
                            att_loss_layerwise.append(tmp_loss.item())

                        for student_rep, teacher_rep in zip(
                                student_reps, teacher_reps):
                            tmp_loss = loss_mse(student_rep, teacher_rep)
                            rep_loss += tmp_loss
                            rep_loss_layerwise.append(tmp_loss.item())
                        # rep_loss_layerwise = rep_loss_layerwise[1:]  # remove embed dist

                        tr_att_loss += att_loss.item()
                        tr_rep_loss += rep_loss.item()
                        loss += rep_loss + att_loss

                else:
                    cls_loss, _, _ = self.student_model(
                        input_ids, segment_ids, input_mask, start_positions,
                        end_positions)
                    loss += cls_loss
                    tr_cls_loss += cls_loss.item()

                if self.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if self.args.gradient_accumulation_steps > 1:
                    loss = loss / self.args.gradient_accumulation_steps

                loss.backward()

                tr_loss += loss.item()
                global_tr_loss += loss.item()

                # evaluation and save model
                if global_step % self.args.eval_step == 0 or \
                        global_step == len(train_dataloader)-1:

                    logging.info(
                        "***** KDLearner %s Running evaluation, Job_id: %s *****"
                        % (self.stage, self.args.job_id))
                    logging.info("  Epoch = {} iter {} step".format(
                        epoch_, global_step))
                    logging.info(f"  Previous best = {best_dev_f1}")

                    loss = tr_loss / (step + 1)
                    global_avg_loss = global_tr_loss / (global_step + 1)
                    cls_loss = tr_cls_loss / (step + 1)
                    att_loss = tr_att_loss / (step + 1)
                    rep_loss = tr_rep_loss / (step + 1)

                    self.student_model.eval()
                    result = self.eval(self.student_model, eval_dataloader,
                                       eval_features, eval_examples,
                                       dev_dataset)
                    result['global_step'] = global_step
                    result['train_cls_loss'] = cls_loss
                    result['att_loss'] = att_loss
                    result['rep_loss'] = rep_loss
                    result['loss'] = loss
                    result['global_loss'] = global_avg_loss

                    if self.args.distill_rep_attn:
                        # add the layerwise loss on rep and att
                        logging.info("embedding layer rep_loss: %.8f" %
                                     (rep_loss_layerwise[0]))
                        rep_loss_layerwise = rep_loss_layerwise[1:]
                        for lid in range(len(rep_loss_layerwise)):
                            logging.info("layer %d rep_loss: %.8f" %
                                         (lid + 1, rep_loss_layerwise[lid]))
                            logging.info("layer %d att_loss: %.8f" %
                                         (lid + 1, att_loss_layerwise[lid]))

                    result_to_file(result, output_eval_file)

                    save_model = False

                    if result['f1'] > best_dev_f1:
                        best_dev_f1 = result['f1']
                        save_model = True

                    if save_model:
                        self._save()

                # if self.args.quantize_weight:
                # self.quanter.restore()

                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    global_step += 1

    def _save(self):
        logging.info("******************** Save model ********************")
        model_to_save = self.student_model.module if hasattr(
            self.student_model, 'module') else self.student_model

        output_model_file = os.path.join(self.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(self.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)

    def _check_params(self):
        if not self.args.do_eval:
            assert self.teacher_model, 'teacher model must not be None in train mode.'

    def _check_quantized_modules(self):
        logging.info("Checking module types.")
        for k, m in self.student_model.named_modules():
            if isinstance(m, torch.nn.Linear):
                logging.info('%s: %s' % (k, str(m)))

    def _setup_grad_scale_stats(self):
        self.grad_scale_stats = {'weight': None, \
                                 'bias': None, \
                                 'layer_norm': None, \
                                 'step_size/clip_val': None}
        self.ema_grad = 0.9

    def check_grad_scale(self):
        logging.info("Check grad scale ratio: grad/w")
        for k, v in self.student_model.named_parameters():
            if v.grad is not None:
                has_grad = True
                ratio = v.grad.norm(p=2) / v.data.norm(p=2)
                # print('%.6e, %s' % (ratio.float(), k))
            else:
                has_grad = False
                logging.info('params: %s has no gradient' % k)
                continue

            # update grad_scale stats
            if 'weight' in k and v.ndimension() == 2:
                key = 'weight'
            elif 'bias' in k and v.ndimension() == 1:
                key = 'bias'
            elif 'LayerNorm' in k and 'weight' in k and v.ndimension() == 1:
                key = 'layer_norm'
            elif 'clip_' in k:
                key = 'step_size/clip_val'
            else:
                key = None

            if key and has_grad:
                if self.grad_scale_stats[key]:
                    self.grad_scale_stats[
                        key] = self.ema_grad * self.grad_scale_stats[key] + (
                            1 - self.ema_grad) * ratio
                else:
                    self.grad_scale_stats[key] = ratio

        for (key, val) in self.grad_scale_stats.items():
            if val is not None:
                logging.info('%.6e, %s' % (val, key))
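
# Minimal standalone illustration (not part of the example above) of the grad/weight
# norm ratio and EMA smoothing that check_grad_scale() logs; the tensor here is made
# up for the sketch.
import torch

w = torch.randn(4, 4, requires_grad=True)
(w ** 2).sum().backward()
ratio = w.grad.norm(p=2) / w.data.norm(p=2)

ema_grad = 0.9
stat = None  # running statistic, as in self.grad_scale_stats[key]
stat = ratio if stat is None else ema_grad * stat + (1 - ema_grad) * ratio
print('grad/weight ratio: %.6e, EMA: %.6e' % (ratio.item(), stat.item()))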
Exemple #8
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_file_path",
                        default=None,
                        type=str,
                        required=True)

    # Required parameters
    parser.add_argument("--teacher_model",
                        default=None,
                        type=str,
                        required=True)
    parser.add_argument("--student_model",
                        default=None,
                        type=str,
                        required=True)
    parser.add_argument("--output_dir", default=None, type=str, required=True)

    # Other parameters
    parser.add_argument(
        "--max_seq_len",
        default=128,
        type=int,
        help="The maximum total input sequence length after WordPiece \n"
        " tokenization. Sequences longer than this will be truncated, \n"
        "and sequences shorter than this will be padded.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-1,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing \n"
        "a backward/update pass.")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--continue_train',
                        action='store_true',
                        help='Whether to train from checkpoints')

    # Additional arguments
    parser.add_argument('--eval_step', type=int, default=1000)

    # This is used for running on Huawei Cloud.
    parser.add_argument('--data_url', type=str, default="")

    args = parser.parse_args()
    logger.info('args:{}'.format(args))

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.teacher_model,
                                              do_lower_case=args.do_lower_case)

    dataset = PregeneratedDataset(args.train_file_path,
                                  tokenizer,
                                  max_seq_len=args.max_seq_len)
    total_train_examples = len(dataset)

    num_train_optimization_steps = int(
        total_train_examples / args.train_batch_size /
        args.gradient_accumulation_steps * args.num_train_epochs)
    if args.local_rank != -1:
        # num_train_optimization_steps already includes num_train_epochs above; only split across workers here.
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    if args.continue_train:
        student_model = TinyBertForPreTraining.from_pretrained(
            args.student_model)
    else:
        student_model = TinyBertForPreTraining.from_scratch(args.student_model)
    teacher_model = BertModel.from_pretrained(args.teacher_model)

    # student_model = TinyBertForPreTraining.from_scratch(args.student_model, fit_size=teacher_model.config.hidden_size)
    student_model.to(device)
    teacher_model.to(device)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        teacher_model = DDP(teacher_model)
    elif n_gpu > 1:
        student_model = torch.nn.DataParallel(student_model)
        teacher_model = torch.nn.DataParallel(teacher_model)

    size = 0
    for n, p in student_model.named_parameters():
        logger.info('n: {}'.format(n))
        logger.info('p: {}'.format(p.nelement()))
        size += p.nelement()

    logger.info('Total parameters: {}'.format(size))

    # Prepare optimizer
    param_optimizer = list(student_model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    loss_mse = MSELoss()
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)

    logging.info("***** Running training *****")
    logging.info("  Num examples = {}".format(total_train_examples))
    logging.info("  Batch size = %d", args.train_batch_size)
    logging.info("  Num steps = %d", num_train_optimization_steps)

    if 1:
        if args.local_rank == -1:
            train_sampler = RandomSampler(dataset)
        else:
            train_sampler = DistributedSampler(dataset)
        train_dataloader = DataLoader(dataset,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)
        tr_loss = 0.
        tr_att_loss = 0.
        tr_rep_loss = 0.
        student_model.train()
        global_step = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        for epoch in range(int(args.num_train_epochs)):
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration", ascii=True)):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids = batch
                if input_ids.size()[0] != args.train_batch_size:
                    continue

                att_loss = 0.
                rep_loss = 0.

                student_atts, student_reps = student_model(
                    input_ids, segment_ids, input_mask)
                teacher_reps, teacher_atts, _ = teacher_model(
                    input_ids, segment_ids, input_mask)
                # speedup 1.5x
                teacher_reps = [
                    teacher_rep.detach() for teacher_rep in teacher_reps
                ]
                teacher_atts = [
                    teacher_att.detach() for teacher_att in teacher_atts
                ]

                teacher_layer_num = len(teacher_atts)
                student_layer_num = len(student_atts)
                assert teacher_layer_num % student_layer_num == 0
                layers_per_block = int(teacher_layer_num / student_layer_num)
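                # Each student layer mimics the last teacher layer of its block, e.g. 12 teacher /
                # 4 student layers -> teacher attention maps at indices 2, 5, 8, 11.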
                new_teacher_atts = [
                    teacher_atts[i * layers_per_block + layers_per_block - 1]
                    for i in range(student_layer_num)
                ]

                for student_att, teacher_att in zip(student_atts,
                                                    new_teacher_atts):
                    student_att = torch.where(
                        student_att <= -1e2,
                        torch.zeros_like(student_att).to(device), student_att)
                    teacher_att = torch.where(
                        teacher_att <= -1e2,
                        torch.zeros_like(teacher_att).to(device), teacher_att)
                    att_loss += loss_mse(student_att, teacher_att)

                new_teacher_reps = [
                    teacher_reps[i * layers_per_block]
                    for i in range(student_layer_num + 1)
                ]
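                # i.e. every layers_per_block-th hidden state, from the embedding output (index 0)
                # up to the final teacher layer.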
                new_student_reps = student_reps

                for student_rep, teacher_rep in zip(new_student_reps,
                                                    new_teacher_reps):
                    rep_loss += loss_mse(student_rep, teacher_rep)

                loss = att_loss + rep_loss

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_att_loss += att_loss.item()
                tr_rep_loss += rep_loss.item()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1

                mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
                mean_att_loss = tr_att_loss * args.gradient_accumulation_steps / nb_tr_steps
                mean_rep_loss = tr_rep_loss * args.gradient_accumulation_steps / nb_tr_steps
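                # The * gradient_accumulation_steps factor undoes the per-step loss scaling,
                # so the logged means reflect the full (un-scaled) per-batch losses.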
                if step % 100 == 0:
                    logger.info(f'mean_loss = {mean_loss}')

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                    if (global_step + 1) % args.eval_step == 0:
                        result = {}
                        result['global_step'] = global_step
                        result['loss'] = mean_loss
                        result['att_loss'] = mean_att_loss
                        result['rep_loss'] = mean_rep_loss
                        output_eval_file = os.path.join(
                            args.output_dir, "log.txt")
                        with open(output_eval_file, "a") as writer:
                            logger.info("***** Eval results *****")
                            for key in sorted(result.keys()):
                                logger.info("  %s = %s", key, str(result[key]))
                                writer.write("%s = %s\n" %
                                             (key, str(result[key])))

                        # Save a trained model
                        prefix = f"step_{step}"
                        save_model(prefix, student_model, args.output_dir)

            prefix = f"epoch_{epoch}"
            save_model(prefix, student_model, args.output_dir)
Exemple #9
0
class Solver():
    ''' Super class Solver for all kinds of tasks'''
    def __init__(self, config, paras):

        # General Settings
        self.config = config
        self.paras = paras
        self.transformer_config = config['transformer']
        self.device = torch.device('cuda') if (
            self.paras.gpu
            and torch.cuda.is_available()) else torch.device('cpu')
        if torch.cuda.is_available(): self.verbose('CUDA is available!')

        # path and directories
        self.exp_name = paras.name
        if self.exp_name is None:
            self.exp_name = '_'.join([
                paras.config.split('/')[-1].replace('.yaml', ''),
                'sd' + str(paras.seed)
            ])
        self.ckpdir = paras.ckpdir
        self.expdir = os.path.join(self.ckpdir, self.exp_name)

        self.load = paras.load
        # only for test
        self.ckpt = os.path.join(self.ckpdir, paras.ckpt)

        # model
        self.load_model_list = config['solver']['load_model_list']
        self.duo_feature = config['solver']['duo_feature']
        self.output_dim = 1025 if self.duo_feature else None  # output dim is the same as input dim if not using duo features
        if 'input_dim' in self.transformer_config:
            self.input_dim = self.transformer_config['input_dim']
        else:
            raise ValueError(
                'Please update your config file to include the attribute `input_dim`.'
            )

    def verbose(self, msg, end='\n'):
        ''' Verbose function for printing information to stdout '''
        if self.paras.verbose:
            print('[SOLVER] - ', msg, end=end)

    def load_data(self, split='train'):
        ''' Load data for training / testing'''
        if split == 'train':
            self.verbose('Loading source data ' +
                         str(self.config['dataloader']['train_set']) +
                         ' from ' + self.config['dataloader']['data_path'])
            if self.duo_feature:
                self.verbose('Loading target data ' +
                             str(self.config['dataloader']['train_set']) +
                             ' from ' +
                             self.config['dataloader']['target_path'])
        elif split == 'test':
            self.verbose('Loading testing data ' +
                         str(self.config['dataloader']['test_set']) +
                         ' from ' + self.config['dataloader']['data_path'])
        else:
            raise NotImplementedError('Invalid `split` argument!')

        if self.duo_feature:
            setattr(self, 'dataloader', get_Dataloader(split, load='duo', use_gpu=self.paras.gpu, \
                    mam_config=self.transformer_config, **self.config['dataloader'])) # run_mam is automatically performed
        else:
            setattr(self, 'dataloader', get_Dataloader(split, load='acoustic', use_gpu=self.paras.gpu, run_mam=True, \
                    mam_config=self.transformer_config, **self.config['dataloader']))

    def set_model(self,
                  inference=False,
                  with_head=False,
                  from_path=None,
                  output_attention=False):
        self.verbose('Initializing Transformer model.')

        # Build the Transformer model with speech prediction head
        self.model_config = TransformerConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.with_head = with_head
        self.output_attention = output_attention

        if not inference or with_head:
            self.model = TransformerForMaskedAcousticModel(
                self.model_config, self.input_dim, self.output_dim,
                self.output_attention).to(self.device)
            self.transformer = self.model.Transformer
            if self.paras.multi_gpu:
                self.model = torch.nn.DataParallel(self.model)
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel()
                    for p in self.model.parameters() if p.requires_grad)))

        if inference and not with_head:
            self.transformer = TransformerModel(
                self.model_config, self.input_dim,
                self.output_attention).to(self.device)
            if self.paras.multi_gpu:
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel() for p in self.transformer.parameters()
                    if p.requires_grad)))
            self.transformer.eval()
        elif inference and with_head:
            self.model.eval()
        elif not inference:
            self.model.train()

            # Setup optimizer
            param_optimizer = list(self.model.named_parameters())

            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [{
                'params': [
                    p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.01
            }, {
                'params': [
                    p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.0
            }]

            if self.apex:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError(
                        "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
                    )

                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if self.config['optimizer']['loss_scale'] == 0:
                    self.optimizer = FP16_Optimizer(optimizer,
                                                    dynamic_loss_scale=True)
                else:
                    self.optimizer = FP16_Optimizer(
                        optimizer,
                        static_loss_scale=self.config['optimizer']
                        ['loss_scale'])
                self.warmup_linear = WarmupLinearSchedule(
                    warmup=self.warmup_proportion, t_total=self.total_steps)
            else:
                self.optimizer = BertAdam(optimizer_grouped_parameters,
                                          lr=self.learning_rate,
                                          warmup=self.warmup_proportion,
                                          t_total=self.total_steps)
        else:
            raise NotImplementedError('Invalid Arguments!')

        if self.load:  # This will be set to True by default when Tester is running set_model()
            self.load_model(inference=inference,
                            with_head=with_head,
                            from_path=from_path)

    def save_model(self, name='states', model_all=True, to_path=None):
        if model_all:
            all_states = {
                'SpecHead':
                self.model.SpecHead.state_dict() if not self.paras.multi_gpu
                else self.model.module.SpecHead.state_dict(),
                'Transformer':
                self.transformer.state_dict() if not self.paras.multi_gpu else
                self.transformer.module.state_dict(),
                'Optimizer':
                self.optimizer.state_dict(),
                'Global_step':
                self.global_step,
                'Settings': {
                    'Config': self.config,
                    'Paras': self.paras,
                },
            }
        else:
            all_states = {
                'Transformer':
                self.transformer.state_dict() if not self.paras.multi_gpu else
                self.transformer.module.state_dict(),
                'Settings': {
                    'Config': self.config,
                    'Paras': self.paras,
                },
            }
        if to_path is None:
            new_model_path = '{}/{}-{}.ckpt'.format(self.expdir, name,
                                                    self.global_step)
        else:
            new_model_path = to_path
        torch.save(all_states, new_model_path)
        self.model_kept.append(new_model_path)

        if len(self.model_kept) >= self.max_keep:
            os.remove(self.model_kept[0])
            self.model_kept.pop(0)

    def load_model(self, inference=False, with_head=False, from_path=None):
        if from_path is not None:
            self.verbose('Load model from {}'.format(from_path))
            all_states = torch.load(from_path, map_location='cpu')
            self.load_model_list = ['Transformer']
        else:
            self.verbose('Load model from {}'.format(self.ckpt))
            all_states = torch.load(self.ckpt, map_location='cpu')

        if 'SpecHead' in self.load_model_list:
            if not inference or with_head:
                try:
                    if not self.paras.multi_gpu:
                        self.model.SpecHead.load_state_dict(
                            all_states['SpecHead'])
                    else:
                        self.model.module.SpecHead.load_state_dict(
                            all_states['SpecHead'])
                    self.verbose('[SpecHead] - Loaded')
                except:
                    self.verbose('[SpecHead - X]')

        if 'Transformer' in self.load_model_list:
            try:
                state_dict = all_states['Transformer']

                # Load from a PyTorch state_dict
                old_keys = []
                new_keys = []
                for key in state_dict.keys():
                    new_key = None
                    if 'gamma' in key:
                        new_key = key.replace('gamma', 'weight')
                    if 'beta' in key:
                        new_key = key.replace('beta', 'bias')
                    if new_key:
                        old_keys.append(key)
                        new_keys.append(new_key)
                for old_key, new_key in zip(old_keys, new_keys):
                    state_dict[new_key] = state_dict.pop(old_key)

                missing_keys = []
                unexpected_keys = []
                error_msgs = []
                # copy state_dict so _load_from_state_dict can modify it
                metadata = getattr(state_dict, '_metadata', None)
                state_dict = state_dict.copy()
                if metadata is not None:
                    state_dict._metadata = metadata

                def load(module, prefix=''):
                    local_metadata = {} if metadata is None else metadata.get(
                        prefix[:-1], {})
                    module._load_from_state_dict(state_dict, prefix,
                                                 local_metadata, True,
                                                 missing_keys, unexpected_keys,
                                                 error_msgs)
                    for name, child in module._modules.items():
                        if child is not None:
                            load(child, prefix + name + '.')

                # perform load
                if not self.paras.multi_gpu:
                    load(self.transformer)
                else:
                    load(self.transformer.module)

                if len(missing_keys) > 0:
                    self.verbose(
                        "Weights of {} not initialized from pretrained model: {}"
                        .format(self.transformer.__class__.__name__,
                                missing_keys))
                if len(unexpected_keys) > 0:
                    self.verbose(
                        "Weights from pretrained model not used in {}: {}".
                        format(self.transformer.__class__.__name__,
                               unexpected_keys))
                if len(error_msgs) > 0:
                    raise RuntimeError(
                        'Error(s) in loading state_dict for {}:\n\t{}'.format(
                            self.transformer.__class__.__name__,
                            "\n\t".join(error_msgs)))
                self.verbose('[Transformer] - Loaded')
            except:
                self.verbose('[Transformer - X]')

        if 'Optimizer' in self.load_model_list and not inference:
            try:
                self.optimizer.load_state_dict(all_states['Optimizer'])
                for state in self.optimizer.state.values():
                    for k, v in state.items():
                        if torch.is_tensor(v):
                            state[k] = v.cuda()
                self.verbose('[Optimizer] - Loaded')
            except:
                self.verbose('[Optimizer - X]')

        if 'Global_step' in self.load_model_list and not inference:
            try:
                self.global_step = all_states['Global_step']
                self.verbose('[Global_step] - Loaded')
            except:
                self.verbose('[Global_step - X]')

        self.verbose('Model loading complete!')

    def up_sample_frames(self, spec, return_first=False):
        if len(spec.shape) != 3:
            spec = spec.unsqueeze(0)
            assert (len(spec.shape) == 3
                    ), 'Input should have acoustic feature of shape BxTxD'
        # spec shape: [batch_size, sequence_length // downsample_rate, output_dim * downsample_rate]
        spec_flatten = spec.view(spec.shape[0], spec.shape[1] * self.dr,
                                 spec.shape[2] // self.dr)
        if return_first: return spec_flatten[0]
        return spec_flatten  # spec_flatten shape: [batch_size, sequence_length * downsample_rate, output_dim // downsample_rate]

    def down_sample_frames(self, spec):
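        # Stack every self.dr consecutive frames along the feature axis, e.g. dr=3 turns
        # (B, 1200, 80) into (B, 400, 240); trailing frames that do not fill a full block are dropped.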
        left_over = spec.shape[1] % self.dr
        if left_over != 0: spec = spec[:, :-left_over, :]
        spec_stacked = spec.view(spec.shape[0], spec.shape[1] // self.dr,
                                 spec.shape[2] * self.dr)
        return spec_stacked

    def position_encoding(self, seq_len, batch_size=None, padding_idx=None):
        ''' Sinusoid position encoding table '''
        def cal_angle(position, hid_idx):
            return position / np.power(10000, 2 *
                                       (hid_idx // 2) / self.hidden_size)

        def get_posi_angle_vec(position):
            return [
                cal_angle(position, hid_j) for hid_j in range(self.hidden_size)
            ]

        sinusoid_table = np.array(
            [get_posi_angle_vec(pos_i) for pos_i in range(seq_len)])

        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1

        if padding_idx is not None:
            sinusoid_table[
                padding_idx:] = 0.  # zero vector for padding dimension

        if batch_size is not None:
            batch_sinusoid_table = np.repeat(sinusoid_table[np.newaxis, ...],
                                             batch_size,
                                             axis=0)
            return batch_sinusoid_table  # (batch_size, seq_len, hidden_size)
        else:
            return sinusoid_table  # (seq_len, hidden_size)
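
# A minimal standalone sketch (not part of the Solver class above) of the same
# sinusoidal position-encoding table built by Solver.position_encoding, written with
# vectorized numpy; seq_len and hidden_size below are toy values for illustration.
import numpy as np

def sinusoid_table(seq_len, hidden_size):
    # angle(pos, i) = pos / 10000 ** (2 * (i // 2) / hidden_size)
    positions = np.arange(seq_len)[:, None]
    dims = np.arange(hidden_size)[None, :]
    table = positions / np.power(10000, 2 * (dims // 2) / hidden_size)
    table[:, 0::2] = np.sin(table[:, 0::2])  # even dims -> sin
    table[:, 1::2] = np.cos(table[:, 1::2])  # odd dims  -> cos
    return table  # (seq_len, hidden_size)

# Example: sinusoid_table(4, 8) should match Solver.position_encoding(4) for a model
# configured with hidden_size == 8.
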
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--teacher_model",
                        default=None,
                        type=str,
                        help="The teacher model dir.")
    parser.add_argument("--student_model",
                        default=None,
                        type=str,
                        required=True,
                        help="The student model dir.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay', '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")

    # added arguments
    parser.add_argument('--aug_train',
                        action='store_true')
    parser.add_argument('--eval_step',
                        type=int,
                        default=50)
    parser.add_argument('--pred_distill',
                        action='store_true')
    parser.add_argument('--data_url',
                        type=str,
                        default="")
    parser.add_argument('--temperature',
                        type=float,
                        default=1.)

    args = parser.parse_args()
    logger.info('The args: {}'.format(args))
    wandb.config.update(args)

    processors = {
        "race": RaceProcessor,
    }

    # intermediate distillation default parameters
    default_params = {
        "race": {"num_train_epochs": 3, "max_seq_length": 80},
    }

    # Prepare devices
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)

    logger.info("device: {} n_gpu: {}".format(device, n_gpu))

    # Prepare seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare task settings
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name in default_params:
        args.max_seq_length = default_params[task_name]["max_seq_length"]

    if not args.pred_distill and not args.do_eval:
        if task_name in default_params:
            args.num_train_epochs = default_params[task_name]["num_train_epochs"]

    if task_name not in processors:
        raise ValueError("Task not found: %s" % task_name)

    processor = processors[task_name]()
    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer.from_pretrained(args.student_model, do_lower_case=args.do_lower_case)

    if not args.do_eval:
        if not args.aug_train:
            train_examples = processor.get_train_examples(args.data_dir)
        else:
            train_examples = processor.get_aug_examples(args.data_dir)
        if args.gradient_accumulation_steps < 1:
            raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                args.gradient_accumulation_steps))

        args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs

        cached_features_file_train = os.path.join(
            args.data_dir,
            "cached_train_{}_{}_{}_tinybert".format(tokenizer.__class__.__name__, str(args.max_seq_length),
                                                    task_name, ),
        )

        if os.path.exists(cached_features_file_train):
            train_features = torch.load(cached_features_file_train)
        else:
            train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer)
            torch.save(train_features, cached_features_file_train)

        train_data, _ = get_tensor_data(train_features)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_examples = processor.get_dev_examples(args.data_dir)

    cached_features_file_eval = os.path.join(
        args.data_dir,
        "cached_dev_{}_{}_{}_tinybert".format(tokenizer.__class__.__name__, str(args.max_seq_length), task_name, ),
    )

    if os.path.exists(cached_features_file_eval):
        eval_features = torch.load(cached_features_file_eval)
    else:
        eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer)
        torch.save(eval_features, cached_features_file_eval)

    eval_data, eval_labels = get_tensor_data(eval_features)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    if not args.do_eval:
        teacher_model = TinyBertForMultipleChoice.from_pretrained(args.teacher_model)
        teacher_model.to(device)

    student_model = TinyBertForMultipleChoice.from_pretrained(args.student_model)
    student_model.to(device)
    wandb.watch(student_model, log='all')

    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        student_model.eval()
        result = do_eval(student_model, task_name, eval_dataloader,
                         device, eval_labels, num_labels)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            wandb.log(result)
    else:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        # Prepare optimizer
        param_optimizer = list(student_model.named_parameters())
        size = 0
        for n, p in student_model.named_parameters():
            logger.info('n: {}'.format(n))
            size += p.nelement()

        logger.info('Total parameters: {}'.format(size))
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        schedule = 'warmup_linear'
        if not args.pred_distill:
            schedule = 'none'

        optimizer = BertAdam(optimizer_grouped_parameters,
                             schedule=schedule,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

        if args.fp16:
            if not _has_apex:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            student_model, optimizer = amp.initialize(student_model, optimizer, opt_level='O1')

        if n_gpu > 1:
            student_model = torch.nn.DataParallel(student_model)
            teacher_model = torch.nn.DataParallel(teacher_model)

        # Prepare loss functions
        loss_mse = MSELoss()

        def soft_cross_entropy(predicts, targets):
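            # Cross-entropy of student log-probabilities against the teacher's softened probabilities, averaged over all logit entries.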
            student_likelihood = torch.nn.functional.log_softmax(predicts, dim=-1)
            targets_prob = torch.nn.functional.softmax(targets, dim=-1)
            return (- targets_prob * student_likelihood).mean()

        # Train and evaluate
        global_step = 0
        best_dev_acc = 0.0
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")

        for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0.
            tr_att_loss = 0.
            tr_rep_loss = 0.
            tr_cls_loss = 0.

            student_model.train()
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", ascii=True)):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, label_ids = batch
                if input_ids.size()[0] != args.train_batch_size:
                    continue

                att_loss = 0.
                rep_loss = 0.
                cls_loss = 0.

                student_logits, student_atts, student_reps = student_model(input_ids=input_ids,
                                                                           token_type_ids=segment_ids,
                                                                           attention_mask=input_mask,
                                                                           is_student=True)

                with torch.no_grad():
                    teacher_logits, teacher_atts, teacher_reps = teacher_model(input_ids=input_ids,
                                                                               token_type_ids=segment_ids,
                                                                               attention_mask=input_mask)

                if not args.pred_distill:
                    teacher_layer_num = len(teacher_atts)
                    student_layer_num = len(student_atts)
                    assert teacher_layer_num % student_layer_num == 0
                    layers_per_block = int(teacher_layer_num / student_layer_num)
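                    # Map each student layer to the last teacher layer of its block (uniform layer selection).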
                    new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1]
                                        for i in range(student_layer_num)]

                    for student_att, teacher_att in zip(student_atts, new_teacher_atts):
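                        # Masked positions carry large negative attention scores; zero them so padding does not dominate the MSE.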
                        student_att = torch.where(student_att <= -1e2, torch.zeros_like(student_att).to(device),
                                                  student_att)
                        teacher_att = torch.where(teacher_att <= -1e2, torch.zeros_like(teacher_att).to(device),
                                                  teacher_att)

                        tmp_loss = loss_mse(student_att, teacher_att)
                        att_loss += tmp_loss

                    new_teacher_reps = [teacher_reps[i * layers_per_block] for i in range(student_layer_num + 1)]
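                    # The representation lists include the embedding output, so student_layer_num + 1 hidden states are matched.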
                    new_student_reps = student_reps
                    for student_rep, teacher_rep in zip(new_student_reps, new_teacher_reps):
                        tmp_loss = loss_mse(student_rep, teacher_rep)
                        rep_loss += tmp_loss

                    loss = rep_loss + att_loss
                    tr_att_loss += att_loss.item()
                    tr_rep_loss += rep_loss.item()
                else:
                    cls_loss = soft_cross_entropy(student_logits / args.temperature, teacher_logits / args.temperature)

                    loss = cls_loss
                    tr_cls_loss += cls_loss.item()

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += label_ids.size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
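                    # One optimizer update per gradient_accumulation_steps mini-batches.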
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1)

                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                if (global_step + 1) % args.eval_step == 0:
                    logger.info("***** Running evaluation *****")
                    logger.info("  Epoch = {} iter {} step".format(epoch_, global_step))
                    logger.info("  Num examples = %d", len(eval_examples))
                    logger.info("  Batch size = %d", args.eval_batch_size)

                    student_model.eval()

                    loss = tr_loss / (step + 1)
                    cls_loss = tr_cls_loss / (step + 1)
                    att_loss = tr_att_loss / (step + 1)
                    rep_loss = tr_rep_loss / (step + 1)

                    result = {}
                    if args.pred_distill:
                        result = do_eval(student_model, task_name, eval_dataloader,
                                         device, eval_labels, num_labels)
                    result['global_step'] = global_step
                    result['cls_loss'] = cls_loss
                    result['att_loss'] = att_loss
                    result['rep_loss'] = rep_loss
                    result['loss'] = loss

                    wandb.log(result, step=global_step)

                    result_to_file(result, output_eval_file)

                    if not args.pred_distill:
                        save_model = True
                    else:
                        save_model = False

                        if result['acc'] > best_dev_acc:
                            best_dev_acc = result['acc']
                            save_model = True

                    if save_model:
                        logger.info("***** Save model *****")

                        model_to_save = student_model.module if hasattr(student_model, 'module') else student_model

                        model_name = WEIGHTS_NAME
                        # if not args.pred_distill:
                        #     model_name = "step_{}_{}".format(global_step, WEIGHTS_NAME)
                        output_model_file = os.path.join(args.output_dir, model_name)
                        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

                        torch.save(model_to_save.state_dict(), output_model_file)
                        model_to_save.config.to_json_file(output_config_file)
                        tokenizer.save_vocabulary(args.output_dir)

                        if oncloud:
                            logging.info(mox.file.list_directory(args.output_dir, recursive=True))
                            logging.info(mox.file.list_directory('.', recursive=True))
                            mox.file.copy_parallel(args.output_dir, args.data_url)
                            mox.file.copy_parallel('.', args.data_url)

                    student_model.train()
Exemple #11
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--model",
                        default=None,
                        type=str,
                        required=True,
                        help="The model dir.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_eval",
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.06,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )

    parser.add_argument('--weight_bit',
                        type=int,
                        default=4,
                        help="Number of bits for weight.")
    parser.add_argument('--quant_group_number',
                        type=int,
                        default=1,
                        help="Number of quantization groups.")
    parser.add_argument('--activation_bit',
                        type=int,
                        default=8,
                        help="Number of bits for activations.")
    # added arguments
    parser.add_argument('--aug_train',
                        type=str,
                        default='none',
                        help="Whether to train with augmented data.")
    parser.add_argument('--eval_step', type=int, default=50)
    parser.add_argument('--data_url', type=str, default="")
    parser.add_argument('--temperature', type=float, default=1.)

    args = parser.parse_args()
    logger.info('The args: {}'.format(args))

    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mnli-mm": MnliMismatchedProcessor,
        "mrpc": MrpcProcessor,
        "sst-2": Sst2Processor,
        "sts-b": StsbProcessor,
        "qqp": QqpProcessor,
        "qnli": QnliProcessor,
        "rte": RteProcessor,
        "wnli": WnliProcessor
    }

    output_modes = {
        "cola": "classification",
        "mnli": "classification",
        "mrpc": "classification",
        "sst-2": "classification",
        "sts-b": "regression",
        "qqp": "classification",
        "qnli": "classification",
        "rte": "classification",
        "wnli": "classification"
    }

    # intermediate distillation default parameters
    default_params = {
        "cola": {
            "num_train_epochs": 10,
            "max_seq_length": 64,
            "learning_rate": 2e-5,
            "train_batch_size": 32
        },
        "sst-2": {
            "num_train_epochs": 10,
            "max_seq_length": 64,
            "learning_rate": 2e-5,
            "train_batch_size": 32
        },
        "mnli": {
            "num_train_epochs": 10,
            "max_seq_length": 128,
            "learning_rate": 1e-5,
            "train_batch_size": 32
        },
        "mrpc": {
            "num_train_epochs": 10,
            "max_seq_length": 128,
            "learning_rate": 1e-5,
            "train_batch_size": 32
        },
        "sts-b": {
            "num_train_epochs": 10,
            "max_seq_length": 128,
            "learning_rate": 2e-5,
            "train_batch_size": 16
        },
        "qqp": {
            "num_train_epochs": 10,
            "max_seq_length": 128,
            "learning_rate": 1e-5,
            "train_batch_size": 32
        },
        "qnli": {
            "num_train_epochs": 10,
            "max_seq_length": 128,
            "learning_rate": 1e-5,
            "train_batch_size": 32
        },
        "rte": {
            "num_train_epochs": 10,
            "max_seq_length": 128,
            "learning_rate": 2e-5,
            "train_batch_size": 16
        }
    }

    acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
    corr_tasks = ["sts-b"]
    mcc_tasks = ["cola"]

    # Prepare devices
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    logger.info("device: {} n_gpu: {}".format(device, n_gpu))

    # Prepare seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare task settings
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        # raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
        if os.path.exists(os.path.join(args.output_dir, "eval_results.txt")):
            os.remove(os.path.join(args.output_dir, "eval_results.txt"))

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if not args.num_train_epochs:
        args.num_train_epochs = default_params[task_name]["num_train_epochs"]
    if not args.learning_rate:
        args.learning_rate = default_params[task_name]["learning_rate"]
    if not args.train_batch_size:
        args.train_batch_size = default_params[task_name]["train_batch_size"]
    if not args.max_seq_length:
        args.max_seq_length = default_params[task_name]["max_seq_length"]

    # print(task_name in default_params, args.num_train_epochs, args.max_seq_length)
    if task_name not in processors:
        raise ValueError("Task not found: %s" % task_name)

    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              do_lower_case=args.do_lower_case)

    if not args.do_eval:
        if args.aug_train == 'none':
            train_examples = processor.get_train_examples(args.data_dir)
        else:
            train_examples = processor.get_aug_examples(
                args.data_dir, args.aug_train)
        if args.gradient_accumulation_steps < 1:
            raise ValueError(
                "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
                .format(args.gradient_accumulation_steps))

        args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs

        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      args.max_seq_length,
                                                      tokenizer, output_mode)
        train_data, _ = get_tensor_data(output_mode, train_features)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

    eval_examples = processor.get_dev_examples(args.data_dir)
    eval_features = convert_examples_to_features(eval_examples, label_list,
                                                 args.max_seq_length,
                                                 tokenizer, output_mode)
    eval_data, eval_labels = get_tensor_data(output_mode, eval_features)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

    # load config file from here
    quant_config = BertConfig.from_json_file("config/new_example_config.json")

    # change config if specified in command
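    # --weight_bit overrides every per-layer and embedding bit-width; --activation_bit overrides the activation bit-widths.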
    if "quant_group_number" in quant_config.__dict__:
        quant_config.__dict__["quant_group_number"] = args.quant_group_number
    for item in quant_config.__dict__:
        if "layer_bits" in item:
            for b_item in quant_config.__dict__[item]:
                quant_config.__dict__[item][b_item] = args.weight_bit
        elif "embed_bits" in item:
            for b_item in quant_config.__dict__[item]:
                quant_config.__dict__[item][b_item] = args.weight_bit
        elif "activation_bits" in item:
            quant_config.__dict__[item] = args.activation_bit

    model = QBertForSequenceClassification.from_pretrained(
        args.model, num_labels=num_labels, quant_config=quant_config)
    model.to(device)
    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        model.eval()
        result = do_eval(model, task_name, eval_dataloader, device,
                         output_mode, eval_labels, num_labels)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
    else:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        if n_gpu > 1:
            model = torch.nn.DataParallel(model)

        param_optimizer = list(model.named_parameters())
        size = 0
        for n, p in model.named_parameters():
            logger.info('n: {}'.format(n))
            size += p.nelement()

        logger.info('Total parameters: {}'.format(size))

        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]

        # optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=1e-8)
        # scheduler = NewWarmupLinearSchedule(optimizer, warmup_steps=int(args.warmup_proportion*num_train_optimization_steps),
        #     t_total=num_train_optimization_steps)
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

        # Prepare loss functions
        loss_mse = MSELoss()

        def soft_cross_entropy(predicts, targets):
            student_likelihood = torch.nn.functional.log_softmax(predicts,
                                                                 dim=-1)
            targets_prob = torch.nn.functional.softmax(targets, dim=-1)
            return (-targets_prob * student_likelihood).mean()

        # Train and evaluate
        global_step = 0
        best_dev_acc = 0.0
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")

        for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0.
            tr_att_loss = 0.
            tr_rep_loss = 0.
            tr_cls_loss = 0.

            model.train()
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration", ascii=True)):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch
                if input_ids.size()[0] != args.train_batch_size:
                    continue

                att_loss = 0.
                rep_loss = 0.
                cls_loss = 0.

                student_logits, student_atts, student_reps = model(
                    input_ids, segment_ids, input_mask, is_student=True)

                if output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    cls_loss = loss_fct(student_logits.view(-1, num_labels),
                                        label_ids.view(-1))

                elif output_mode == "regression":
                    loss_mse = MSELoss()
                    cls_loss = loss_mse(student_logits.view(-1),
                                        label_ids.view(-1))
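                # No teacher is used in this loop: the quantized student is trained directly on task labels, so att_loss and rep_loss stay at zero.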

                loss = cls_loss
                tr_cls_loss += cls_loss.item()

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += label_ids.size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    # scheduler.step()
                    model.zero_grad()
                    global_step += 1

            logger.info("***** Running evaluation *****")
            logger.info("  Epoch = {} iter {} step".format(
                epoch_, global_step))
            logger.info("  Num examples = %d", len(eval_examples))
            logger.info("  Batch size = %d", args.eval_batch_size)

            model.eval()

            loss = tr_loss / (step + 1)
            cls_loss = tr_cls_loss / (step + 1)
            att_loss = tr_att_loss / (step + 1)
            rep_loss = tr_rep_loss / (step + 1)

            result = {}

            result = do_eval(model, task_name, eval_dataloader, device,
                             output_mode, eval_labels, num_labels)
            result['global_step'] = global_step
            result['cls_loss'] = cls_loss
            result['att_loss'] = att_loss
            result['rep_loss'] = rep_loss
            result['loss'] = loss

            result_to_file(result, output_eval_file)

            save_model = False

            if task_name in acc_tasks and result['acc'] > best_dev_acc:
                best_dev_acc = result['acc']
                save_model = True

            if task_name in corr_tasks and result['corr'] > best_dev_acc:
                best_dev_acc = result['corr']
                save_model = True

            if task_name in mcc_tasks and result['mcc'] > best_dev_acc:
                best_dev_acc = result['mcc']
                save_model = True

            if save_model:
                logger.info("***** Save model *****")

                model_to_save = model.module if hasattr(model,
                                                        'module') else model

                model_name = WEIGHTS_NAME
                output_model_file = os.path.join(args.output_dir, model_name)
                output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

                torch.save(model_to_save.state_dict(), output_model_file)
                model_to_save.config.to_json_file(output_config_file)
                tokenizer.save_vocabulary(args.output_dir)

                # Test mnli-mm
                if task_name == "mnli":
                    task_name = "mnli-mm"
                    processor = processors[task_name]()
                    if not os.path.exists(args.output_dir + '-MM'):
                        os.makedirs(args.output_dir + '-MM')

                    eval_examples = processor.get_dev_examples(args.data_dir)

                    eval_features = convert_examples_to_features(
                        eval_examples, label_list, args.max_seq_length,
                        tokenizer, output_mode)
                    eval_data, eval_labels = get_tensor_data(
                        output_mode, eval_features)

                    logger.info("***** Running mm evaluation *****")
                    logger.info("  Num examples = %d", len(eval_examples))
                    logger.info("  Batch size = %d", args.eval_batch_size)

                    eval_sampler = SequentialSampler(eval_data)
                    eval_dataloader = DataLoader(
                        eval_data,
                        sampler=eval_sampler,
                        batch_size=args.eval_batch_size)

                    result = do_eval(model, task_name, eval_dataloader, device,
                                     output_mode, eval_labels, num_labels)

                    result['global_step'] = global_step

                    tmp_output_eval_file = os.path.join(
                        args.output_dir + '-MM', "eval_results.txt")
                    result_to_file(result, tmp_output_eval_file)

                    task_name = 'mnli'

                # if oncloud:
                #     logging.info(mox.file.list_directory(args.output_dir, recursive=True))
                #     logging.info(mox.file.list_directory('.', recursive=True))
                #     mox.file.copy_parallel(args.output_dir, args.data_url)
                #     mox.file.copy_parallel('.', args.data_url)

            model.train()
Exemple #12
0
def main():
    parser = ArgumentParser()
    parser.add_argument(
        '--pregenerated_data',
        type=str,
        required=True,
        default='/nas/hebin/data/english-exp/books_wiki_tokens_ngrams')
    parser.add_argument('--s3_output_dir', type=str, default='huawei_yun')
    parser.add_argument('--student_model',
                        type=str,
                        default='8layer_bert',
                        required=True)
    parser.add_argument('--teacher_model', type=str, default='electra_base')
    parser.add_argument('--cache_dir', type=str, default='/cache', help='')

    parser.add_argument("--epochs",
                        type=int,
                        default=2,
                        help="Number of epochs to train for")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=1e-4,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--max_seq_length", type=int, default=512)

    parser.add_argument("--do_lower_case", action="store_true")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--scratch',
                        action='store_true',
                        help="Whether to train from scratch")
    parser.add_argument(
        "--reduce_memory",
        action="store_true",
        help=
        "Store training data as on-disc memmaps to massively reduce memory usage"
    )
    parser.add_argument('--debug',
                        action='store_true',
                        help="Whether to debug")

    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )

    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument("--already_trained_epoch", default=0, type=int)
    parser.add_argument(
        "--masked_lm_prob",
        type=float,
        default=0.0,
        help="Probability of masking each token for the LM task")
    parser.add_argument(
        "--max_predictions_per_seq",
        type=int,
        default=77,
        help="Maximum number of tokens to mask in each sequence")
    parser.add_argument("--adam_epsilon",
                        default=1e-8,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--logging_steps",
                        type=int,
                        default=500,
                        help="Log every X updates steps.")
    parser.add_argument("--warmup_steps",
                        default=10000,
                        type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")

    parser.add_argument("--num_workers",
                        type=int,
                        default=4,
                        help="num_workers.")
    parser.add_argument("--continue_index", type=int, default=0, help="")
    parser.add_argument("--threads",
                        type=int,
                        default=27,
                        help="Number of threads to preprocess input data")

    # Search space for sub_bart architecture
    parser.add_argument('--layer_num_space',
                        nargs='+',
                        type=int,
                        default=[1, 8])
    parser.add_argument('--hidden_size_space',
                        nargs='+',
                        type=int,
                        default=[128, 768])
    parser.add_argument('--qkv_size_space',
                        nargs='+',
                        type=int,
                        default=[180, 768])
    parser.add_argument('--intermediate_size_space',
                        nargs='+',
                        type=int,
                        default=[128, 3072])
    parser.add_argument('--head_num_space',
                        nargs='+',
                        type=int,
                        default=[1, 12])
    parser.add_argument('--sample_times_per_batch', type=int, default=1)
    parser.add_argument('--further_train', action='store_true')
    parser.add_argument('--mlm_loss', action='store_true')

    # Argument for Huawei yun
    parser.add_argument('--data_url', type=str, default='', help='s3 url')
    parser.add_argument("--train_url", type=str, default="", help="s3 url")

    args = parser.parse_args()

    assert (torch.cuda.is_available())
    device_count = torch.cuda.device_count()
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))

    # Call the init process
    # torch.distributed.init_process_group expects a URL such as tcp://<ip>:<port>
    init_method = 'tcp://'
    master_ip = os.getenv('MASTER_ADDR', 'localhost')
    master_port = os.getenv('MASTER_PORT', '6000')
    init_method += master_ip + ':' + master_port

    # Manually set the device ids.
    # if device_count > 0:
    # args.local_rank = args.rank % device_count
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    print('device_id: %s' % args.local_rank)
    print('device_count: %s, rank: %s, world_size: %s' %
          (device_count, args.rank, args.world_size))
    print(init_method)

    torch.distributed.init_process_group(backend='nccl',
                                         world_size=args.world_size,
                                         rank=args.rank,
                                         init_method=init_method)

    LOCAL_DIR = args.cache_dir
    if oncloud:
        assert mox.file.exists(LOCAL_DIR)

    if args.local_rank == 0 and oncloud:
        logging.info(
            mox.file.list_directory(args.pregenerated_data, recursive=True))
        logging.info(
            mox.file.list_directory(args.student_model, recursive=True))

    local_save_dir = os.path.join(LOCAL_DIR, 'output', 'superbert',
                                  'checkpoints')
    local_tsbd_dir = os.path.join(LOCAL_DIR, 'output', 'superbert',
                                  'tensorboard')
    save_name = '_'.join([
        'superbert',
        'epoch',
        str(args.epochs),
        'lr',
        str(args.learning_rate),
        'bsz',
        str(args.train_batch_size),
        'grad_accu',
        str(args.gradient_accumulation_steps),
        str(args.max_seq_length),
        'gpu',
        str(args.world_size),
    ])
    bash_save_dir = os.path.join(local_save_dir, save_name)
    bash_tsbd_dir = os.path.join(local_tsbd_dir, save_name)
    if args.local_rank == 0:
        if not os.path.exists(bash_save_dir):
            os.makedirs(bash_save_dir)
            logger.info(bash_save_dir + ' created!')
        if not os.path.exists(bash_tsbd_dir):
            os.makedirs(bash_tsbd_dir)
            logger.info(bash_tsbd_dir + ' created!')

    local_data_dir_tmp = '/cache/data/tmp/'
    local_data_dir = local_data_dir_tmp + save_name

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    args.tokenizer = BertTokenizer.from_pretrained(
        args.student_model, do_lower_case=args.do_lower_case)
    args.vocab_list = list(args.tokenizer.vocab.keys())

    config = BertConfig.from_pretrained(
        os.path.join(args.student_model, CONFIG_NAME))
    logger.info("Model config {}".format(config))

    if args.further_train:
        if args.mlm_loss:
            student_model = SuperBertForPreTraining.from_pretrained(
                args.student_model, config)
        else:
            student_model = SuperTinyBertForPreTraining.from_pretrained(
                args.student_model, config)
    else:
        if args.mlm_loss:
            student_model = SuperBertForPreTraining.from_scratch(
                args.student_model, config)
        else:
            student_model = SuperTinyBertForPreTraining.from_scratch(
                args.student_model, config)

    student_model.to(device)

    if not args.mlm_loss:
        teacher_model = BertModel.from_pretrained(args.teacher_model)
        teacher_model.to(device)

    # build arch space
    min_hidden_size, max_hidden_size = args.hidden_size_space
    min_ffn_size, max_ffn_size = args.intermediate_size_space
    min_qkv_size, max_qkv_size = args.qkv_size_space
    min_head_num, max_head_num = args.head_num_space

    hidden_step = 4
    ffn_step = 4
    qkv_step = 12
    head_step = 1
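    # Step sizes used to discretize the hidden/FFN/QKV/head search dimensions enumerated below.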

    number_hidden_step = int((max_hidden_size - min_hidden_size) / hidden_step)
    number_ffn_step = int((max_ffn_size - min_ffn_size) / ffn_step)
    number_qkv_step = int((max_qkv_size - min_qkv_size) / qkv_step)
    number_head_step = int((max_head_num - min_head_num) / head_step)

    layer_numbers = list(
        range(args.layer_num_space[0], args.layer_num_space[1] + 1))
    hidden_sizes = [
        i * hidden_step + min_hidden_size
        for i in range(number_hidden_step + 1)
    ]
    ffn_sizes = [
        i * ffn_step + min_ffn_size for i in range(number_ffn_step + 1)
    ]
    qkv_sizes = [
        i * qkv_step + min_qkv_size for i in range(number_qkv_step + 1)
    ]
    head_numbers = [
        i * head_step + min_head_num for i in range(number_head_step + 1)
    ]

    ######
    if args.local_rank == 0:
        tb_writer = SummaryWriter(bash_tsbd_dir)

    global_step = 0
    step = 0
    tr_loss, tr_rep_loss, tr_att_loss = 0.0, 0.0, 0.0
    logging_loss, rep_logging_loss, att_logging_loss = 0.0, 0.0, 0.0
    end_time, start_time = 0, 0

    submodel_config = dict()

    if args.further_train:
        submodel_config['sample_layer_num'] = config.num_hidden_layers
        submodel_config['sample_hidden_size'] = config.hidden_size
        submodel_config[
            'sample_intermediate_sizes'] = config.num_hidden_layers * [
                config.intermediate_size
            ]
        submodel_config[
            'sample_num_attention_heads'] = config.num_hidden_layers * [
                config.num_attention_heads
            ]
        submodel_config['sample_qkv_sizes'] = config.num_hidden_layers * [
            config.qkv_size
        ]

    for epoch in range(args.epochs):
        if epoch < args.continue_index:
            args.warmup_steps = 0
            continue

        args.local_data_dir = os.path.join(local_data_dir, str(epoch))
        if args.local_rank == 0:
            os.makedirs(args.local_data_dir)
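        # Other ranks spin here until rank 0 has created the per-epoch data directory.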
        while 1:
            if os.path.exists(args.local_data_dir):
                epoch_dataset = load_doc_tokens_ngrams(args)
                break

        if args.local_rank == 0 and oncloud:
            logging.info('Dataset in epoch %s', epoch)
            logging.info(
                mox.file.list_directory(args.local_data_dir, recursive=True))

        train_sampler = DistributedSampler(epoch_dataset,
                                           num_replicas=1,
                                           rank=0)

        train_dataloader = DataLoader(epoch_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        step_in_each_epoch = len(
            train_dataloader) // args.gradient_accumulation_steps
        num_train_optimization_steps = step_in_each_epoch * args.epochs
        logging.info("***** Running training *****")
        logging.info("  Num examples = %d",
                     len(epoch_dataset) * args.world_size)
        logger.info("  Num Epochs = %d", args.epochs)
        logging.info(
            "  Total train batch size (w. parallel, distributed & accumulation) = %d",
            args.train_batch_size * args.gradient_accumulation_steps *
            args.world_size)
        logger.info("  Gradient Accumulation steps = %d",
                    args.gradient_accumulation_steps)
        logging.info("  Num steps = %d", num_train_optimization_steps)

        if epoch == args.continue_index:
            # Prepare optimizer
            param_optimizer = list(student_model.named_parameters())
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [{
                'params': [
                    p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.01
            }, {
                'params': [
                    p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.0
            }]

            warm_up_ratio = args.warmup_steps / num_train_optimization_steps
            print('warm_up_ratio: {}'.format(warm_up_ratio))
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 e=args.adam_epsilon,
                                 schedule='warmup_linear',
                                 t_total=num_train_optimization_steps,
                                 warmup=warm_up_ratio)

            if args.fp16:
                try:
                    from apex import amp
                except ImportError:
                    raise ImportError(
                        "Please install apex from https://www.github.com/nvidia/apex"
                        " to use fp16 training.")
                student_model, optimizer = amp.initialize(
                    student_model,
                    optimizer,
                    opt_level=args.fp16_opt_level,
                    min_loss_scale=1)

            # apex
            student_model = DDP(
                student_model,
                message_size=10000000,
                gradient_predivide_factor=torch.distributed.get_world_size(),
                delay_allreduce=True)

            if not args.mlm_loss:
                teacher_model = DDP(teacher_model,
                                    message_size=10000000,
                                    gradient_predivide_factor=torch.
                                    distributed.get_world_size(),
                                    delay_allreduce=True)
                teacher_model.eval()

            logger.info('apex data paralleled!')

        from torch.nn import MSELoss
        loss_mse = MSELoss()

        student_model.train()
        for step_, batch in enumerate(train_dataloader):
            step += 1
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_masks, lm_label_ids = batch

            if not args.mlm_loss:
                teacher_last_rep, teacher_last_att = teacher_model(
                    input_ids, input_masks)
                teacher_last_att = torch.where(
                    teacher_last_att <= -1e2,
                    torch.zeros_like(teacher_last_att).to(device),
                    teacher_last_att)
                teacher_last_rep = teacher_last_rep.detach()
                teacher_last_att = teacher_last_att.detach()

            for sample_idx in range(args.sample_times_per_batch):
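                # Each pass samples a sub-architecture from the SuperNet and distills it against the fixed teacher.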
                att_loss = 0.
                rep_loss = 0.
                rand_seed = int(global_step * args.world_size +
                                sample_idx)  # + args.rank % args.world_size)

                if not args.mlm_loss:
                    if not args.further_train:
                        submodel_config = sample_arch_4_kd(
                            layer_numbers,
                            hidden_sizes,
                            ffn_sizes,
                            qkv_sizes,
                            reset_rand_seed=True,
                            rand_seed=rand_seed)
                    # knowledge distillation
                    student_last_rep, student_last_att = student_model(
                        input_ids, submodel_config, attention_mask=input_masks)
                    student_last_att = torch.where(
                        student_last_att <= -1e2,
                        torch.zeros_like(student_last_att).to(device),
                        student_last_att)

                    att_loss += loss_mse(student_last_att, teacher_last_att)
                    rep_loss += loss_mse(student_last_rep, teacher_last_rep)
                    loss = att_loss + rep_loss

                    if args.gradient_accumulation_steps > 1:
                        rep_loss = rep_loss / args.gradient_accumulation_steps
                        att_loss = att_loss / args.gradient_accumulation_steps
                        loss = loss / args.gradient_accumulation_steps

                    tr_rep_loss += rep_loss.item()
                    tr_att_loss += att_loss.item()
                else:
                    if not args.further_train:
                        submodel_config = sample_arch_4_mlm(
                            layer_numbers,
                            hidden_sizes,
                            ffn_sizes,
                            head_numbers,
                            reset_rand_seed=True,
                            rand_seed=rand_seed)
                    loss = student_model(input_ids,
                                         submodel_config,
                                         attention_mask=input_masks,
                                         masked_lm_labels=lm_label_ids)

                tr_loss += loss.item()
                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward(retain_graph=True)
                else:
                    loss.backward(retain_graph=True)

            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(
                        amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(student_model.parameters(),
                                                   args.max_grad_norm)

                optimizer.step()
                optimizer.zero_grad()
                global_step += 1

                if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0 \
                        and args.local_rank < 2 or global_step < 100:
                    end_time = time.time()

                    if not args.mlm_loss:
                        logger.info(
                            'Epoch: %s, global_step: %s/%s, lr: %s, loss is %s; '
                            'rep_loss is %s; att_loss is %s; (%.2f sec)' %
                            (epoch, global_step + 1, step_in_each_epoch,
                             optimizer.get_lr()[0],
                             loss.item() * args.gradient_accumulation_steps,
                             rep_loss.item() *
                             args.gradient_accumulation_steps, att_loss.item()
                             * args.gradient_accumulation_steps,
                             end_time - start_time))
                    else:
                        logger.info(
                            'Epoch: %s, global_step: %s/%s, lr: %s, loss is %s; '
                            ' (%.2f sec)' %
                            (epoch, global_step + 1, step_in_each_epoch,
                             optimizer.get_lr()[0],
                             loss.item() * args.gradient_accumulation_steps,
                             end_time - start_time))
                    start_time = time.time()

                if args.logging_steps > 0 and global_step % args.logging_steps == 0 and args.local_rank == 0:
                    tb_writer.add_scalar("lr",
                                         optimizer.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) /
                                         args.logging_steps, global_step)

                    if not args.mlm_loss:
                        tb_writer.add_scalar("rep_loss",
                                             (tr_rep_loss - rep_logging_loss) /
                                             args.logging_steps, global_step)
                        tb_writer.add_scalar("att_loss",
                                             (tr_att_loss - att_logging_loss) /
                                             args.logging_steps, global_step)
                        rep_logging_loss = tr_rep_loss
                        att_logging_loss = tr_att_loss

                    logging_loss = tr_loss

        # Save a trained model
        if args.rank == 0:
            saving_path = bash_save_dir
            saving_path = Path(os.path.join(saving_path,
                                            "epoch_" + str(epoch)))

            if saving_path.is_dir() and list(saving_path.iterdir()):
                logging.warning(
                    f"Output directory ({ saving_path }) already exists and is not empty!"
                )
            saving_path.mkdir(parents=True, exist_ok=True)

            logging.info("** ** * Saving fine-tuned model ** ** * ")
            model_to_save = student_model.module if hasattr(student_model, 'module')\
                else student_model  # Only save the model it-self

            output_model_file = os.path.join(saving_path, WEIGHTS_NAME)
            output_config_file = os.path.join(saving_path, CONFIG_NAME)

            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            args.tokenizer.save_vocabulary(saving_path)

            torch.save(optimizer.state_dict(),
                       os.path.join(saving_path, "optimizer.pt"))
            logger.info("Saving optimizer and scheduler states to %s",
                        saving_path)

            # debug
            if oncloud:
                local_output_dir = os.path.join(LOCAL_DIR, 'output')
                logger.info(
                    mox.file.list_directory(local_output_dir, recursive=True))
                logger.info('s3_output_dir: ' + args.s3_output_dir)
                mox.file.copy_parallel(local_output_dir, args.s3_output_dir)

    if args.local_rank == 0:
        tb_writer.close()
Exemple #13
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default="data/MNLI",
        type=str,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--teacher_model",
                        default="pretrained/checkpoint-31280/",
                        type=str,
                        help="The teacher model dir.")
    parser.add_argument("--student_model",
                        default="pretrained/generalbert",
                        type=str,
                        help="The student model dir.")
    parser.add_argument("--task_name",
                        default="MNLI",
                        type=str,
                        help="The name of the task to train.")
    parser.add_argument(
        "--output_dir",
        default="output",
        type=str,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=384,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=128,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=5.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )

    # added arguments
    parser.add_argument('--aug_train', action='store_true')
    parser.add_argument('--eval_step', type=float, default=0.1)
    parser.add_argument('--pred_distill', action='store_true')
    parser.add_argument('--data_url', type=str, default="")
    parser.add_argument('--temperature', type=float, default=1.)

    args = parser.parse_args()
    logger.info('The args: {}'.format(args))

    # intermediate distillation default parameters
    default_params = {
        "cola": {
            "num_train_epochs": 50,
            "max_seq_length": 64
        },
        "mnli": {
            "num_train_epochs": 5,
            "max_seq_length": 128
        },
        "mrpc": {
            "num_train_epochs": 20,
            "max_seq_length": 128
        },
        "sst-2": {
            "num_train_epochs": 10,
            "max_seq_length": 64
        },
        "sts-b": {
            "num_train_epochs": 20,
            "max_seq_length": 128
        },
        "qqp": {
            "num_train_epochs": 5,
            "max_seq_length": 128
        },
        "qnli": {
            "num_train_epochs": 10,
            "max_seq_length": 128
        },
        "rte": {
            "num_train_epochs": 20,
            "max_seq_length": 128
        }
    }

    acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
    corr_tasks = ["sts-b"]
    mcc_tasks = ["cola"]

    # Prepare devices
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    tb = SummaryWriter("./runs")

    # Prepare seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare task settings
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name in default_params:
        args.max_seq_length = default_params[task_name]["max_seq_length"]

    if not args.pred_distill and not args.do_eval:
        if task_name in default_params:
            args.num_train_epochs = default_params[task_name][
                "num_train_epochs"]

    if task_name not in processors:
        raise ValueError("Task not found: %s" % task_name)

    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer.from_pretrained(args.student_model,
                                              do_lower_case=args.do_lower_case)

    if not args.do_eval:
        if args.gradient_accumulation_steps < 1:
            raise ValueError(
                "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
                .format(args.gradient_accumulation_steps))

        args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

        train_data, _ = get_tensor_data(args, task_name, tokenizer, False,
                                        args.aug_train)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)
        num_train_optimization_steps = int(
            len(train_dataloader) /
            args.gradient_accumulation_steps) * args.num_train_epochs

    eval_data, eval_labels = get_tensor_data(args, task_name, tokenizer, True,
                                             False)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

    if not args.do_eval:
        teacher_model = TinyBertForSequenceClassification.from_pretrained(
            args.teacher_model, num_labels=num_labels)
        teacher_model.to(device)

    student_model = TinyBertForSequenceClassification.from_pretrained(
        args.student_model, num_labels=num_labels)
    student_model.to(device)
    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_data))
        logger.info("  Batch size = %d", args.eval_batch_size)

        student_model.eval()
        result = do_eval(student_model, task_name, eval_dataloader, device,
                         output_mode, eval_labels, num_labels)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
    else:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        if n_gpu > 1:
            student_model = torch.nn.DataParallel(student_model)
            teacher_model = torch.nn.DataParallel(teacher_model)
        # Prepare optimizer
        param_optimizer = list(student_model.named_parameters())
        size = 0
        for n, p in student_model.named_parameters():
            logger.info('n: {}'.format(n))
            size += p.nelement()

        logger.info('Total parameters: {}'.format(size))
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]
        schedule = 'warmup_linear'
        if not args.pred_distill:
            schedule = 'none'
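        # Intermediate-layer (attention/hidden-state) distillation runs with a
        # constant learning rate (schedule='none'); prediction-layer (logit)
        # distillation keeps the default linear warmup/decay schedule.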
        optimizer = BertAdam(optimizer_grouped_parameters,
                             schedule=schedule,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)
        # Prepare loss functions
        loss_mse = MSELoss()

        def soft_cross_entropy(predicts, targets):
            student_likelihood = torch.nn.functional.log_softmax(predicts,
                                                                 dim=-1)
            targets_prob = torch.nn.functional.softmax(targets, dim=-1)
            return (-targets_prob * student_likelihood).mean()
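        # soft_cross_entropy is the temperature-scaled KD objective: it
        # averages -softmax(teacher_logits / T) * log_softmax(student_logits / T)
        # over every element of the (batch, num_labels) tensor, so a higher
        # temperature exposes more of the teacher's ranking over wrong classes.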

        # Train and evaluate
        global_step = 0
        best_dev_acc = 0.0
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")

        for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0.
            tr_att_loss = 0.
            tr_rep_loss = 0.
            tr_cls_loss = 0.

            student_model.train()
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration", ascii=True)):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch
                if input_ids.size()[0] != args.train_batch_size:
                    continue

                att_loss = 0.
                rep_loss = 0.
                cls_loss = 0.

                student_logits, student_atts, student_reps = student_model(
                    input_ids, segment_ids, input_mask, is_student=True)

                with torch.no_grad():
                    teacher_logits, teacher_atts, teacher_reps = teacher_model(
                        input_ids, segment_ids, input_mask)

                if not args.pred_distill:
                    teacher_layer_num = len(teacher_atts)
                    student_layer_num = len(student_atts)
                    assert teacher_layer_num % student_layer_num == 0
                    layers_per_block = int(teacher_layer_num /
                                           student_layer_num)
                    new_teacher_atts = [
                        teacher_atts[i * layers_per_block + layers_per_block -
                                     1] for i in range(student_layer_num)
                    ]
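                    # For example, with a 12-layer teacher and a 4-layer
                    # student, layers_per_block = 3 and new_teacher_atts picks
                    # the attention maps of teacher layers 3, 6, 9 and 12
                    # (indices 2, 5, 8, 11), i.e. the last layer of each block.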

                    for student_att, teacher_att in zip(
                            student_atts, new_teacher_atts):
                        student_att = torch.where(
                            student_att <= -1e2,
                            torch.zeros_like(student_att).to(device),
                            student_att)
                        teacher_att = torch.where(
                            teacher_att <= -1e2,
                            torch.zeros_like(teacher_att).to(device),
                            teacher_att)

                        tmp_loss = loss_mse(student_att, teacher_att)
                        att_loss += tmp_loss

                    new_teacher_reps = [
                        teacher_reps[i * layers_per_block]
                        for i in range(student_layer_num + 1)
                    ]
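                    # new_teacher_reps keeps student_layer_num + 1 hidden
                    # states (indices 0, 3, 6, 9, 12 in the example above);
                    # index 0 is the embedding output, so the embedding layer
                    # is distilled along with the Transformer layers.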
                    new_student_reps = student_reps
                    for student_rep, teacher_rep in zip(
                            new_student_reps, new_teacher_reps):
                        tmp_loss = loss_mse(student_rep, teacher_rep)
                        rep_loss += tmp_loss

                    loss = rep_loss + att_loss
                    tr_att_loss += att_loss.item()
                    tr_rep_loss += rep_loss.item()
                else:
                    if output_mode == "classification":
                        cls_loss = soft_cross_entropy(
                            student_logits / args.temperature,
                            teacher_logits / args.temperature)
                    elif output_mode == "regression":
                        loss_mse = MSELoss()
                        cls_loss = loss_mse(student_logits.view(-1),
                                            label_ids.view(-1))

                    loss = cls_loss
                    tr_cls_loss += cls_loss.item()

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                loss.backward()
                tb.add_scalar("loss", loss.item(), global_step)
                tr_loss += loss.item()
                nb_tr_examples += label_ids.size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

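                # eval_step is a fraction here (default 0.1), so this branch
                # runs an evaluation/checkpoint pass roughly every 10% of
                # num_train_optimization_steps.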
                if (global_step + 1) % int(
                        args.eval_step * num_train_optimization_steps) == 0:
                    logger.info("***** Running evaluation *****")
                    logger.info("  Epoch = {} iter {} step".format(
                        epoch_, global_step))
                    logger.info("  Num examples = %d", len(eval_data))
                    logger.info("  Batch size = %d", args.eval_batch_size)

                    student_model.eval()

                    loss = tr_loss / (step + 1)
                    cls_loss = tr_cls_loss / (step + 1)
                    att_loss = tr_att_loss / (step + 1)
                    rep_loss = tr_rep_loss / (step + 1)

                    result = {}
                    if args.pred_distill:
                        result = do_eval(student_model, task_name,
                                         eval_dataloader, device, output_mode,
                                         eval_labels, num_labels)
                    result['global_step'] = global_step
                    result['cls_loss'] = cls_loss
                    result['att_loss'] = att_loss
                    result['rep_loss'] = rep_loss
                    result['loss'] = loss

                    result_to_file(result, output_eval_file)

                    if not args.pred_distill:
                        save_model = True
                    else:
                        save_model = False

                        if task_name in acc_tasks and result[
                                'acc'] > best_dev_acc:
                            best_dev_acc = result['acc']
                            save_model = True

                        if task_name in corr_tasks and result[
                                'corr'] > best_dev_acc:
                            best_dev_acc = result['corr']
                            save_model = True

                        if task_name in mcc_tasks and result[
                                'mcc'] > best_dev_acc:
                            best_dev_acc = result['mcc']
                            save_model = True

                    if save_model:
                        logger.info("***** Save model *****")

                        model_to_save = student_model.module if hasattr(
                            student_model, 'module') else student_model

                        model_name = WEIGHTS_NAME
                        # if not args.pred_distill:
                        #     model_name = "step_{}_{}".format(global_step, WEIGHTS_NAME)
                        output_model_file = os.path.join(
                            args.output_dir, model_name)
                        output_config_file = os.path.join(
                            args.output_dir, CONFIG_NAME)

                        torch.save(model_to_save.state_dict(),
                                   output_model_file)
                        model_to_save.config.to_json_file(output_config_file)
                        tokenizer.save_vocabulary(args.output_dir)

                        # Test mnli-mm
                        if args.pred_distill and task_name == "mnli":
                            task_name = "mnli-mm"
                            if not os.path.exists(args.output_dir + '-MM'):
                                os.makedirs(args.output_dir + '-MM')

                            eval_data, eval_labels = get_tensor_data(
                                args, task_name, tokenizer, True, False)

                            eval_sampler = SequentialSampler(eval_data)
                            eval_dataloader = DataLoader(
                                eval_data,
                                sampler=eval_sampler,
                                batch_size=args.eval_batch_size)
                            logger.info("***** Running mm evaluation *****")
                            logger.info("  Num examples = %d", len(eval_data))
                            logger.info("  Batch size = %d",
                                        args.eval_batch_size)

                            result = do_eval(student_model, task_name,
                                             eval_dataloader, device,
                                             output_mode, eval_labels,
                                             num_labels)

                            result['global_step'] = global_step

                            tmp_output_eval_file = os.path.join(
                                args.output_dir + '-MM', "eval_results.txt")
                            result_to_file(result, tmp_output_eval_file)

                            task_name = 'mnli'

                    student_model.train()


class KDLearner(object):
    def __init__(self,
                 args,
                 device,
                 student_model,
                 teacher_model=None,
                 num_train_optimization_steps=None):
        self.args = args
        self.device = device
        self.n_gpu = torch.cuda.device_count()
        self.student_model = student_model
        self.teacher_model = teacher_model
        self.num_train_optimization_steps = num_train_optimization_steps
        self._check_params()

    def build(self, lr=None):
        self.prev_global_step = 0
        if self.args.distill_rep_attn and not self.args.distill_logit:
            stage = 'kd_stage1'
        elif self.args.distill_logit and not self.args.distill_rep_attn:
            stage = 'kd_stage2'
        elif self.args.distill_logit and self.args.distill_rep_attn:
            stage = 'kd_joint'
        else:
            stage = 'nokd'
        self.output_dir = os.path.join(self.args.output_dir, stage)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        param_optimizer = list(self.student_model.named_parameters())
        self.clip_params = {}
        for k, v in param_optimizer:
            if 'clip_' in k:
                self.clip_params[k] = v
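        # Parameters whose name contains 'clip_' (presumably the learnable
        # clipping values / step sizes used by the quantizers) get their own
        # optimizer group below, with a separate learning rate (clip_lr) and
        # weight decay (clip_wd).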

        # if self.args.input_quant_method == 'uniform' and self.args.restore_clip:
        #     self._restore_clip_params()
        # elif self.args.input_quant_method == 'uniform':
        #     logging.info("All clipping vals initialized at (%.4f, %.4f)" % (-self.args.clip_init_val, self.args.clip_init_val))
        # else:
        #     pass

        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in param_optimizer
                    if (not any(nd in n
                                for nd in no_decay) and not 'clip_' in n)
                ],
                'weight_decay':
                self.args.weight_decay
            },
            {
                'params': [
                    p for n, p in param_optimizer
                    if (any(nd in n for nd in no_decay) and not 'clip_' in n)
                ],
                'weight_decay':
                0.0
            },
            {
                'params': [p for n, p in self.clip_params.items()],
                'lr': self.args.clip_lr,
                'weight_decay': self.args.clip_wd
            },
        ]

        schedule = 'warmup_linear'
        learning_rate = self.args.learning_rate if not lr else lr
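        # 'warmup_linear' ramps the learning rate from 0 to `lr` over the
        # first warmup_proportion * t_total steps, then decays it linearly
        # back to 0 by step t_total.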
        self.optimizer = BertAdam(optimizer_grouped_parameters,
                                  schedule=schedule,
                                  lr=learning_rate,
                                  warmup=self.args.warmup_proportion,
                                  t_total=self.num_train_optimization_steps)
        logging.info("Optimizer prepared.")
        self._check_quantized_modules()
        self._setup_grad_scale_stats()

    def _do_eval(self, model, task_name, eval_dataloader, output_mode,
                 eval_labels, num_labels):
        eval_loss = 0
        nb_eval_steps = 0
        preds = []

        for batch_ in tqdm(eval_dataloader, desc="Evaluating"):
            batch_ = tuple(t.to(self.device) for t in batch_)
            with torch.no_grad():
                input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch_

                logits, _, _ = model(input_ids, segment_ids, input_mask)

            # compute the eval loss and the other metrics required by the task
            if output_mode == "classification":
                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels),
                                         label_ids.view(-1))
            elif output_mode == "regression":
                loss_fct = MSELoss()
                tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))

            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
            else:
                preds[0] = np.append(preds[0],
                                     logits.detach().cpu().numpy(),
                                     axis=0)

        eval_loss = eval_loss / nb_eval_steps

        preds = preds[0]
        if output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(task_name, preds, eval_labels.numpy())
        result['eval_loss'] = eval_loss

        return result

    def evaluate(self, task_name, eval_dataloader, output_mode, eval_labels,
                 num_labels, eval_examples, mm_eval_dataloader,
                 mm_eval_labels):
        """ Evalutaion of checkpoints from models/. directly use args.student_model """

        self.student_model.eval()
        result = self._do_eval(self.student_model, task_name, eval_dataloader,
                               output_mode, eval_labels, num_labels)

        logging.info("***** Running evaluation, Task: %s, Job_id: %s *****" %
                     (self.args.task_name, self.args.job_id))
        logging.info("  Num examples = %d", len(eval_examples))
        logging.info("  Batch size = %d", self.args.batch_size)
        logging.info("***** Eval results, Task: %s, Job_id: %s *****" %
                     (self.args.task_name, self.args.job_id))
        for key in sorted(result.keys()):
            logging.info("  %s = %s", key, str(result[key]))

        if task_name == "mnli":
            logging.info('MNLI-mm Evaluation')
            result = self._do_eval(self.student_model, 'mnli-mm',
                                   mm_eval_dataloader, output_mode,
                                   mm_eval_labels, num_labels)
            tmp_output_eval_file = os.path.join(self.args.output_dir + '-MM',
                                                "eval_results.txt")
            result_to_file(result, tmp_output_eval_file)

    def train(self, train_examples, task_name, output_mode, eval_labels,
              num_labels, train_dataloader, eval_dataloader, eval_examples,
              tokenizer, mm_eval_labels, mm_eval_dataloader):
        """ quant-aware pretraining + KD """

        # Prepare loss functions
        loss_mse = MSELoss()

        self.teacher_model.eval()
        teacher_results = self._do_eval(self.teacher_model, task_name,
                                        eval_dataloader, output_mode,
                                        eval_labels, num_labels)
        logging.info("Teacher network evaluation")
        for key in sorted(teacher_results.keys()):
            logging.info("  %s = %s", key, str(teacher_results[key]))

        self.teacher_model.train(
        )  # switch to train mode to supervise students

        # Train and evaluate
        # num_layers = self.student_model.config.num_hidden_layers + 1
        global_step = self.prev_global_step
        best_dev_acc = 0.0
        output_eval_file = os.path.join(self.args.output_dir,
                                        "eval_results.txt")

        logging.info("***** Running training, Task: %s, Job id: %s*****" %
                     (self.args.task_name, self.args.job_id))
        logging.info(" Distill rep attn: %d, Distill logit: %d" %
                     (self.args.distill_rep_attn, self.args.distill_logit))
        logging.info("  Num examples = %d", len(train_examples))
        logging.info("  Batch size = %d", self.args.batch_size)
        logging.info("  Num steps = %d", self.num_train_optimization_steps)

        global_tr_loss = 0  # record global average training loss to plot

        for epoch_ in range(self.args.num_train_epochs):

            tr_loss = 0.
            tr_att_loss = 0.
            tr_rep_loss = 0.
            tr_cls_loss = 0.

            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(train_dataloader):

                self.student_model.train()

                batch = tuple(t.to(self.device) for t in batch)

                input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch

                att_loss = 0.
                rep_loss = 0.
                cls_loss = 0.
                rep_loss_layerwise = []
                att_loss_layerwise = []

                student_logits, student_atts, student_reps = self.student_model(
                    input_ids, segment_ids, input_mask)

                if self.args.distill_logit or self.args.distill_rep_attn:
                    # use distillation

                    with torch.no_grad():
                        teacher_logits, teacher_atts, teacher_reps = self.teacher_model(
                            input_ids, segment_ids, input_mask)

                    # NOTE: configure the loss according to the KD stage
                    loss = 0.
                    if self.args.distill_logit:
                        cls_loss = soft_cross_entropy(
                            student_logits / self.args.temperature,
                            teacher_logits / self.args.temperature)
                        loss += cls_loss
                        tr_cls_loss += cls_loss.item()

                    if self.args.distill_rep_attn:
                        for student_att, teacher_att in zip(
                                student_atts, teacher_atts):
                            student_att = torch.where(
                                student_att <= -1e2,
                                torch.zeros_like(student_att).to(self.device),
                                student_att)
                            teacher_att = torch.where(
                                teacher_att <= -1e2,
                                torch.zeros_like(teacher_att).to(self.device),
                                teacher_att)

                            tmp_loss = loss_mse(student_att, teacher_att)
                            att_loss += tmp_loss
                            att_loss_layerwise.append(tmp_loss.item())

                        for student_rep, teacher_rep in zip(
                                student_reps, teacher_reps):
                            tmp_loss = loss_mse(student_rep, teacher_rep)
                            rep_loss += tmp_loss
                            rep_loss_layerwise.append(tmp_loss.item())

                        tr_att_loss += att_loss.item()
                        tr_rep_loss += rep_loss.item()

                        loss += rep_loss + att_loss

                else:
                    if output_mode == "classification":
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(student_logits, label_ids.view(-1))
                    elif output_mode == "regression":
                        loss_mse = MSELoss()
                        loss = loss_mse(student_logits.view(-1),
                                        label_ids.view(-1))

                if self.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if self.args.gradient_accumulation_steps > 1:
                    loss = loss / self.args.gradient_accumulation_steps

                loss.backward()

                tr_loss += loss.item()
                global_tr_loss += loss.item()
                nb_tr_examples += label_ids.size(0)
                nb_tr_steps += 1

                # evaluate and save the model
                if global_step % self.args.eval_step == 0 or \
                        global_step == len(train_dataloader)-1:

                    # logging.info("***** KDLearner %s Running evaluation, Task: %s, Job_id: %s *****" % (stage, self.args.task_name, self.args.job_id))
                    logging.info("  Epoch = {} iter {} step".format(
                        epoch_, global_step))
                    logging.info("  Num examples = %d", len(eval_examples))
                    logging.info(f"  Previous best = {best_dev_acc}")

                    loss = tr_loss / (step + 1)
                    global_avg_loss = global_tr_loss / (global_step + 1)
                    cls_loss = tr_cls_loss / (step + 1)
                    att_loss = tr_att_loss / (step + 1)
                    rep_loss = tr_rep_loss / (step + 1)

                    self.student_model.eval()
                    result = self._do_eval(self.student_model, task_name,
                                           eval_dataloader, output_mode,
                                           eval_labels, num_labels)
                    result['global_step'] = global_step
                    result['cls_loss'] = cls_loss
                    result['att_loss'] = att_loss
                    result['rep_loss'] = rep_loss
                    result['loss'] = loss
                    result['global_loss'] = global_avg_loss

                    preds = student_logits.detach().cpu().numpy()
                    train_label = label_ids.cpu().numpy()
                    if output_mode == "classification":
                        preds = np.argmax(preds, axis=1)
                    elif output_mode == "regression":
                        preds = np.squeeze(preds)
                    result['train_batch_acc'] = list(
                        compute_metrics(task_name, preds,
                                        train_label).values())[0]

                    if self.args.distill_rep_attn:
                        logging.info("embedding layer rep_loss: %.8f" %
                                     (rep_loss_layerwise[0]))
                        rep_loss_layerwise = rep_loss_layerwise[1:]
                        for lid in range(len(rep_loss_layerwise)):
                            logging.info("layer %d rep_loss: %.8f" %
                                         (lid + 1, rep_loss_layerwise[lid]))
                            logging.info("layer %d att_loss: %.8f" %
                                         (lid + 1, att_loss_layerwise[lid]))

                    result_to_file(result, output_eval_file)

                    save_model = False

                    if task_name in acc_tasks and result['acc'] > best_dev_acc:
                        best_dev_acc = result['acc']
                        save_model = True

                    if task_name in corr_tasks and result[
                            'corr'] > best_dev_acc:
                        best_dev_acc = result['corr']
                        save_model = True

                    if task_name in mcc_tasks and result['mcc'] > best_dev_acc:
                        best_dev_acc = result['mcc']
                        save_model = True

                    if save_model:
                        self._save()

                        if task_name == "mnli":
                            logging.info('MNLI-mm Evaluation')
                            result = self._do_eval(self.student_model,
                                                   'mnli-mm',
                                                   mm_eval_dataloader,
                                                   output_mode, mm_eval_labels,
                                                   num_labels)
                            result['global_step'] = global_step
                            tmp_output_eval_file = os.path.join(
                                self.output_dir + '-MM', "eval_results.txt")
                            result_to_file(result, tmp_output_eval_file)

                # if self.args.quantize_weight:
                # self.quanter.restore()

                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    global_step += 1

    def _save(self):
        logging.info("******************** Save model ********************")
        model_to_save = self.student_model.module if hasattr(
            self.student_model, 'module') else self.student_model
        output_model_file = os.path.join(self.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(self.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)

    def _check_params(self):
        if not self.args.do_eval:
            assert self.teacher_model, 'teacher model must not be None in train mode.'

    def _check_quantized_modules(self):
        logging.info("Checking module types.")
        for k, m in self.student_model.named_modules():
            if isinstance(m, torch.nn.Linear):
                logging.info('%s: %s' % (k, str(m)))

    def _setup_grad_scale_stats(self):
        self.grad_scale_stats = {'weight': None, \
                                 'bias': None, \
                                 'layer_norm': None, \
                                 'step_size/clip_val': None}
        self.ema_grad = 0.9
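        # ema_grad is the smoothing factor for an exponential moving average
        # of the grad-norm / weight-norm ratio that check_grad_scale() tracks
        # for each parameter group above.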

    def check_grad_scale(self):
        logging.info("Check grad scale ratio: grad/w")
        for k, v in self.student_model.named_parameters():
            if v.grad is not None:
                has_grad = True
                ratio = v.grad.norm(p=2) / v.data.norm(p=2)
                # print('%.6e, %s' % (ratio.float(), k))
            else:
                has_grad = False
                logging.info('params: %s has no gradient' % k)
                continue

            # update grad_scale stats
            if 'weight' in k and v.ndimension() == 2:
                key = 'weight'
            elif 'bias' in k and v.ndimension() == 1:
                key = 'bias'
            elif 'LayerNorm' in k and 'weight' in k and v.ndimension() == 1:
                key = 'layer_norm'
            elif 'clip_' in k:
                key = 'step_size/clip_val'
            else:
                key = None

            if key and has_grad:
                if self.grad_scale_stats[key]:
                    self.grad_scale_stats[
                        key] = self.ema_grad * self.grad_scale_stats[key] + (
                            1 - self.ema_grad) * ratio
                else:
                    self.grad_scale_stats[key] = ratio

        for (key, val) in self.grad_scale_stats.items():
            if val is not None:
                logging.info('%.6e, %s' % (val, key))
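

# A minimal driver sketch for KDLearner (hypothetical glue code: the args
# namespace, models, dataloaders and label tensors are produced elsewhere in
# the surrounding scripts; only the call pattern below is taken from the
# class itself).
def run_kd_learner(args, device, student_model, teacher_model,
                   num_train_optimization_steps, task_name, output_mode,
                   train_examples, train_dataloader, eval_examples,
                   eval_dataloader, eval_labels, num_labels, tokenizer,
                   mm_eval_dataloader=None, mm_eval_labels=None):
    learner = KDLearner(args, device, student_model, teacher_model,
                        num_train_optimization_steps)
    learner.build()  # pick the KD stage, output dir and BertAdam optimizer
    if args.do_eval:
        learner.evaluate(task_name, eval_dataloader, output_mode, eval_labels,
                         num_labels, eval_examples, mm_eval_dataloader,
                         mm_eval_labels)
    else:
        learner.train(train_examples, task_name, output_mode, eval_labels,
                      num_labels, train_dataloader, eval_dataloader,
                      eval_examples, tokenizer, mm_eval_labels,
                      mm_eval_dataloader)
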
Example #15
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument('--pregenerated_data', type=Path, required=True)
    parser.add_argument('--teacher_model',
                        default=None,
                        type=str,
                        required=True)
    parser.add_argument('--student_model',
                        default=None,
                        type=str,
                        required=True)
    parser.add_argument('--output_dir', default=None, type=str, required=True)

    # Other parameters
    parser.add_argument(
        '--max_seq_length',
        default=128,
        type=int,
        help=
        'The maximum total input sequence length after WordPiece tokenization. \n'
        'Sequences longer than this will be truncated, and sequences shorter \n'
        'than this will be padded.',
    )

    parser.add_argument(
        '--reduce_memory',
        action='store_true',
        help=
        'Store training data as on-disc memmaps to massively reduce memory usage',
    )
    parser.add_argument(
        '--do_eval',
        action='store_true',
        help='Whether to run eval on the dev set.',
    )
    parser.add_argument(
        '--do_lower_case',
        action='store_true',
        help='Set this flag if you are using an uncased model.',
    )
    parser.add_argument(
        '--train_batch_size',
        default=32,
        type=int,
        help='Total batch size for training.',
    )
    parser.add_argument(
        '--eval_batch_size',
        default=8,
        type=int,
        help='Total batch size for eval.',
    )
    parser.add_argument(
        '--learning_rate',
        default=5e-5,
        type=float,
        help='The initial learning rate for Adam.',
    )
    parser.add_argument(
        '--weight_decay',
        '--wd',
        default=1e-4,
        type=float,
        metavar='W',
        help='weight decay',
    )
    parser.add_argument(
        '--num_train_epochs',
        default=3.0,
        type=float,
        help='Total number of training epochs to perform.',
    )
    parser.add_argument(
        '--warmup_proportion',
        default=0.1,
        type=float,
        help=
        'Proportion of training to perform linear learning rate warmup for. '
        'E.g., 0.1 = 10%% of training.',
    )
    parser.add_argument(
        '--no_cuda',
        action='store_true',
        help='Whether not to use CUDA when available',
    )
    parser.add_argument(
        '--local_rank',
        type=int,
        default=-1,
        help='local_rank for distributed training on gpus',
    )
    parser.add_argument(
        '--seed',
        type=int,
        default=42,
        help='random seed for initialization',
    )
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        'Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument(
        '--fp16',
        action='store_true',
        help='Whether to use 16-bit float precision instead of 32-bit',
    )
    parser.add_argument(
        '--continue_train',
        action='store_true',
        help='Whether to train from checkpoints',
    )

    # Additional arguments
    parser.add_argument('--eval_step', type=int, default=1000)

    # This is used for running on Huawei Cloud.
    parser.add_argument('--data_url', type=str, default='')

    args = parser.parse_args()
    logger.info('args:{}'.format(args))

    samples_per_epoch = []
    for i in range(int(args.num_train_epochs)):
        epoch_file = args.pregenerated_data / 'epoch_{}.json'.format(i)
        metrics_file = args.pregenerated_data / 'epoch_{}_metrics.json'.format(
            i)
        if epoch_file.is_file() and metrics_file.is_file():
            metrics = json.loads(metrics_file.read_text())
            samples_per_epoch.append(metrics['num_training_examples'])
        else:
            if i == 0:
                exit('No training data was found!')
            print(
                'Warning! There are fewer epochs of pregenerated data ({}) than training epochs ({}).'
                .format(i, args.num_train_epochs))
            print(
                'This script will loop over the available data, but training diversity may be negatively impacted.'
            )
            num_data_epochs = i
            break
    else:
        num_data_epochs = args.num_train_epochs

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device('cuda' if torch.cuda.is_available()
                              and not args.no_cuda else 'cpu')
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )

    logger.info(
        'device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            'Invalid gradient_accumulation_steps parameter: {}, should be >= 1'
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = (args.train_batch_size //
                             args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            'Output directory ({}) already exists and is not empty.'.format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # NOTE: `tokenizer` is referenced below (PregeneratedDataset and
    # save_vocabulary) but never created in this snippet; assuming the usual
    # initialization from the teacher model's vocabulary.
    tokenizer = BertTokenizer.from_pretrained(args.teacher_model,
                                              do_lower_case=args.do_lower_case)

    total_train_examples = 0
    for i in range(int(args.num_train_epochs)):
        # The modulo takes into account the fact that we may loop over limited epochs of data
        total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]

    num_train_optimization_steps = int(total_train_examples /
                                       args.train_batch_size /
                                       args.gradient_accumulation_steps)
    if args.local_rank != -1:
        num_train_optimization_steps = (num_train_optimization_steps //
                                        torch.distributed.get_world_size())

    if args.continue_train:
        student_model = TinyBertForPreTraining.from_pretrained(
            args.student_model)
    else:
        student_model = TinyBertForPreTraining.from_scratch(args.student_model)
    teacher_model = BertModel.from_pretrained(args.teacher_model)

    # student_model = TinyBertForPreTraining.from_scratch(args.student_model, fit_size=teacher_model.config.hidden_size)
    student_model.to(device)
    teacher_model.to(device)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                'Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.'
            )

        teacher_model = DDP(teacher_model)
    elif n_gpu > 1:
        student_model = torch.nn.DataParallel(student_model)
        teacher_model = torch.nn.DataParallel(teacher_model)

    size = 0
    for n, p in student_model.named_parameters():
        logger.info('n: {}'.format(n))
        logger.info('p: {}'.format(p.nelement()))
        size += p.nelement()

    logger.info('Total parameters: {}'.format(size))

    # Prepare optimizer
    param_optimizer = list(student_model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01,
        },
        {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0,
        },
    ]

    loss_mse = MSELoss()
    optimizer = BertAdam(
        optimizer_grouped_parameters,
        lr=args.learning_rate,
        warmup=args.warmup_proportion,
        t_total=num_train_optimization_steps,
    )

    global_step = 0
    logging.info('***** Running training *****')
    logging.info('  Num examples = {}'.format(total_train_examples))
    logging.info('  Batch size = %d', args.train_batch_size)
    logging.info('  Num steps = %d', num_train_optimization_steps)

    for epoch in trange(int(args.num_train_epochs), desc='Epoch'):
        epoch_dataset = PregeneratedDataset(
            epoch=epoch,
            training_path=args.pregenerated_data,
            tokenizer=tokenizer,
            num_data_epochs=num_data_epochs,
            reduce_memory=args.reduce_memory,
        )
        if args.local_rank == -1:
            train_sampler = RandomSampler(epoch_dataset)
        else:
            train_sampler = DistributedSampler(epoch_dataset)
        train_dataloader = DataLoader(
            epoch_dataset,
            sampler=train_sampler,
            batch_size=args.train_batch_size,
        )

        tr_loss = 0.0
        tr_att_loss = 0.0
        tr_rep_loss = 0.0
        student_model.train()
        nb_tr_examples, nb_tr_steps = 0, 0
        with tqdm(total=len(train_dataloader),
                  desc='Epoch {}'.format(epoch)) as pbar:
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc='Iteration', ascii=True)):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, lm_label_ids, is_next = (
                    batch)
                if input_ids.size()[0] != args.train_batch_size:
                    continue

                att_loss = 0.0
                rep_loss = 0.0

                student_atts, student_reps = student_model(
                    input_ids, segment_ids, input_mask)
                teacher_reps, teacher_atts, _ = teacher_model(
                    input_ids, segment_ids, input_mask)
                teacher_reps = [
                    teacher_rep.detach() for teacher_rep in teacher_reps
                ]  # speedup 1.5x
                teacher_atts = [
                    teacher_att.detach() for teacher_att in teacher_atts
                ]

                teacher_layer_num = len(teacher_atts)
                student_layer_num = len(student_atts)
                assert teacher_layer_num % student_layer_num == 0
                layers_per_block = int(teacher_layer_num / student_layer_num)
                new_teacher_atts = [
                    teacher_atts[i * layers_per_block + layers_per_block - 1]
                    for i in range(student_layer_num)
                ]

                for student_att, teacher_att in zip(student_atts,
                                                    new_teacher_atts):
                    student_att = torch.where(
                        student_att <= -1e2,
                        torch.zeros_like(student_att).to(device),
                        student_att,
                    )
                    teacher_att = torch.where(
                        teacher_att <= -1e2,
                        torch.zeros_like(teacher_att).to(device),
                        teacher_att,
                    )
                    att_loss += loss_mse(student_att, teacher_att)

                new_teacher_reps = [
                    teacher_reps[i * layers_per_block]
                    for i in range(student_layer_num + 1)
                ]
                new_student_reps = student_reps

                for student_rep, teacher_rep in zip(new_student_reps,
                                                    new_teacher_reps):
                    rep_loss += loss_mse(student_rep, teacher_rep)

                loss = att_loss + rep_loss

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_att_loss += att_loss.item()
                tr_rep_loss += rep_loss.item()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                pbar.update(1)

                mean_loss = (tr_loss * args.gradient_accumulation_steps /
                             nb_tr_steps)
                mean_att_loss = (tr_att_loss *
                                 args.gradient_accumulation_steps /
                                 nb_tr_steps)
                mean_rep_loss = (tr_rep_loss *
                                 args.gradient_accumulation_steps /
                                 nb_tr_steps)
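                # The gradient_accumulation_steps factor undoes the earlier
                # division of `loss`; the resulting means are used only for
                # logging below.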

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                    if (global_step + 1) % args.eval_step == 0:
                        result = {}
                        result['global_step'] = global_step
                        result['loss'] = mean_loss
                        result['att_loss'] = mean_att_loss
                        result['rep_loss'] = mean_rep_loss
                        output_eval_file = os.path.join(
                            args.output_dir, 'log.txt')
                        with open(output_eval_file, 'a') as writer:
                            logger.info('***** Eval results *****')
                            for key in sorted(result.keys()):
                                logger.info('  %s = %s', key, str(result[key]))
                                writer.write('%s = %s\n' %
                                             (key, str(result[key])))

                        # Save a trained model
                        model_name = 'step_{}_{}'.format(
                            global_step, WEIGHTS_NAME)
                        logging.info(
                            '** ** * Saving fine-tuned model ** ** * ')
                        # Only save the model itself
                        model_to_save = (student_model.module if hasattr(
                            student_model, 'module') else student_model)

                        output_model_file = os.path.join(
                            args.output_dir, model_name)
                        output_config_file = os.path.join(
                            args.output_dir, CONFIG_NAME)

                        torch.save(model_to_save.state_dict(),
                                   output_model_file)
                        model_to_save.config.to_json_file(output_config_file)
                        tokenizer.save_vocabulary(args.output_dir)

                        if oncloud:
                            logging.info(
                                mox.file.list_directory(args.output_dir,
                                                        recursive=True))
                            logging.info(
                                mox.file.list_directory('.', recursive=True))
                            mox.file.copy_parallel(args.output_dir,
                                                   args.data_url)
                            mox.file.copy_parallel('.', args.data_url)

            model_name = 'step_{}_{}'.format(global_step, WEIGHTS_NAME)
            logging.info('** ** * Saving fine-tuned model ** ** * ')
            model_to_save = (student_model.module if hasattr(
                student_model, 'module') else student_model)

            output_model_file = os.path.join(args.output_dir, model_name)
            output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            tokenizer.save_vocabulary(args.output_dir)

            if oncloud:
                logging.info(
                    mox.file.list_directory(args.output_dir, recursive=True))
                logging.info(mox.file.list_directory('.', recursive=True))
                mox.file.copy_parallel(args.output_dir, args.data_url)
                mox.file.copy_parallel('.', args.data_url)
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument("--pretrain_model_name_or_path",
                        default=None,
                        type=str,
                        help="The pretrain model name or path.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--domain",
                        default='all',
                        type=str,
                        required=True,
                        help="The domain of given model.")
    parser.add_argument("--use_domain_loss",
                        default=False,
                        type=bool,
                        help="Whether to use domain loss.")
    parser.add_argument("--data_portion",
                        default=1.0,
                        type=float,
                        required=False,
                        help="How many data selected.")
    parser.add_argument("--domain_loss_weight",
                        default=0.2,
                        type=float,
                        help="The loss weight of domain.")
    parser.add_argument("--use_sample_weights",
                        default=False,
                        type=bool,
                        help="The loss weight of domain.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )

    # added arguments
    parser.add_argument('--aug_train', action='store_true')
    parser.add_argument('--eval_step', type=int, default=50)
    parser.add_argument('--pred_distill', action='store_true')
    parser.add_argument('--data_url', type=str, default="")
    parser.add_argument('--temperature', type=float, default=1.)

    args = parser.parse_args()
    logger.info('The args: {}'.format(args))

    processors = {
        "mnli": MnliProcessor,
        "mnli-mm": MnliMismatchedProcessor,
        "senti": SentiProcessor
    }

    output_modes = {"mnli": "classification", "senti": "classification"}

    if args.task_name.lower() == "mnli":
        domain_idx_mapping = {
            domain: idx
            for idx, domain in enumerate(
                "telephone,government,slate,fiction,travel".split(","))
        }
    else:
        domain_idx_mapping = {
            domain: idx
            for idx, domain in enumerate("books,dvd,electronics,kitchen".split(
                ","))
        }
    num_domains = len(domain_idx_mapping)

    # intermediate distillation default parameters
    default_params = {
        "mnli": {
            "num_train_epochs": 5,
            "max_seq_length": 128
        },
        "senti": {
            "num_train_epochs": 5,
            "max_seq_length": 128
        },
    }

    acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte", "senti"]
    corr_tasks = ["sts-b"]
    mcc_tasks = ["cola"]

    # Prepare devices
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    logger.info("device: {} n_gpu: {}".format(device, n_gpu))

    # Prepare seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare task settings
    # if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
    #     raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name in default_params:
        args.max_seq_length = default_params[task_name]["max_seq_length"]

    if not args.do_eval:
        if task_name in default_params:
            args.num_train_epochs = default_params[task_name][
                "num_train_epochs"]

    if task_name not in processors:
        raise ValueError("Task not found: %s" % task_name)

    processor = processors[task_name](portion=args.data_portion)
    output_mode = output_modes[task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer.from_pretrained(args.pretrain_model_name_or_path,
                                              do_lower_case=args.do_lower_case)

    if not args.do_eval:
        if not args.aug_train:
            train_examples = processor.get_train_examples(
                args.data_dir, args.domain)
        else:
            train_examples = processor.get_aug_examples(
                args.data_dir, args.domain)
        if args.gradient_accumulation_steps < 1:
            raise ValueError(
                "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
                .format(args.gradient_accumulation_steps))

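        # Divide the mini-batch size by the accumulation steps so the effective
        # batch size per optimizer update stays at the requested value.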
        args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

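        # Total number of optimizer updates over all epochs (used as t_total
        # for the warmup schedule below).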
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs

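        # Cache tokenized training features on disk, keyed by domain, meta setting,
        # sample-weight usage and data portion, so repeated runs skip feature conversion.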
        portion_str = "_{}".format(
            args.data_portion) if args.data_portion != 1.0 else ""
        meta_str = "meta" if args.use_domain_loss or args.use_sample_weights else ""
        cached_train_path = os.path.join(
            args.data_dir, "cached_train_features_{}{}{}{}.pt".format(
                args.domain, meta_str,
                "_with_weights" if args.use_sample_weights else "",
                portion_str))
        if os.path.exists(cached_train_path):
            train_features = torch.load(cached_train_path)
        else:
            train_features = convert_examples_to_features(
                train_examples, label_list, args.max_seq_length, tokenizer,
                output_mode, domain_idx_mapping)
            torch.save(train_features, cached_train_path)
            print("Save to cached path %s" % cached_train_path)
        train_data, _ = get_tensor_data(output_mode, train_features)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

    if args.do_eval:
        eval_examples = processor.get_test_examples(args.data_dir, args.domain)
    else:
        eval_examples = processor.get_dev_examples(args.data_dir, args.domain)
    eval_features = convert_examples_to_features(eval_examples, label_list,
                                                 args.max_seq_length,
                                                 tokenizer, output_mode,
                                                 domain_idx_mapping)
    eval_data, eval_labels = get_tensor_data(output_mode, eval_features)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

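    # The meta teacher returns both task logits and domain logits
    # (see the forward call in the training loop below).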
    meta_teacher_model = MetaTeacherForSequenceClassification.from_pretrained(
        args.pretrain_model_name_or_path,
        num_labels=num_labels,
        num_domains=num_domains)
    meta_teacher_model.to(device)

    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        meta_teacher_model.eval()
        result = do_eval(meta_teacher_model, task_name, eval_dataloader,
                         device, output_mode, eval_labels, num_labels)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
    else:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        if n_gpu > 1:
            meta_teacher_model = torch.nn.DataParallel(meta_teacher_model)
        # Prepare optimizer
        param_optimizer = list(meta_teacher_model.named_parameters())
        size = 0
        for n, p in meta_teacher_model.named_parameters():
            logger.info('n: {}'.format(n))
            size += p.nelement()

        logger.info('Total parameters: {}'.format(size))
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            0.01
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]
        schedule = 'warmup_linear'
        optimizer = BertAdam(optimizer_grouped_parameters,
                             schedule=schedule,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

        # Train and evaluate
        global_step = 0
        best_dev_acc = 0.0
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
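        # reduction="none" keeps per-example losses so they can be re-weighted
        # (domain loss, sample weights) before averaging.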
        ce_loss_fn = CrossEntropyLoss(reduction="none")

        for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0.
            tr_cls_loss = 0.

            meta_teacher_model.train()
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration", ascii=True)):
                batch = tuple(t.to(device) for t in batch)

                input_ids, input_mask, segment_ids, label_ids, seq_lengths, domain_ids, sample_weights = batch
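                # Drop the final partial batch so every step sees a full batch.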
                if input_ids.size()[0] != args.train_batch_size:
                    continue

                logits, domain_logits, *_ = meta_teacher_model(
                    input_ids, segment_ids, input_mask, domain_ids)
                losses = ce_loss_fn(logits, label_ids)

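                # The domain loss compares domain logits against randomly shuffled
                # domain ids, presumably to discourage domain-discriminative features.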
                if args.use_domain_loss:
                    shuffled_domain_ids = domain_ids[torch.randperm(
                        domain_ids.shape[0])]
                    domain_losses = ce_loss_fn(domain_logits,
                                               shuffled_domain_ids)
                    losses += args.domain_loss_weight * domain_losses
                if args.use_sample_weights:
                    loss = torch.mean(losses * sample_weights)
                else:
                    loss = torch.mean(losses)

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += label_ids.size(0)
                nb_tr_steps += 1

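                # Step the optimizer only every gradient_accumulation_steps mini-batches.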
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                if (global_step + 1) % args.eval_step == 0:
                    logger.info("***** Running evaluation *****")
                    logger.info("  Epoch = {} iter {} step".format(
                        epoch_, global_step))
                    logger.info("  Num examples = %d", len(eval_examples))
                    logger.info("  Batch size = %d", args.eval_batch_size)

                    meta_teacher_model.eval()

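                    # Note: tr_cls_loss is never accumulated in this loop, so the
                    # reported cls_loss stays at 0.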
                    loss = tr_loss / (step + 1)
                    cls_loss = tr_cls_loss / (step + 1)

                    result = do_eval(meta_teacher_model, task_name,
                                     eval_dataloader, device, output_mode,
                                     eval_labels, num_labels)
                    result['global_step'] = global_step
                    result['cls_loss'] = cls_loss
                    result['loss'] = loss

                    result_to_file(result, output_eval_file)

                    save_model = False
                    if task_name in acc_tasks and result['acc'] > best_dev_acc:
                        best_dev_acc = result['acc']
                        save_model = True

                    if task_name in corr_tasks and result[
                            'corr'] > best_dev_acc:
                        best_dev_acc = result['corr']
                        save_model = True

                    if task_name in mcc_tasks and result['mcc'] > best_dev_acc:
                        best_dev_acc = result['mcc']
                        save_model = True

                    if save_model:
                        logger.info("***** Save model *****")

                        model_to_save = meta_teacher_model.module if hasattr(meta_teacher_model, 'module') \
                            else meta_teacher_model

                        model_name = WEIGHTS_NAME
                        output_model_file = os.path.join(
                            args.output_dir, model_name)
                        output_config_file = os.path.join(
                            args.output_dir, CONFIG_NAME)

                        torch.save(model_to_save.state_dict(),
                                   output_model_file)
                        model_to_save.config.to_json_file(output_config_file)
                        tokenizer.save_vocabulary(args.output_dir)

                        if oncloud:
                            logging.info(
                                mox.file.list_directory(args.output_dir,
                                                        recursive=True))
                            logging.info(
                                mox.file.list_directory('.', recursive=True))
                            mox.file.copy_parallel(args.output_dir,
                                                   args.data_url)
                            mox.file.copy_parallel('.', args.data_url)

                    meta_teacher_model.train()
Exemple #17
0
    def set_model(self,
                  inference=False,
                  with_head=False,
                  from_path=None,
                  output_attention=False):
        self.verbose('Initializing Transformer model.')

        # Build the Transformer model with speech prediction head
        self.model_config = TransformerConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.with_head = with_head
        self.output_attention = output_attention

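        # Three paths below: training (full model with prediction head),
        # inference with head (keeps the acoustic prediction head), and
        # inference without head (encoder only).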
        if not inference or with_head:
            self.model = TransformerForMaskedAcousticModel(
                self.model_config, self.input_dim, self.output_dim,
                self.output_attention).to(self.device)
            self.transformer = self.model.Transformer
            if self.paras.multi_gpu:
                self.model = torch.nn.DataParallel(self.model)
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel()
                    for p in self.model.parameters() if p.requires_grad)))

        if inference and not with_head:
            self.transformer = TransformerModel(
                self.model_config, self.input_dim,
                self.output_attention).to(self.device)
            if self.paras.multi_gpu:
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel() for p in self.transformer.parameters()
                    if p.requires_grad)))
            self.transformer.eval()
        elif inference and with_head:
            self.model.eval()
        elif not inference:
            self.model.train()

            # Setup optimizer
            param_optimizer = list(self.model.named_parameters())

            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [{
                'params': [
                    p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.01
            }, {
                'params': [
                    p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.0
            }]

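            # With apex, wrap FusedAdam in FP16_Optimizer; loss_scale == 0 selects
            # dynamic loss scaling, otherwise the configured static scale is used.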
            if self.apex:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError(
                        "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
                    )

                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if self.config['optimizer']['loss_scale'] == 0:
                    self.optimizer = FP16_Optimizer(optimizer,
                                                    dynamic_loss_scale=True)
                else:
                    self.optimizer = FP16_Optimizer(
                        optimizer,
                        static_loss_scale=self.config['optimizer']
                        ['loss_scale'])
                self.warmup_linear = WarmupLinearSchedule(
                    warmup=self.warmup_proportion, t_total=self.total_steps)
            else:
                self.optimizer = BertAdam(optimizer_grouped_parameters,
                                          lr=self.learning_rate,
                                          warmup=self.warmup_proportion,
                                          t_total=self.total_steps)
        else:
            raise NotImplementedError('Invalid Arguments!')

        if self.load:  # This will be set to True by default when Tester is running set_model()
            self.load_model(inference=inference,
                            with_head=with_head,
                            from_path=from_path)