def get_model_and_tokenizer(model_name, device):
    save_ckpt_path = CHECK_POINT[model_name]

    if model_name == "koelectra":
        model_name_or_path = "monologg/koelectra-base-discriminator"

        tokenizer = ElectraTokenizer.from_pretrained(model_name_or_path)
        electra_config = ElectraConfig.from_pretrained(model_name_or_path)
        model = koElectraForSequenceClassification.from_pretrained(
            pretrained_model_name_or_path=model_name_or_path,
            config=electra_config,
            num_labels=359)
    elif model_name == 'kobert':
        tokenizer = get_tokenizer()
        model = KoBERTforSequenceClassfication()

    if os.path.isfile(save_ckpt_path):
        checkpoint = torch.load(save_ckpt_path, map_location=device)
        pre_epoch = checkpoint['epoch']
        # pre_loss = checkpoint['loss']
        model.load_state_dict(checkpoint['model_state_dict'])

        print(f"load pretrain from: {save_ckpt_path}, epoch={pre_epoch}")

    return model, tokenizer
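A minimal usage sketch (hypothetical; it assumes CHECK_POINT maps "kobert" to a valid checkpoint path and that torch is imported):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model, tokenizer = get_model_and_tokenizer("kobert", device)
model.to(device)
model.eval()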
Example #2
def token_num(data_path='./data/train.jsonl'):
    data = []
    with open(data_path, 'r') as json_file:
        json_list = list(json_file)

    bert_tok = get_tokenizer()
    gpt_tok = get_kogpt2_tokenizer()

    bert_tok_num = 0
    gpt_tok_num = 0

    count = 0
    for json_str in json_list:
        json_data = json.loads(json_str)
        tmp_str = json_data['abstractive']
        # for arti_str in json_data['article_original']:
        #   tmp_str += arti_str
        bert_tok_num = max(
            bert_tok_num,
            len(bert_tok.encode(tmp_str, max_length=512, truncation=True)))
        gpt_tok_num = max(
            gpt_tok_num,
            len(gpt_tok.encode(tmp_str, max_length=512, truncation=True)))

        # print(len(json_data['article_original']))
        # sum_len += len(json_data['article_original'])
        # count += 1
    # print('average article_original len - ', sum_len/count)
    print('max bert token len:', bert_tok_num)
    print('max gpt token len:', gpt_tok_num)
Example #3
    def _load_custom_model(self, bert_name):
        custom_config = AutoConfig.from_pretrained(bert_name)
        custom_config.output_hidden_states = True
        custom_tokenizer = get_tokenizer()
        custom_model = AutoModel.from_pretrained(bert_name,
                                                 config=custom_config)

        self.model = Summarizer(custom_model=custom_model,
                                custom_tokenizer=custom_tokenizer)
Example #4
    def __init__(
            self,
            file_path="wellness_classification.txt",
            num_label=359,
            device='cpu',
            max_seq_len=512,  # KoBERT max_length
            tokenizer=None):
        self.file_path = file_path
        self.device = device
        self.data = []
        tokenizer = tokenizer or get_tokenizer()
        labels = {}

        with open(self.file_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.rstrip()
                if not line:
                    continue
                datas = line.split('\t')
                index_of_words = tokenizer.encode(datas[0])[:max_seq_len]  # truncate so the padding below never goes negative
                token_type_ids = [0] * len(index_of_words)
                attention_mask = [1] * len(index_of_words)

                # Padding Length
                padding_length = max_seq_len - len(index_of_words)

                # Zero Padding
                index_of_words += [0] * padding_length
                token_type_ids += [0] * padding_length
                attention_mask += [0] * padding_length

                labels.setdefault(datas[1], len(labels))

                # Label
                label = int(labels[datas[1]])

                data = {
                    'input_ids': torch.tensor(index_of_words).to(self.device),
                    'token_type_ids':
                    torch.tensor(token_type_ids).to(self.device),
                    'attention_mask':
                    torch.tensor(attention_mask).to(self.device),
                    'labels': torch.tensor(label).to(self.device)
                }

                self.data.append(data)
        print(f"last label index: {label}")
        print(f"number of labels: {len(labels)}")
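Only __init__ is shown for this dataset class; as a PyTorch Dataset it presumably also defines __len__ and __getitem__ along these lines (a sketch, not code from the original):

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]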
Example #5
def create_loader(data_dir, model, mode, batch_size, ratio=0.8):
    dataset = pd.read_csv(data_dir)
    if 'bert' in model:
        tokenizer = get_tokenizer()
    else:
        tokenizer = ElectraTokenizer.from_pretrained(
            "monologg/koelectra-base-v3-discriminator")
    if mode == 'train':
        data, data_label = dataset['content'], dataset['info']
        encodings = tokenizer(list(data.values), truncation=True, padding=True)
        dataset = NewsDataset(encodings, data_label)
        data_loader = DataLoader(dataset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=4)
        return data_loader, None
    elif mode == 'val':
        data, val, data_label, val_label = train_test_split(
            dataset['content'],
            dataset['info'],
            train_size=ratio,
            stratify=dataset['info'])
        encodings = tokenizer(list(data.values), truncation=True, padding=True)
        val_encodings = tokenizer(list(val.values),
                                  truncation=True,
                                  padding=True)
        dataset = NewsDataset(encodings, data_label)
        val_dataset = NewsDataset(val_encodings, val_label)
        data_loader = DataLoader(dataset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=4)
        val_loader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=True,
                                num_workers=4)
        return data_loader, val_loader
    else:
        data = dataset['content']
        encodings = tokenizer(list(data.values), truncation=True, padding=True)
        dataset = NewsDataset(encodings)
        data_loader = DataLoader(dataset,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=4)
        return data_loader, None
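A hypothetical call of create_loader (the CSV path is illustrative; the file is expected to contain 'content' and 'info' columns):

train_loader, val_loader = create_loader("./data/news.csv",
                                         model="kobert",
                                         mode="val",
                                         batch_size=32)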
Example #6
  def __init__(self,
               file_path = "../data/wellness_dialog_for_text_classification.txt",
               num_label = 359,
               device = 'cpu',
               max_seq_len = 512, # KoBERT max_length
               tokenizer = None
               ):
    self.file_path = file_path
    self.device = device
    self.data =[]
    self.tokenizer = tokenizer if tokenizer is not None else get_tokenizer()


    file = open(self.file_path, 'r', encoding='utf-8')

    while True:
      line = file.readline()
      if not line:
        break
      datas = line.split("    ")
      index_of_words = self.tokenizer.encode(datas[0])[:max_seq_len]  # use the resolved tokenizer; truncate to max_seq_len
      token_type_ids = [0] * len(index_of_words)
      attention_mask = [1] * len(index_of_words)

      # Padding Length
      padding_length = max_seq_len - len(index_of_words)

      # Zero Padding
      index_of_words += [0] * padding_length
      token_type_ids += [0] * padding_length
      attention_mask += [0] * padding_length

      # Label
      label = int(datas[1][:-1])

      data = {
              'input_ids': torch.tensor(index_of_words).to(self.device),
              'token_type_ids': torch.tensor(token_type_ids).to(self.device),
              'attention_mask': torch.tensor(attention_mask).to(self.device),
              'labels': torch.tensor(label).to(self.device)
             }

      self.data.append(data)

    file.close()
Example #7
    def __init__(self):
        self.root_path = '..'
        self.checkpoint_path = f"{self.root_path}/checkpoint"
        self.save_ckpt_path = f"{self.checkpoint_path}/kobert-wellnesee-text-classification.pth"
        # Load the answers and categories
        self.category, self.answer = load_wellness_answer()

        ctx = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = torch.device(ctx)

        # Load the saved checkpoint
        checkpoint = torch.load(self.save_ckpt_path, map_location=self.device)

        self.model = KoBERTforSequenceClassfication()
        self.model.load_state_dict(checkpoint['model_state_dict'])

        self.model.eval()

        self.tokenizer = get_tokenizer()
Example #8
 def __init__(self, config):
   """
   Constructor for EmbeddingBERTWordPhr_kor.
   
   @param self The object pointer.
   @param config Dictionary. Configuration for EmbeddingBERTWordPhr_kor
   """
   super(EmbeddingBERTWordPhr_kor, self).__init__()
   self.tokenizer = get_tokenizer()
   self.model = BertModel.from_pretrained('monologg/kobert')
   self.embed_size = 1536
   self.special_tokens = config['special_tokens']
   self.fine_tune = bool(config['train'])
   if self.fine_tune:
     self.model.train()
     # requires_grad must be set per parameter; setting it on the module itself has no effect
     for param in self.model.parameters():
       param.requires_grad = True
   else:
     self.model.eval()
     for param in self.model.parameters():
       param.requires_grad = False
Example #9
    def setting_similarity(standard, targets):
        # combine separated sentences to the only one sentence & setting encoding form
        def setting_encoding_form(separated_sentences_list):
            for idx, content in enumerate(separated_sentences_list):
                res = ""
                for sentence in content:
                    res += sentence + " "
                separated_sentences_list[idx] = "[CLS] " + res + "[SEP]"
            return separated_sentences_list

        # calculate similarity
        def cos_sim(A, B):
            return dot(A, B) / (norm(A) * norm(B))

        # merge data & separate from ids to contents
        merge_data = standard + targets
        ids = list(i[0] for i in merge_data)
        contents = list(i[1] for i in merge_data)

        # similarity function
        contents = setting_encoding_form(contents)
        tokenizer = get_tokenizer()
        tokenized_texts = [tokenizer.tokenize(content) for content in contents]
        input_ids = [
            tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts
        ]
        input_ids = pad_sequences(input_ids,
                                  maxlen=1000,
                                  dtype=int,
                                  truncating="post",
                                  padding="post")

        res = {}
        for i in range(1, len(input_ids)):
            similar_val = round(cos_sim(input_ids[0], input_ids[i]) * 100, 1)
            res.update({ids[i]: {'similarity': similar_val}})
        return res
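A quick standalone check of the cos_sim helper above (a sketch assuming dot and norm come from NumPy, as the snippet implies):

import numpy as np

a = np.array([1.0, 0.0, 1.0])
b = np.array([1.0, 1.0, 1.0])
print(round(float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))), 3))  # ~0.816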
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_type",
                        default=None,
                        type=str,
                        required=True,
                        help="Model type selected in the list: " +
                        ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path",
                        default=None,
                        type=str,
                        required=True,
                        help="Path to pre-trained model or shortcut name")
    parser.add_argument("--prompt", type=str, default="")
    parser.add_argument("--padding_text", type=str, default="")
    parser.add_argument("--length", type=int, default=20)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    args = parser.parse_args()

    args.device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()

    set_seed(args)

    args.model_type = args.model_type.lower()
    model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    # This, too, should be args-based
    #tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    tokenizer = get_tokenizer()
    model = model_class.from_pretrained(args.model_name_or_path)
    model.to(args.device)
    model.eval()

    if args.length < 0 and model.config.max_position_embeddings > 0:
        args.length = model.config.max_position_embeddings
    elif 0 < model.config.max_position_embeddings < args.length:
        args.length = model.config.max_position_embeddings  # No generation bigger than model size
    elif args.length < 0:
        args.length = MAX_LENGTH  # avoid infinite loop

    print(args)
    while True:
        raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
        if args.model_type in ["transfo-xl", "xlnet"]:
            # Models with memory like to have a long prompt for short inputs.
            raw_text = (args.padding_text
                        if args.padding_text else PADDING_TEXT) + raw_text
        context_tokens = tokenizer.encode(raw_text)
        out = sample_sequence(
            model=model,
            context=context_tokens,
            length=args.length,
            temperature=args.temperature,
            top_k=args.top_k,
            top_p=args.top_p,
            device=args.device,
            is_xlnet=bool(args.model_type == "xlnet"),
        )
        out = out[0, len(context_tokens):].tolist()
        text = tokenizer.decode(out, clean_up_tokenization_spaces=True)
        print(text)
        if args.prompt:
            break
    return text
Example #11
def main(conf):
    # Prepare data
    train_dev = koco.load_dataset("korean-hate-speech", mode="train_dev")
    train, valid = train_dev["train"], train_dev["dev"]

    # Prepare tokenizer
    tokenizer = (
        get_tokenizer()
        if "kobert" in conf.pretrained_model
        else AutoTokenizer.from_pretrained(conf.pretrained_model)
    )
    if conf.tokenizer.register_names:
        names = pd.read_csv("entertainement_biographical_db.tsv", sep="\t")[
            "name_wo_parenthesis"
        ].tolist()
        tokenizer.add_tokens(names)

    # Mapping string y_label to integer label
    if conf.label.hate:
        train, label2idx = map_label2idx(train, "hate")
        valid, _ = map_label2idx(valid, "hate")
    elif conf.label.bias:
        train, label2idx = map_label2idx(train, "bias")
        valid, _ = map_label2idx(valid, "bias")

    # Use bias as an additional context for predicting hate
    if conf.label.hate and conf.label.bias:
        biases = ["gender", "others", "none"]
        tokenizer.add_tokens([f"<{label}>" for label in biases])

    # Prepare DataLoader
    train_dataset = KoreanHateSpeechDataset(train)
    valid_dataset = KoreanHateSpeechDataset(valid)
    collator = KoreanHateSpeechCollator(
        tokenizer, predict_hate_with_bias=(conf.label.hate and conf.label.bias)
    )
    train_loader = DataLoader(
        train_dataset,
        batch_size=conf.train_hparams.batch_size,
        shuffle=True,
        collate_fn=collator.collate,
    )
    valid_loader = DataLoader(
        valid_dataset,
        batch_size=conf.train_hparams.batch_size,
        shuffle=False,
        collate_fn=collator.collate,
    )

    # Prepare model
    set_seeds(conf.train_hparams.seed)
    model = BertForSequenceClassification.from_pretrained(
        conf.pretrained_model, num_labels=len(label2idx)
    )
    if conf.tokenizer.register_names:
        model.resize_token_embeddings(len(tokenizer))
    elif conf.label.hate and conf.label.bias:
        model.resize_token_embeddings(len(tokenizer))
    model = model.to(device)

    # Prepare optimizer and scheduler
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.01,
        },
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = optim.AdamW(
        optimizer_grouped_parameters,
        lr=conf.train_hparams.lr,
        eps=conf.train_hparams.adam_epsilon,
    )

    n_total_iterations = len(train_loader) * conf.train_hparams.n_epochs
    n_warmup_steps = int(n_total_iterations * conf.train_hparams.warmup_ratio)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, n_warmup_steps, n_total_iterations
    )

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device)

    # Train!
    trainer = BertTrainer(conf.train_hparams)
    model = trainer.train(
        model, criterion, optimizer, scheduler, train_loader, valid_loader
    )

    makedirs(conf.checkpoint_dir)
    makedirs(conf.log_dir)
    checkpoint_path = f"{conf.checkpoint_dir}/{conf.model_name}.pt"
    log_path = f"{conf.log_dir}/{conf.model_name}.log"
    torch.save({"model": model.state_dict()}, checkpoint_path)
    torch.save({"config": conf, "classes": label2idx, "tokenizer": tokenizer}, log_path)
Example #12
    def __init__(
            self,
            data_path='./data/train.jsonl',
            num_label=2,  # two classes: sentences to extract vs. not to extract
            device='cpu',
            max_seq_len=512,  # KoBERT max_length
    ):
        self.device = device
        self.data = []
        self.tokenizer = get_tokenizer()

        cls_token_id = self.tokenizer.cls_token_id  # [CLS]
        sep_token_id = self.tokenizer.sep_token_id  # [SEP]
        pad_token_id = self.tokenizer.pad_token_id  # [PAD]

        jsonl_datas = jsonl_load(data_path=data_path)
        # for dict_data in jsonl_datas:
        for dict_data in tqdm(jsonl_datas):
            articles = dict_data['article_original']
            extractive_indices = dict_data['extractive']

            index_of_words = None
            token_type_ids = None
            label = None
            token_num = None

            token_type_state = False

            for idx in range(len(articles)):
                label_state = True if idx in extractive_indices else False

                if idx == 0:  # the very first sentence
                    index_of_words = [cls_token_id]
                    token_type_ids = [int(token_type_state)]
                    label = [int(label_state)]
                    token_num = 1

                article = articles[idx]
                tmp_index = self.tokenizer.encode(article,
                                                  add_special_tokens=False)
                num_tmp_index = len(tmp_index) + 1

                if token_num + num_tmp_index <= max_seq_len:
                    index_of_words += tmp_index + [sep_token_id]
                    token_type_ids += [int(token_type_state)] * num_tmp_index

                    label += [int(label_state)] * num_tmp_index
                    token_num += num_tmp_index
                    token_type_state = not token_type_state

                if token_num + num_tmp_index > max_seq_len or idx == len(
                        articles) - 1:
                    # attention mask
                    attention_mask = [1] * token_num

                    # Padding Length
                    padding_length = max_seq_len - token_num

                    # Padding
                    index_of_words += [pad_token_id
                                       ] * padding_length  # [PAD] padding
                    token_type_ids += [
                        token_type_state
                    ] * padding_length  # last token_type_state padding
                    attention_mask += [0] * padding_length  # zero padding

                    # Label Zero Padding
                    label += [0] * padding_length

                    # Data Append
                    data = {
                        'input_ids':
                        torch.tensor(index_of_words).to(self.device),
                        'token_type_ids':
                        torch.tensor(token_type_ids).to(self.device),
                        'attention_mask':
                        torch.tensor(attention_mask).to(self.device),
                        'labels':
                        torch.tensor(label).to(self.device)
                    }
                    self.data.append(data)

                    # Data Initialization
                    index_of_words = [cls_token_id]
                    token_type_ids = [int(token_type_state)]
                    label = [int(label_state)]
                    token_num = 1
                    token_type_state = False
Example #13
def gen_input_ids(tokenizer, sentence):
    target = []
    valid_length = []
    for s in sentence:
        s = "[CLS] " + s
        input_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s))[:32]  # cap at the fixed length of 32
        valid_length.append(len(input_ids))
        target_ids = [1] * 32
        target_ids[:len(input_ids)] = input_ids
        target.append(target_ids)
    return target, valid_length
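The driver code below also calls gen_attention_mask, which is not shown in this example; a minimal sketch, assuming it marks the first valid_length positions of each row with 1 and the padding with 0:

def gen_attention_mask(input_ids, valid_length):
    # 1 over real tokens, 0 over padding; row length matches the fixed size used above
    mask = []
    for row, length in zip(input_ids, valid_length):
        mask.append([1] * length + [0] * (len(row) - length))
    return mask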


from kobert_transformers import get_kobert_model
from kobert_transformers import get_tokenizer

tokenizer = get_tokenizer()
input_ids, valid_length = gen_input_ids(
    tokenizer=tokenizer, sentence=["한국어 모델을 공유합니다.", "두번째 문장입니다."])

model = get_kobert_model()
model.eval()

input_ids = torch.LongTensor(input_ids)
attention_mask = gen_attention_mask(input_ids, valid_length)
attention_mask = torch.LongTensor(attention_mask)
token_type_ids = torch.zeros_like(input_ids)
sequence_output, pooled_output = model(input_ids, attention_mask,
                                       token_type_ids)

pooled_output
Example #14
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_data_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The input training data file (a text file).")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--eval_data_file",
        default=None,
        type=str,
        help=
        "An optional input evaluation data file to evaluate the perplexity on (a text file)."
    )

    parser.add_argument("--model_type",
                        default="bert",
                        type=str,
                        help="The model architecture to be fine-tuned.")
    parser.add_argument(
        "--model_name_or_path",
        default="bert-base-cased",
        type=str,
        help="The model checkpoint for weights initialization.")

    parser.add_argument(
        "--mlm",
        action='store_true',
        help=
        "Train with masked-language modeling loss instead of language modeling."
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Ratio of tokens to mask for masked language modeling loss")

    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help=
        "Optional pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help=
        "Optional pretrained tokenizer name or path if not the same as model_name_or_path"
    )
    parser.add_argument("--tokenizer_class",
                        default="",
                        type=str,
                        help="Optional pretrained tokenizer clas")
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)"
    )
    parser.add_argument(
        "--block_size",
        default=-1,
        type=int,
        help="Optional input sequence length after tokenization."
        "The training dataset will be truncated in block of this size for training."
        "Default to the model max input length for single sentence inputs (take into account special tokens)."
    )
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training",
        action='store_true',
        help="Run evaluation during training at each logging step.")
    parser.add_argument('--eval_steps',
                        type=int,
                        default=100,
                        help="Evaluate every X updates steps.")
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size",
                        default=4,
                        type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size",
                        default=4,
                        type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon",
                        default=1e-6,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs",
                        default=1.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=
        "If > 0: set total number of training steps to perform. Override num_train_epochs."
    )
    parser.add_argument("--warmup_samples",
                        default=0,
                        type=int,
                        help="Linear warmup over warmup_samples.")
    parser.add_argument("--lr_decay",
                        action='store_true',
                        help="Decay LR using get_linear_schedule_with_warmup.")

    parser.add_argument(
        "--unfreeze_level",
        default=-1,
        type=int,
        help="If > 0: freeze all layers except few first and last.")

    parser.add_argument('--logging_steps',
                        type=int,
                        default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps',
                        type=int,
                        default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument(
        '--save_total_limit',
        type=int,
        default=None,
        help=
        'Limit the total number of checkpoints; older checkpoints in output_dir are deleted (no deletion by default)'
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action='store_true',
        help=
        "Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number"
    )
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir',
                        action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument(
        '--overwrite_cache',
        action='store_true',
        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")

    parser.add_argument(
        '--fp16',
        action='store_true',
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument(
        '--fp16_opt_level',
        type=str,
        default='O1',
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip',
                        type=str,
                        default='',
                        help="For distant debugging.")
    parser.add_argument('--server_port',
                        type=str,
                        default='',
                        help="For distant debugging.")
    args = parser.parse_args()

    if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
        raise ValueError(
            "BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
            "flag (masked language modeling).")
    if args.eval_data_file is None and args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument.")

    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir
    ) and args.do_train and not args.overwrite_output_dir:
        raise ValueError(
            f"Output directory ({args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1),
        args.fp16)

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier(
        )  # Barrier to make sure only the first process in distributed training download model & vocab

    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path)
    #if args.tokenizer_class: tokenizer_class = globals()[args.tokenizer_class]
    #tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    # Okay, okay, I know, will be selectable from commandline in some future
    tokenizer = get_tokenizer()
    if args.block_size <= 0:
        args.block_size = tokenizer.max_len_single_sentence  # Our input block size will be the max possible for the model
    args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool('.ckpt' in args.model_name_or_path),
        config=config)
    model.to(args.device)

    print(200 * '/')
    print(
        len([
            param for item in flatten_model(model)
            for param in item.parameters() if param.requires_grad
        ]))  # freeze all layers but few first and last
    if args.unfreeze_level >= 0:
        flat = flatten_model(model)
        flat = [item for item in flat if list(item.parameters())]
        i_start = 3
        i_end = 1
        need_grads = set(flat[:i_start + args.unfreeze_level * 3]) | set(
            flat[-(i_end + args.unfreeze_level * 3):])
        for item in flat:
            requires_grad(item, item in need_grads)
        print(200 * '/')
        print(
            len([
                param for item in flatten_model(model)
                for param in item.parameters() if param.requires_grad
            ]))

    if args.local_rank == 0:
        torch.distributed.barrier(
        )  # End of barrier to make sure only the first process in distributed training download model & vocab

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        if args.local_rank not in [-1, 0]:
            torch.distributed.barrier(
            )  # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache

        train_dataset = load_and_cache_examples(args,
                                                tokenizer,
                                                evaluate=False)

        if args.local_rank == 0:
            torch.distributed.barrier()

        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step,
                    tr_loss)

    # Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
    if args.do_train and (args.local_rank == -1
                          or torch.distributed.get_rank() == 0):
        save_state(args, model, tokenizer, global_step)

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME,
                              recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split(
                '-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict(
                (k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)

    return results
Example #15
 def __init__(self):
     self.tokenizer = get_tokenizer()
     self.token2idx = self.tokenizer.token2idx
     self.idx2token = {v: k for k, v in self.token2idx.items()}