Example #1
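    # Exports a standalone TokenClassifier module to ONNX via the test's export helper.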
    def test_TokenClassifier_module_onnx_export(self):
        t_class = nemo_nlp.TokenClassifier(hidden_size=512, num_classes=16, use_transformer_pretrained=False)
        self.__test_export_route(
            module=t_class,
            out_name="t_class.onnx",
            mode=nemo.core.DeploymentFormat.ONNX,
            input_example=torch.randn(16, 16, 512).cuda(),
        )
Example #2
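# Punctuation and capitalization restoration (inference): one pretrained BERT encoder feeds two TokenClassifier heads.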
pretrained_bert_model = nemo_nlp.huggingface.BERT(
    pretrained_model_name=args.pretrained_bert_model)
hidden_size = pretrained_bert_model.local_parameters["hidden_size"]
tokenizer = NemoBertTokenizer(args.pretrained_bert_model)

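# Data layer that converts the raw text queries into BERT input tensors (batch size 1).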
data_layer = nemo_nlp.BertTokenClassificationInferDataLayer(
    queries=args.queries,
    tokenizer=tokenizer,
    max_seq_length=args.max_seq_length,
    batch_size=1,
)

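# Two token-level heads on top of the shared encoder: one predicts punctuation, the other capitalization.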
punct_classifier = nemo_nlp.TokenClassifier(
    hidden_size=hidden_size,
    num_classes=len(punct_labels_dict),
    dropout=args.fc_dropout,
    num_layers=args.punct_num_fc_layers,
    name='Punctuation',
)

capit_classifier = nemo_nlp.TokenClassifier(
    hidden_size=hidden_size,
    num_classes=len(capit_labels_dict),
    dropout=args.fc_dropout,
    name='Capitalization',
)

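# Wire the inference graph: the data layer's outputs feed the BERT encoder.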
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask = data_layer()

hidden_states = pretrained_bert_model(
    input_ids=input_ids,
    token_type_ids=input_type_ids,
    attention_mask=input_mask,
)

# both heads consume the same BERT hidden states
punct_logits = punct_classifier(hidden_states=hidden_states)
capit_logits = capit_classifier(hidden_states=hidden_states)
Example #3
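    # Fine-tunes BERT on SQuAD v1.1 with a 2-class TokenClassifier head producing answer start/end logits.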
    def test_squad_v1(self):
        version_2_with_negative = False
        pretrained_bert_model = 'bert-base-uncased'
        batch_size = 3
        data_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '../data/nlp/squad/v1.1'))
        max_query_length = 64
        max_seq_length = 384
        doc_stride = 128
        max_steps = 100
        lr_warmup_proportion = 0
        eval_step_freq = 50
        lr = 3e-6
        do_lower_case = True
        n_best_size = 5
        max_answer_length = 20
        null_score_diff_threshold = 0.0

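        # Instantiate the tokenizer, NeMo factory, BERT encoder, 2-class QA head, and span loss.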
        tokenizer = nemo_nlp.NemoBertTokenizer(pretrained_bert_model)
        neural_factory = nemo.core.NeuralModuleFactory(
            backend=nemo.core.Backend.PyTorch,
            local_rank=None,
            create_tb_writer=False,
        )
        model = nemo_nlp.huggingface.BERT(
            pretrained_model_name=pretrained_bert_model)
        hidden_size = model.local_parameters["hidden_size"]
        qa_head = nemo_nlp.TokenClassifier(
            hidden_size=hidden_size,
            num_classes=2,
            num_layers=1,
            log_softmax=False,
        )
        squad_loss = nemo_nlp.QuestionAnsweringLoss()

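        # Training pipeline: SQuAD data layer -> BERT -> QA head -> span loss.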
        data_layer = nemo_nlp.BertQuestionAnsweringDataLayer(
            mode='train',
            version_2_with_negative=version_2_with_negative,
            batch_size=batch_size,
            tokenizer=tokenizer,
            data_dir=data_dir,
            max_query_length=max_query_length,
            max_seq_length=max_seq_length,
            doc_stride=doc_stride,
        )

        (
            input_ids,
            input_type_ids,
            input_mask,
            start_positions,
            end_positions,
            _,
        ) = data_layer()

        hidden_states = model(
            input_ids=input_ids,
            token_type_ids=input_type_ids,
            attention_mask=input_mask,
        )

        qa_output = qa_head(hidden_states=hidden_states)
        loss, _, _ = squad_loss(
            logits=qa_output,
            start_positions=start_positions,
            end_positions=end_positions,
        )

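        # Evaluation pipeline on the dev split, reusing the same BERT encoder and QA head.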
        data_layer_eval = nemo_nlp.BertQuestionAnsweringDataLayer(
            mode='dev',
            version_2_with_negative=version_2_with_negative,
            batch_size=batch_size,
            tokenizer=tokenizer,
            data_dir=data_dir,
            max_query_length=max_query_length,
            max_seq_length=max_seq_length,
            doc_stride=doc_stride,
        )
        (
            input_ids_eval,
            input_type_ids_eval,
            input_mask_eval,
            start_positions_eval,
            end_positions_eval,
            unique_ids_eval,
        ) = data_layer_eval()

        hidden_states_eval = model(
            input_ids=input_ids_eval,
            token_type_ids=input_type_ids_eval,
            attention_mask=input_mask_eval,
        )

        qa_output_eval = qa_head(hidden_states=hidden_states_eval)
        _, start_logits_eval, end_logits_eval = squad_loss(
            logits=qa_output_eval,
            start_positions=start_positions_eval,
            end_positions=end_positions_eval,
        )
        eval_output = [start_logits_eval, end_logits_eval, unique_ids_eval]

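        # Callbacks: log the training loss every 10 steps and run SQuAD evaluation every eval_step_freq steps.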
        callback_train = nemo.core.SimpleLossLoggerCallback(
            tensors=[loss],
            print_func=lambda x: print("Loss: {:.3f}".format(x[0].item())),
            get_tb_values=lambda x: [["loss", x[0]]],
            step_freq=10,
            tb_writer=neural_factory.tb_writer,
        )

        callbacks_eval = nemo.core.EvaluatorCallback(
            eval_tensors=eval_output,
            user_iter_callback=lambda x, y: eval_iter_callback(x, y),
            user_epochs_done_callback=lambda x: eval_epochs_done_callback(
                x,
                eval_data_layer=data_layer_eval,
                do_lower_case=do_lower_case,
                n_best_size=n_best_size,
                max_answer_length=max_answer_length,
                version_2_with_negative=version_2_with_negative,
                null_score_diff_threshold=null_score_diff_threshold,
            ),
            tb_writer=neural_factory.tb_writer,
            eval_step=eval_step_freq,
        )

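        # Learning-rate policy: linear warmup followed by annealing over max_steps.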
        lr_policy_fn = get_lr_policy(
            'WarmupAnnealing',
            total_steps=max_steps,
            warmup_ratio=lr_warmup_proportion,
        )

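        # Launch training with the adam_w optimizer and the schedule above.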
        neural_factory.train(
            tensors_to_optimize=[loss],
            callbacks=[callback_train, callbacks_eval],
            lr_policy=lr_policy_fn,
            optimizer='adam_w',
            optimization_params={
                "max_steps": max_steps,
                "lr": lr
            },
        )
Example #4
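# Token classification inference (e.g. NER): BERT encoder plus a single TokenClassifier head, evaluated with nf.infer.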
labels_dict = get_vocab(args.labels_dict)

""" Load the pretrained BERT parameters
To see the list of pretrained models, call:
nemo_nlp.huggingface.BERT.list_pretrained_models()
"""
pretrained_bert_model = nemo_nlp.huggingface.BERT(pretrained_model_name=args.pretrained_bert_model)
hidden_size = pretrained_bert_model.local_parameters["hidden_size"]
tokenizer = NemoBertTokenizer(args.pretrained_bert_model)

data_layer = nemo_nlp.BertTokenClassificationInferDataLayer(
    queries=args.queries, tokenizer=tokenizer, max_seq_length=args.max_seq_length, batch_size=1,
)

classifier = nemo_nlp.TokenClassifier(hidden_size=hidden_size, num_classes=len(labels_dict), dropout=args.fc_dropout,)

input_ids, input_type_ids, input_mask, _, subtokens_mask = data_layer()

hidden_states = pretrained_bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask,)
logits = classifier(hidden_states=hidden_states)

###########################################################################

# Run inference over the graph to collect the classifier logits and subtoken masks
evaluated_tensors = nf.infer(tensors=[logits, subtokens_mask], checkpoint_dir=args.work_dir,)


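# Helper to merge the per-batch tensors returned by infer into single NumPy arrays.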
def concatenate(lists):
    return np.concatenate([t.cpu() for t in lists])
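

# Possible continuation (not part of the original snippet; assumes evaluated_tensors
# holds the per-batch lists for [logits, subtokens_mask] in the order they were requested):
# merge the batches, then take the argmax over classes for each token.
logits, subtokens_mask = [concatenate(tensors) for tensors in evaluated_tensors]
preds = np.argmax(logits, axis=2)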
Example #5
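# Transformer language model: a 1-layer TokenClassifier serves as the vocabulary projection (log-softmax) layer.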
encoder = nemo_nlp.TransformerEncoderNM(
    d_model=args.d_model,
    d_inner=args.d_inner,
    num_layers=args.num_layers,
    embedding_dropout=args.embedding_dropout,
    num_attn_heads=args.num_attn_heads,
    ffn_dropout=args.ffn_dropout,
    vocab_size=vocab_size,
    mask_future=True,
    attn_score_dropout=args.attn_score_dropout,
    attn_layer_dropout=args.attn_layer_dropout,
    max_seq_length=args.max_seq_length,
)

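# The projection head maps d_model hidden states to vocab_size log-probabilities.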
log_softmax = nemo_nlp.TokenClassifier(args.d_model, num_classes=vocab_size, num_layers=1, log_softmax=True)

loss = nemo_nlp.PaddedSmoothedCrossEntropyLossNM(pad_id=tokenizer.pad_id(), label_smoothing=args.label_smoothing)

# tie the weights of the embedding and the log_softmax projection layers
log_softmax.mlp.last_linear_layer.weight = encoder.embedding_layer.token_embedding.weight


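# Builds a data layer -> encoder -> projection -> loss pipeline for the given dataset split.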
def create_pipeline(
    dataset, max_seq_length=args.max_seq_length, batch_step=args.max_seq_length, batch_size=args.batch_size,
):
    data_layer = nemo_nlp.LanguageModelingDataLayer(
        dataset, tokenizer, max_seq_length, batch_step, batch_size=batch_size
    )
    src, src_mask, labels = data_layer()
    src_hiddens = encoder(input_ids=src, input_mask_src=src_mask)
    logits = log_softmax(hidden_states=src_hiddens)
    return loss(logits=logits, target_ids=labels)
Example #6
File: squad.py Project: vsl9/NeMo
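    # Load BERT either from a JSON config file or by name as a pretrained model.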
    if args.bert_config is not None:
        with open(args.bert_config) as json_file:
            config = json.load(json_file)
        model = nemo_nlp.huggingface.BERT(**config)
    else:
        """ Use this if you're using a standard BERT model.
        To see the list of pretrained models, call:
        nemo_nlp.huggingface.BERT.list_pretrained_models()
        """
        model = nemo_nlp.huggingface.BERT(
            pretrained_model_name=args.pretrained_bert_model)

    hidden_size = model.local_parameters["hidden_size"]

    qa_head = nemo_nlp.TokenClassifier(hidden_size=hidden_size,
                                       num_classes=2,
                                       num_layers=1,
                                       log_softmax=False)
    squad_loss = nemo_nlp.QuestionAnsweringLoss()
    if args.bert_checkpoint is not None:
        model.restore_from(args.bert_checkpoint)

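    # Build the training pipeline unless the script runs in evaluation-only mode.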
    if not args.evaluation_only:
        train_loss, train_steps_per_epoch, _, _ = create_pipeline(
            data_dir=args.data_dir,
            model=model,
            head=qa_head,
            loss_fn=squad_loss,
            max_query_length=args.max_query_length,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            batch_size=args.batch_size,