Code Example #1
def test_sit_auto_mix_precision_model_o0():
    input_data = np.random.randn(32, 3, 224, 224).astype(np.float32)
    dataset1 = FakeData(size=32,
                        batch_size=32,
                        image_size=(3, 224, 224),
                        num_classes=10,
                        fakedata_mode=FakeDataInitMode.OnesInit)
    dataset1.set_label_data_type(np.float16)
    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(save_graphs=True, save_graphs_path='./test_amp_o0')
    net = Net(3, 10)
    net.to_float(dtype.float16)
    opt = nn.Momentum(params=net.trainable_params(),
                      learning_rate=0.001,
                      momentum=0.0009)
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
    model = Model(net, loss, opt, amp_level="O0")
    model.train(1, dataset1, dataset_sink_mode=False)
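    # count the Cast operators recorded in the saved validate IR to check how many casts AMP level O0 inserted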
    contend = read_validateir_file('./test_amp_o0')
    castnum = re.findall("Cast", contend)
    assert len(castnum) == 17
    model.predict(Tensor(input_data))
    contend = read_validateir_file('./test_amp_o0')
    castnum = re.findall("Cast", contend)
    assert len(castnum) == 11
Code Example #2
def test_sit_auto_mix_precision_model_o2():
    input_data = np.random.randn(32, 3, 224, 224).astype(np.float32)
    dataset1 = FakeData(size=32,
                        batch_size=32,
                        image_size=(3, 224, 224),
                        num_classes=10,
                        fakedata_mode=FakeDataInitMode.OnesInit)
    dataset2 = FakeData(size=32,
                        batch_size=32,
                        image_size=(3, 224, 224),
                        num_classes=10,
                        fakedata_mode=FakeDataInitMode.OnesInit)
    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(save_graphs=True, save_graphs_path='./test_amp_o2')
    net = Net(3, 10)
    opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.001, momentum=0.0009)
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
    model = Model(net, loss, opt, amp_level="O2")
    model.train(1, dataset1, dataset_sink_mode=False)
    contend = read_validateir_file('./test_amp_o2')
    castnum = re.findall("Cast", contend)
    assert len(castnum) == 14
    clean_all_ir_files('./test_amp_o2')
    out_graph = model.predict(Tensor(input_data))

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    net_pynative = Net(3, 10)
    opt_pynative = nn.Momentum(params=net_pynative.trainable_params(), learning_rate=0.001, momentum=0.0009)
    loss_pynative = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
    model_pynative = Model(net_pynative, loss_pynative, opt_pynative, amp_level="O2")
    model_pynative.train(1, dataset2, dataset_sink_mode=False)
    out_pynative = model_pynative.predict(Tensor(input_data))
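    # graph-mode and PyNative-mode predictions are expected to agree within the given tolerance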
    allclose_nparray(out_graph.asnumpy(), out_pynative.asnumpy(), 0.001, 0.001)
Code Example #3
def test_train_32k_8p(batch_size=32, num_classes=32768):
    dev_num = 8
    context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL,
                                      device_num=dev_num)
    set_algo_parameters(elementwise_op_strategy_follow=True)
    np.random.seed(6)
    input_np = Tensor(np.ones([batch_size, 3, 224, 224]).astype(np.float32))
    net = resnet50(num_classes)
    model = Model(net)
    model.predict(input_np)
Code Example #4
File: dataset.py Project: huxian123/mindspore
def extract_features(net, dataset_path, config):
    features_folder = dataset_path + '_features'
    if not os.path.exists(features_folder):
        os.makedirs(features_folder)
    dataset = create_dataset(dataset_path=dataset_path,
                             do_train=False,
                             config=config,
                             repeat_num=1)
    step_size = dataset.get_dataset_size()
    pbar = tqdm(list(dataset.create_dict_iterator(output_numpy=True)))
    model = Model(net)
    i = 0
    for data in pbar:
        features_path = os.path.join(features_folder, f"feature_{i}.npy")
        label_path = os.path.join(features_folder, f"label_{i}.npy")
        if not (os.path.exists(features_path) and os.path.exists(label_path)):
            image = data["image"]
            label = data["label"]
            features = model.predict(Tensor(image))
            np.save(features_path, features.asnumpy())
            np.save(label_path, label)
        pbar.set_description("Process dataset batch: %d" % (i + 1))
        i += 1

    return step_size
Code Example #5
def me_un_seg_sum(input_, indices, num_segments):
    context.set_context(mode=context.GRAPH_MODE)
    net = Net(num_segments)
    net.set_train()
    model = Model(net)
    out = model.predict(Tensor(input_), Tensor(indices))
    return out.asnumpy()
Code Example #6
def do_eval(dataset=None, network=None, metric=None, load_checkpoint_path="", eval_type=None):
    """
    Do eval
    Args:
        dataset: the eval dataset.
        network:  the network with loss.
        metric: the evaluation method.
        load_checkpoint_path: the file path which saved finetuned model checkpoint.
        eval_type: the evaluation type, either "zero-shot" or "finetuned".
    """
    if load_checkpoint_path == "":
        raise ValueError("Finetune model missed, evaluation task must load finetune model!")

    if metric.lower() == "ppl":
        print("Prepare to calculate the ppl score ...")
        gpt2_loss = network(config=gpt2_net_cfg,
                            is_training=True,
                            use_one_hot_embeddings=False)
        gpt2_loss.set_train(False)
        param_dict = load_checkpoint(load_checkpoint_path)

        if eval_type == "zero-shot":
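            # remap the pretrained parameter names onto the evaluation network's 'gpt2.gpt2.' prefix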
            final_param_dict = {}
            for name, _ in param_dict.items():
                final_param_dict['gpt2.gpt2.' + name] = param_dict[name]
            final_param_dict['gpt2.dense1.weight'] = param_dict['gpt2_embedding_lookup.embedding_table']
            load_param_into_net(gpt2_loss, final_param_dict)
            print("load pretrained parameter successfully!\n")
        elif eval_type == "finetuned":
            load_param_into_net(gpt2_loss, param_dict)
            print("load finetuned parameter successfully!\n")
        else:
            raise ValueError("Evaluation type missed, eval_type should be [zero-shot, finetuned]")

        model = Model(gpt2_loss)
        columns_list = ["input_ids", "input_mask", "label_ids"]
        print("==================== [PPL] Testing ====================")
        num_data = 1
        total_loss = 0.0
        avg_loss = 0.0
        for data in dataset.create_dict_iterator():
            input_data = []
            for i in columns_list:
                input_data.append(data[i])
            input_ids, input_mask, label_ids = input_data
            loss = model.predict(input_ids, input_mask, label_ids)
            loss = float(loss.asnumpy())
            total_loss += loss
            avg_loss = float(total_loss / num_data)
            print(" | Current Loss: {:.6f}".format(avg_loss))
            print(" | Current PPL: {}\n\n".format(math.exp(avg_loss)))
            num_data += 1

        print("\n\n")
        print("**************************************************************")
        print("Average Loss: {:.6f}".format(avg_loss))
        print("Average PPL: {:.6f}".format(math.exp(avg_loss)))
        print("********************** Testing Finished **********************")
    else:
        raise ValueError("metric method not supported, support: [ppl]")
Code Example #7
def extract_features(net, dataset_path, config):
    features_folder = dataset_path + '_features'
    if not os.path.exists(features_folder):
        os.makedirs(features_folder)
    dataset = create_dataset(dataset_path=dataset_path,
                             do_train=False,
                             config=config)
    step_size = dataset.get_dataset_size()
    if step_size == 0:
        raise ValueError("The step_size of dataset is zero. Check if the images count of train dataset "
                         "is more than batch_size in config.py")

    model = Model(net)

    for i, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
        features_path = os.path.join(features_folder, f"feature_{i}.npy")
        label_path = os.path.join(features_folder, f"label_{i}.npy")
        if not os.path.exists(features_path) or not os.path.exists(label_path):
            image = data["image"]
            label = data["label"]
            features = model.predict(Tensor(image))
            np.save(features_path, features.asnumpy())
            np.save(label_path, label)
        print(f"Complete the batch {i+1}/{step_size}")
    return step_size
Code Example #8
def do_eval(dataset=None, network=None, num_class=2, assessment_method="accuracy", load_checkpoint_path=""):
    """ do eval """
    if load_checkpoint_path == "":
        raise ValueError("Finetune model missed, evaluation task must load finetune model!")
    net_for_pretraining = network(bert_net_cfg, False, num_class)
    net_for_pretraining.set_train(False)
    param_dict = load_checkpoint(load_checkpoint_path)
    load_param_into_net(net_for_pretraining, param_dict)
    model = Model(net_for_pretraining)

    if assessment_method == "accuracy":
        callback = Accuracy()
    elif assessment_method == "f1":
        callback = F1(False, num_class)
    elif assessment_method == "mcc":
        callback = MCC()
    elif assessment_method == "spearman_correlation":
        callback = Spearman_Correlation()
    else:
        raise ValueError("Assessment method not supported, support: [accuracy, f1, mcc, spearman_correlation]")

    columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
    for data in dataset.create_dict_iterator():
        input_data = []
        for i in columns_list:
            input_data.append(Tensor(data[i]))
        input_ids, input_mask, token_type_id, label_ids = input_data
        logits = model.predict(input_ids, input_mask, token_type_id, label_ids)
        callback.update(logits, label_ids)
    print("==============================================================")
    eval_result_print(assessment_method, callback)
    print("==============================================================")
Code Example #9
def run_fasttext_infer():
    """run infer with FastText"""
    dataset = load_infer_dataset(batch_size=config.batch_size, datafile=args.data_path)
    fasttext_model = FastText(config.vocab_size, config.embedding_dims, config.num_class)

    parameter_dict = load_checkpoint(args.model_ckpt)
    load_param_into_net(fasttext_model, parameter_dict=parameter_dict)

    ft_infer = FastTextInferCell(fasttext_model)

    model = Model(ft_infer)

    predictions = []
    target_sens = []

    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
        target_sens.append(batch['label_idx'])
        src_tokens = Tensor(batch['src_tokens'], mstype.int32)
        src_tokens_length = Tensor(batch['src_tokens_length'], mstype.int32)
        predicted_idx = model.predict(src_tokens, src_tokens_length)
        predictions.append(predicted_idx.asnumpy())

    from sklearn.metrics import accuracy_score, classification_report
    target_sens = np.array(target_sens).flatten()
    predictions = np.array(predictions).flatten()
    acc = accuracy_score(target_sens, predictions)

    result_report = classification_report(target_sens, predictions, target_names=target_label1)
    print("********Accuracy: ", acc)
    print(result_report)
Code Example #10
def me_stridedslice(input1, begin, end, stride):
    input_me = Tensor(input1)
    net = Net(begin, end, stride)
    net.set_train()
    model = Model(net)
    output = model.predict(input_me)
    print(output.asnumpy())
Code Example #11
File: eval.py Project: mindspore-ai/course
def run_transformer_eval(out_url):
    """
    Transformer evaluation.
    """
    context.set_context(mode=context.GRAPH_MODE,
                        device_target="Ascend",
                        reserve_class_name_in_scope=False)

    tfm_model = TransformerModel(config=transformer_net_cfg,
                                 is_training=False,
                                 use_one_hot_embeddings=False)
    print(cfg.model_file)
    parameter_dict = load_weights(cfg.model_file)
    load_param_into_net(tfm_model, parameter_dict)
    tfm_infer = TransformerInferCell(tfm_model)
    model = Model(tfm_infer)

    tokenizer = tokenization.WhiteSpaceTokenizer(vocab_file=cfg.vocab_file)
    dataset = load_test_data(batch_size=cfg.batch_size,
                             data_file=cfg.data_file)
    predictions = []
    source_sents = []
    target_sents = []
    f = open(cfg.token_file, 'w', encoding='utf-8')
    f1 = open(cfg.pred_file, 'w', encoding='utf-8')
    f2 = open(cfg.test_source_file, 'r', encoding='utf-8')
    for batch in dataset.create_dict_iterator():
        source_sents.append(batch["source_eos_ids"])
        target_sents.append(batch["target_eos_ids"])
        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
        predicted_ids = model.predict(source_ids, source_mask)
        #predictions.append(predicted_ids.asnumpy())
        # ----------------------------------------decode and write to file(token file)---------------------
        batch_out = predicted_ids.asnumpy()
        for i in range(transformer_net_cfg.batch_size):
            if batch_out.ndim == 3:
                batch_out = batch_out[:, 0]
            token_ids = [str(x) for x in batch_out[i].tolist()]
            print(" ".join(token_ids))
            token = " ".join(token_ids)
            f.write(token + "\n")
            #-------------------------------token_ids to real output file-------------------------------
            token_ids = [int(x) for x in token.strip().split()]
            tokens = tokenizer.convert_ids_to_tokens(token_ids)
            sent = " ".join(tokens)
            sent = sent.split("<s>")[-1]
            sent = sent.split("</s>")[0]
            print(sent.strip())
            f1.write(f2.readline().strip() + '\t')
            f1.write(sent.strip() + '\n')
    f.close()
    f1.close()
    f2.close()

    import moxing as mox
    mox.file.copy_parallel(src_url=cfg.token_file,
                           dst_url=os.path.join(out_url, cfg.token_file))
    mox.file.copy_parallel(src_url=cfg.pred_file,
                           dst_url=os.path.join(out_url, cfg.pred_file))
Code Example #12
File: run_ner.py Project: egnlife/mindspore
def do_eval(dataset=None,
            network=None,
            use_crf="",
            num_class=2,
            assessment_method="accuracy",
            data_file="",
            load_checkpoint_path="",
            vocab_file="",
            label2id_file="",
            tag_to_index=None):
    """ do eval """
    if load_checkpoint_path == "":
        raise ValueError(
            "Finetune model missed, evaluation task must load finetune model!")
    if assessment_method == "clue_benchmark":
        bert_net_cfg.batch_size = 1
    net_for_pretraining = network(bert_net_cfg,
                                  False,
                                  num_class,
                                  use_crf=(use_crf.lower() == "true"),
                                  tag_to_index=tag_to_index)
    net_for_pretraining.set_train(False)
    param_dict = load_checkpoint(load_checkpoint_path)
    load_param_into_net(net_for_pretraining, param_dict)
    model = Model(net_for_pretraining)

    if assessment_method == "clue_benchmark":
        from src.cluener_evaluation import submit
        submit(model=model,
               path=data_file,
               vocab_file=vocab_file,
               use_crf=use_crf,
               label2id_file=label2id_file)
    else:
        if assessment_method == "accuracy":
            callback = Accuracy()
        elif assessment_method == "f1":
            callback = F1((use_crf.lower() == "true"), num_class)
        elif assessment_method == "mcc":
            callback = MCC()
        elif assessment_method == "spearman_correlation":
            callback = Spearman_Correlation()
        else:
            raise ValueError(
                "Assessment method not supported, support: [accuracy, f1, mcc, spearman_correlation]"
            )

        columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
        for data in dataset.create_dict_iterator():
            input_data = []
            for i in columns_list:
                input_data.append(Tensor(data[i]))
            input_ids, input_mask, token_type_id, label_ids = input_data
            logits = model.predict(input_ids, input_mask, token_type_id,
                                   label_ids)
            callback.update(logits, label_ids)
        print("==============================================================")
        eval_result_print(assessment_method, callback)
        print("==============================================================")
Code Example #13
File: infer_mass.py Project: yrpang/mindspore
def transformer_infer_ppl(config, dataset):
    """
    Run infer with Transformer for PPL.

    Args:
        config (TransformerConfig): Config.
        dataset (Dataset): Dataset.

    Returns:
        List[Dict], prediction, each example has 4 keys, "source",
        "target", "log_prob" and "length".
    """
    tfm_infer = TransformerInferPPLCell(config=config)
    tfm_infer.init_parameters_data()

    parameter_dict = load_checkpoint(config.existed_ckpt)
    load_param_into_net(tfm_infer, parameter_dict)

    model = Model(tfm_infer)

    log_probs = []
    lengths = []
    source_sentences = []
    target_sentences = []
    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
        source_sentences.append(batch["source_eos_ids"])
        target_sentences.append(batch["target_eos_ids"])

        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
        target_ids = Tensor(batch["target_sos_ids"], mstype.int32)
        target_mask = Tensor(batch["target_sos_mask"], mstype.int32)
        label_ids = Tensor(batch["target_eos_ids"], mstype.int32)
        label_mask = Tensor(batch["target_eos_mask"], mstype.int32)

        start_time = time.time()
        log_prob, length = model.predict(source_ids, source_mask, target_ids,
                                         target_mask, label_ids, label_mask)
        print(f" | Batch size: {config.batch_size}, "
              f"Time cost: {time.time() - start_time}.")

        log_probs.append(log_prob.asnumpy())
        lengths.append(length.asnumpy())

    output = []
    for inputs, ref, log_prob, length in zip(source_sentences,
                                             target_sentences, log_probs,
                                             lengths):
        for i in range(config.batch_size):
            example = {
                "source": inputs[i].tolist(),
                "target": ref[i].tolist(),
                "log_prob": log_prob[i].tolist(),
                "length": length[i]
            }
            output.append(example)

    return output
Code Example #14
def forward_mindspore_impl(self):
    input_ms = Tensor(self.input_np)
    gamma = Tensor(self.gamma_np)
    beta = Tensor(self.beta_np)
    net = LayerNorm(self.input_shape, self.begin_norm_axis, self.begin_params_axis, gamma, beta)
    net.set_train()
    model = Model(net)
    out_me = model.predict(Tensor(input_ms))
    return out_me.asnumpy()
Code Example #15
def test_net():
    context.set_context(mode=context.GRAPH_MODE)
    tanh_grad = Net()
    tanh_grad.set_train()
    m = Model(tanh_grad)
    out = m.predict(input_me, input_me)
    print("out_me.dtype={}".format(out.dtype))
    print("out_me.asnumpy={}".format(out.asnumpy()))
    return out.asnumpy()
Code Example #16
def ms_transpose(input_, perm_in):
    context.set_context(mode=context.GRAPH_MODE)
    input_me = Tensor(input_)
    net = Net(perm_in)
    net.set_train()
    model = Model(net)
    output = model.predict(input_me)
    print("-------------ms------------------")
    print(output.asnumpy().dtype)
    print(output.asnumpy())
Code Example #17
def pt_me_layernorm(input_data, normalized_shape, gamma, beta, axis):
    net = Net(normalized_shape, begin_norm_axis=axis,
              begin_params_axis=axis,
              gamma=Tensor(gamma),
              beta=Tensor(beta))
    net.set_train()
    model = Model(net)
    out_me = model.predict(Tensor(input_data))
    logger.info("Check me result:")
    logger.info(out_me.asnumpy())
Code Example #18
File: run_squad.py Project: zuoshou030/mindspore
def do_eval(dataset=None,
            vocab_file="",
            eval_json="",
            load_checkpoint_path="",
            seq_length=384):
    """ do eval """
    if load_checkpoint_path == "":
        raise ValueError(
            "Finetune model missed, evaluation task must load finetune model!")
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
                                           do_lower_case=True)
    eval_examples = read_squad_examples(eval_json, False)
    eval_features = convert_examples_to_features(examples=eval_examples,
                                                 tokenizer=tokenizer,
                                                 max_seq_length=seq_length,
                                                 doc_stride=128,
                                                 max_query_length=64,
                                                 is_training=False,
                                                 output_fn=None,
                                                 verbose_logging=False)

    net = BertSquad(bert_net_cfg, False, 2)
    net.set_train(False)
    param_dict = load_checkpoint(load_checkpoint_path)
    load_param_into_net(net, param_dict)
    model = Model(net)
    output = []
    RawResult = collections.namedtuple(
        "RawResult", ["unique_id", "start_logits", "end_logits"])
    columns_list = ["input_ids", "input_mask", "segment_ids", "unique_ids"]
    for data in dataset.create_dict_iterator():
        input_data = []
        for i in columns_list:
            input_data.append(Tensor(data[i]))
        input_ids, input_mask, segment_ids, unique_ids = input_data
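        # constant placeholder tensors for the training-only position/impossible inputs required by the cell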
        start_positions = Tensor([1], mstype.float32)
        end_positions = Tensor([1], mstype.float32)
        is_impossible = Tensor([1], mstype.float32)
        logits = model.predict(input_ids, input_mask, segment_ids,
                               start_positions, end_positions, unique_ids,
                               is_impossible)
        ids = logits[0].asnumpy()
        start = logits[1].asnumpy()
        end = logits[2].asnumpy()

        for i in range(bert_net_cfg.batch_size):
            unique_id = int(ids[i])
            start_logits = [float(x) for x in start[i].flat]
            end_logits = [float(x) for x in end[i].flat]
            output.append(
                RawResult(unique_id=unique_id,
                          start_logits=start_logits,
                          end_logits=end_logits))
    write_predictions(eval_examples, eval_features, output, 20, 30, True,
                      "./predictions.json", None, None)
Code Example #19
def me_greater(inputa, inputb):
    net = Greater()
    net.set_train()
    model = Model(net)

    out = model.predict(inputa, inputb)
    logger.info("Check input a: ")
    logger.info(inputa)
    logger.info("Check input b: ")
    logger.info(inputb)
    return out.asnumpy()
Code Example #20
def test_eval():
    """Evaluation function for SQuAD task"""
    tokenizer = tokenization.FullTokenizer(vocab_file="./vocab.txt",
                                           do_lower_case=True)
    input_file = "dataset/v1.1/dev-v1.1.json"
    eval_examples = read_squad_examples(input_file, False)
    eval_features = convert_examples_to_features(examples=eval_examples,
                                                 tokenizer=tokenizer,
                                                 max_seq_length=384,
                                                 doc_stride=128,
                                                 max_query_length=64,
                                                 is_training=False,
                                                 output_fn=None,
                                                 verbose_logging=False)

    device_id = int(os.getenv('DEVICE_ID'))
    context.set_context(mode=context.GRAPH_MODE,
                        device_target='Ascend',
                        device_id=device_id)
    dataset = get_squad_dataset(bert_net_cfg.batch_size, 1)
    net = BertSquad(bert_net_cfg, False, 2)
    net.set_train(False)
    param_dict = load_checkpoint(cfg.finetune_ckpt)
    load_param_into_net(net, param_dict)
    model = Model(net)
    output = []
    RawResult = collections.namedtuple(
        "RawResult", ["unique_id", "start_logits", "end_logits"])
    columns_list = ["input_ids", "input_mask", "segment_ids", "unique_ids"]
    for data in dataset.create_dict_iterator():
        input_data = []
        for i in columns_list:
            input_data.append(Tensor(data[i]))
        input_ids, input_mask, segment_ids, unique_ids = input_data
        start_positions = Tensor([1], mstype.float32)
        end_positions = Tensor([1], mstype.float32)
        is_impossible = Tensor([1], mstype.float32)
        logits = model.predict(input_ids, input_mask, segment_ids,
                               start_positions, end_positions, unique_ids,
                               is_impossible)
        ids = logits[0].asnumpy()
        start = logits[1].asnumpy()
        end = logits[2].asnumpy()

        for i in range(bert_net_cfg.batch_size):
            unique_id = int(ids[i])
            start_logits = [float(x) for x in start[i].flat]
            end_logits = [float(x) for x in end[i].flat]
            output.append(
                RawResult(unique_id=unique_id,
                          start_logits=start_logits,
                          end_logits=end_logits))
    write_predictions(eval_examples, eval_features, output, 20, 30, True,
                      "./predictions.json", None, None, False, False)
Code Example #21
def run_transformer_eval():
    """
    Transformer evaluation.
    """
    parser = argparse.ArgumentParser(description='transformer')
    parser.add_argument(
        "--device_target",
        type=str,
        default="Ascend",
        help="device where the code will be implemented, default is Ascend")
    parser.add_argument('--device_id',
                        type=int,
                        default=0,
                        help='device id of GPU or Ascend, default is 0')
    args = parser.parse_args()

    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args.device_target,
                        reserve_class_name_in_scope=False,
                        device_id=args.device_id)

    dataset = load_test_data(batch_size=transformer_net_cfg.batch_size,
                             data_file=cfg.data_file)
    tfm_model = TransformerModel(config=transformer_net_cfg,
                                 is_training=False,
                                 use_one_hot_embeddings=False)

    parameter_dict = load_weights(cfg.model_file)
    load_param_into_net(tfm_model, parameter_dict)

    tfm_infer = TransformerInferCell(tfm_model)
    model = Model(tfm_infer)

    predictions = []
    source_sents = []
    target_sents = []
    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
        source_sents.append(batch["source_eos_ids"])
        target_sents.append(batch["target_eos_ids"])
        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
        predicted_ids = model.predict(source_ids, source_mask)
        predictions.append(predicted_ids.asnumpy())

    # decode and write to file
    f = open(cfg.output_file, 'w')
    for batch_out in predictions:
        for i in range(transformer_net_cfg.batch_size):
            if batch_out.ndim == 3:
                batch_out = batch_out[:, 0]
            token_ids = [str(x) for x in batch_out[i].tolist()]
            f.write(" ".join(token_ids) + "\n")
    f.close()
Code Example #22
File: test_select.py Project: zuoshou030/mindspore
def me_select(cond, inputa, inputb, dtype=ms.float32):
    net = Select(dtype)
    net.set_train()
    model = Model(net)
    if isinstance(inputa, np.ndarray):
        inputa = Tensor(inputa)
    if isinstance(inputb, np.ndarray):
        inputb = Tensor(inputb)
    if isinstance(cond, np.bool_):
        cond = np.array(cond)

    out = model.predict(Tensor(cond), inputa, inputb)
    return out.asnumpy()
Code Example #23
File: test_minimum.py Project: zky001/mindspore
def me_min(inputa, inputb, dtype=ms.float32):
    context.set_context(mode=context.GRAPH_MODE)
    net = Min(dtype)
    net.set_train()
    model = Model(net)
    print(type(inputa))
    if isinstance(inputa, np.ndarray):
        inputa = Tensor(inputa)
    if isinstance(inputb, np.ndarray):
        inputb = Tensor(inputb)
    out = model.predict(inputa, inputb)
    print(out)
    return out.asnumpy()
Code Example #24
def test_ctrl_if_while_graph_support_not_equal_true():
    x = np.array(0).astype(np.float32)
    y = np.array(3).astype(np.float32)
    input_shape = (512, 512, 7, 7)
    input_data = np.random.randn(*input_shape).astype(np.float32)
    net = ControlGraphSupportNotEqual()
    model = Model(net)
    out_me = model.predict(Tensor(x), Tensor(y), Tensor(x), Tensor(input_data))
    out = input_data + input_data
    out2 = input_data * input_data
    out3 = input_data + input_data
    allclose_nparray(out, out_me[0].asnumpy(), 0.0001, 0.0001)
    allclose_nparray(out2, out_me[1].asnumpy(), 0.0001, 0.0001)
    allclose_nparray(out3, out_me[2].asnumpy(), 0.0001, 0.0001)
Code Example #25
File: eval.py Project: xyg320/mindspore
def run_transformer_eval():
    """
    Transformer evaluation.
    """
    device_id = int(os.getenv('DEVICE_ID'))
    context.set_context(mode=context.GRAPH_MODE,
                        device_target="Ascend",
                        reserve_class_name_in_scope=False,
                        device_id=device_id)

    dataset = load_test_data(batch_size=transformer_net_cfg.batch_size,
                             data_file=cfg.data_file)
    tfm_model = TransformerModel(config=transformer_net_cfg,
                                 is_training=False,
                                 use_one_hot_embeddings=False)

    parameter_dict = load_weights(cfg.model_file)
    load_param_into_net(tfm_model, parameter_dict)

    tfm_infer = TransformerInferCell(tfm_model)
    model = Model(tfm_infer)

    predictions = []
    source_sents = []
    target_sents = []
    for batch in dataset.create_dict_iterator():
        source_sents.append(batch["source_eos_ids"])
        target_sents.append(batch["target_eos_ids"])
        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
        predicted_ids = model.predict(source_ids, source_mask)
        predictions.append(predicted_ids.asnumpy())

    # decode and write to file
    f = open(cfg.output_file, 'w')
    for batch_out in predictions:
        for i in range(transformer_net_cfg.batch_size):
            if batch_out.ndim == 3:
                batch_out = batch_out[:, 0]
            token_ids = [str(x) for x in batch_out[i].tolist()]
            f.write(" ".join(token_ids) + "\n")
    f.close()
Code Example #26
def do_eval(dataset=None,
            network=None,
            use_crf="",
            num_class=41,
            assessment_method="accuracy",
            data_file="",
            load_checkpoint_path="",
            vocab_file="",
            label_file="",
            tag_to_index=None,
            batch_size=1):
    """ do eval """
    if load_checkpoint_path == "":
        raise ValueError(
            "Finetune model missed, evaluation task must load finetune model!")
    net_for_pretraining = network(ernie_net_cfg,
                                  batch_size,
                                  False,
                                  num_class,
                                  use_crf=(use_crf.lower() == "true"),
                                  tag_to_index=tag_to_index)
    net_for_pretraining.set_train(False)
    param_dict = load_checkpoint(load_checkpoint_path)
    load_param_into_net(net_for_pretraining, param_dict)
    model = Model(net_for_pretraining)

    callback = SpanF1((use_crf.lower() == "true"), tag_to_index)

    columns_list = ["input_ids", "input_mask", "token_type_id", "label_ids"]
    for data in dataset.create_dict_iterator(num_epochs=1):
        input_data = []
        for i in columns_list:
            input_data.append(data[i])
        input_ids, input_mask, token_type_id, label_ids = input_data
        logits = model.predict(input_ids, input_mask, token_type_id, label_ids)
        callback.update(logits, label_ids)
    print("==============================================================")
    eval_result_print(assessment_method, callback)
    print("==============================================================")
Code Example #27
File: run_squad.py Project: mark14wu/bert_demo
def do_eval(dataset=None, load_checkpoint_path="", eval_batch_size=1):
    """ do eval """
    if load_checkpoint_path == "":
        raise ValueError(
            "Finetune model missed, evaluation task must load finetune model!")
    net = BertSquad(bert_net_cfg, False, 2)
    net.set_train(False)
    param_dict = load_checkpoint(load_checkpoint_path)
    load_param_into_net(net, param_dict)
    model = Model(net)
    output = []
    RawResult = collections.namedtuple(
        "RawResult", ["unique_id", "start_logits", "end_logits"])
    columns_list = ["input_ids", "input_mask", "segment_ids", "unique_ids"]
    for data in dataset.create_dict_iterator(num_epochs=1):
        input_data = []
        for i in columns_list:
            input_data.append(data[i])
        input_ids, input_mask, segment_ids, unique_ids = input_data
        start_positions = Tensor([1], mstype.float32)
        end_positions = Tensor([1], mstype.float32)
        is_impossible = Tensor([1], mstype.float32)
        logits = model.predict(input_ids, input_mask, segment_ids,
                               start_positions, end_positions, unique_ids,
                               is_impossible)
        ids = logits[0].asnumpy()
        start = logits[1].asnumpy()
        end = logits[2].asnumpy()

        for i in range(eval_batch_size):
            unique_id = int(ids[i])
            start_logits = [float(x) for x in start[i].flat]
            end_logits = [float(x) for x in end[i].flat]
            output.append(
                RawResult(unique_id=unique_id,
                          start_logits=start_logits,
                          end_logits=end_logits))
    return output
Code Example #28
def transformer_infer(config, dataset):
    """
    Run infer with Transformer.

    Args:
        config (TransformerConfig): Config.
        dataset (Dataset): Dataset.

    Returns:
        List[Dict], prediction, each example has 4 keys, "source",
        "target", "prediction" and "prediction_prob".
    """
    tfm_model = TransformerInferModel(config=config,
                                      use_one_hot_embeddings=False)
    tfm_model.init_parameters_data()

    params = tfm_model.trainable_params()
    weights = load_infer_weights(config)

    # copy each checkpoint weight into the matching network parameter,
    # logging every parameter name to weight_after_deal.txt as it is loaded
    for param in params:
        value = param.default_input
        name = param.name
        if name not in weights:
            raise ValueError(f"{name} is not found in weights.")

        with open("weight_after_deal.txt", "a+") as f:
            weights_name = name
            f.write(weights_name + "\n")
            if isinstance(value, Tensor):
                print(name, value.asnumpy().shape)
                if weights_name in weights:
                    assert weights_name in weights
                    param.default_input = Tensor(weights[weights_name],
                                                 mstype.float32)
                else:
                    raise ValueError(
                        f"{weights_name} is not found in checkpoint.")
            else:
                raise TypeError(f"Type of {weights_name} is not Tensor.")

    print(" | Load weights successfully.")
    tfm_infer = TransformerInferCell(tfm_model)
    model = Model(tfm_infer)

    predictions = []
    probs = []
    source_sentences = []
    target_sentences = []
    for batch in dataset.create_dict_iterator():
        source_sentences.append(batch["source_eos_ids"])
        target_sentences.append(batch["target_eos_ids"])

        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)

        start_time = time.time()
        predicted_ids, entire_probs = model.predict(source_ids, source_mask)
        print(f" | Batch size: {config.batch_size}, "
              f"Time cost: {time.time() - start_time}.")

        predictions.append(predicted_ids.asnumpy())
        probs.append(entire_probs.asnumpy())

    output = []
    for inputs, ref, batch_out, batch_probs in zip(source_sentences,
                                                   target_sentences,
                                                   predictions, probs):
        for i in range(config.batch_size):
            if batch_out.ndim == 3:
                batch_out = batch_out[:, 0]

            example = {
                "source": inputs[i].tolist(),
                "target": ref[i].tolist(),
                "prediction": batch_out[i].tolist(),
                "prediction_prob": batch_probs[i].tolist()
            }
            output.append(example)

    return output
Code Example #29
File: test_pow.py Project: zky001/mindspore
def pow_forward_me_impl(input, exp):
    n = PowMe()
    n.set_train()
    m = Model(n)
    out = m.predict(input, exp)
    return out.asnumpy()
Code Example #30
File: test_gelu.py Project: zky001/mindspore
def gelu_forward_me_impl(input):
    n = GELU()
    n.set_train()
    m = Model(n)
    out = m.predict(input)
    return out.asnumpy()