Example #1
    if args.run_distribute:
        rank = args.rank_id
        device_num = args.device_num
        context.set_auto_parallel_context(
            device_num=device_num,
            parallel_mode=ParallelMode.DATA_PARALLEL,
            gradients_mean=True)
        init()
    else:
        rank = 0
        device_num = 1
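    # Check that the MindRecord dataset file exists before building the input pipeline.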
    mindrecord_file = args.dataset_path
    if not os.path.exists(mindrecord_file):
        print("dataset file {} not exists, please check!".format(
            mindrecord_file))
        raise ValueError(mindrecord_file)
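    # Create the training dataset; with multiple devices each rank reads its own shard.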
    dataset = create_gru_dataset(epoch_count=config.num_epochs,
                                 batch_size=config.batch_size,
                                 dataset_path=mindrecord_file,
                                 rank_size=device_num,
                                 rank_id=rank)
    dataset_size = dataset.get_dataset_size()
    print("dataset size is {}".format(dataset_size))
    network = Seq2Seq(config)
    network = GRUWithLossCell(network)
    lr = dynamic_lr(config, dataset_size)
    opt = Adam(network.trainable_params(), learning_rate=lr)
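    # Dynamic loss scaling keeps mixed-precision gradients in a representable range.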
    scale_manager = DynamicLossScaleManager(
        init_loss_scale=config.init_loss_scale_value,
        scale_factor=config.scale_factor,
        scale_window=config.scale_window)
    update_cell = scale_manager.get_update_cell()
    netwithgrads = GRUTrainOneStepWithLossScaleCell(network, opt, update_cell)
Example #2
parser.add_argument('--device_num',
                    type=int,
                    default=1,
                    help='number of devices to use, default is 1')
parser.add_argument('--result_path',
                    type=str,
                    default='./preprocess_Result/',
                    help='result path')
args = parser.parse_args()

if __name__ == "__main__":
    mindrecord_file = args.dataset_path
    if not os.path.exists(mindrecord_file):
        print("dataset file {} not exists, please check!".format(
            mindrecord_file))
        raise ValueError(mindrecord_file)
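    # Evaluation pipeline: evaluation batch size and fixed sample order (no shuffle).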
    dataset = create_gru_dataset(epoch_count=config.num_epochs, batch_size=config.eval_batch_size,
                                 dataset_path=mindrecord_file, rank_size=args.device_num, rank_id=0,
                                 do_shuffle=False, is_training=False)

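    # Output directories for the exported source and target id binaries.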
    source_ids_path = os.path.join(args.result_path, "00_data")
    target_ids_path = os.path.join(args.result_path, "01_data")
    os.makedirs(source_ids_path, exist_ok=True)
    os.makedirs(target_ids_path, exist_ok=True)

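    # Export each batch as .bin files named by eval batch size and batch index.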
    for i, data in enumerate(
            dataset.create_dict_iterator(output_numpy=True, num_epochs=1)):
        file_name = "gru_bs" + str(
            config.eval_batch_size) + "_" + str(i) + ".bin"
        data["source_ids"].tofile(os.path.join(source_ids_path, file_name))
        data["target_ids"].tofile(os.path.join(target_ids_path, file_name))

    print("=" * 20, "export bin files finished", "=" * 20)
Example #3
                str(cb_params.net_outputs[0].asnumpy()),
                str(cb_params.net_outputs[1].asnumpy()),
                str(cb_params.net_outputs[2].asnumpy())))
            f.write('\n')

if __name__ == '__main__':
    if args.run_distribute:
        rank = args.rank_id
        device_num = args.device_num
        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True)
        init()
    else:
        rank = 0
        device_num = 1
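    # Sharded training dataset: one shard per device, indexed by rank.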
    dataset = create_gru_dataset(epoch_count=config.num_epochs, batch_size=config.batch_size,
                                 dataset_path=args.dataset_path, rank_size=device_num, rank_id=rank)
    dataset_size = dataset.get_dataset_size()
    print("dataset size is {}".format(dataset_size))
    network = Seq2Seq(config)
    network = GRUWithLossCell(network)
    lr = dynamic_lr(config, dataset_size)
    opt = Adam(network.trainable_params(), learning_rate=lr)
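    # Dynamic loss scaling wrapped into a train-one-step cell for mixed-precision training.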
    scale_manager = DynamicLossScaleManager(init_loss_scale=config.init_loss_scale_value,
                                            scale_factor=config.scale_factor,
                                            scale_window=config.scale_window)
    update_cell = scale_manager.get_update_cell()
    netwithgrads = GRUTrainOneStepWithLossScaleCell(network, opt, update_cell)

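    # Callbacks: per-step timing and per-rank loss logging.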
    time_cb = TimeMonitor(data_size=dataset_size)
    loss_cb = LossCallBack(rank_id=rank)
    cb = [time_cb, loss_cb]
Example #4
def run_gru_eval():
    """
    GRU evaluation.
    """
    parser = argparse.ArgumentParser(description='GRU eval')
    parser.add_argument(
        "--device_target",
        type=str,
        default="Ascend",
        help="device where the code will be implemented, default is Ascend")
    parser.add_argument('--device_id',
                        type=int,
                        default=0,
                        help='device id of GPU or Ascend, default is 0')
    parser.add_argument('--device_num',
                        type=int,
                        default=1,
                        help='number of devices to use, default is 1')
    parser.add_argument('--ckpt_file',
                        type=str,
                        default="",
                        help='ckpt file path')
    parser.add_argument("--dataset_path",
                        type=str,
                        default="",
                        help="Dataset path, default: f`sns.")
    args = parser.parse_args()

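    # Run in graph mode on the selected device.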
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target,
                        reserve_class_name_in_scope=False, device_id=args.device_id,
                        save_graphs=False)
    dataset = create_gru_dataset(epoch_count=config.num_epochs, batch_size=config.eval_batch_size,
                                 dataset_path=args.dataset_path, rank_size=args.device_num, rank_id=0,
                                 do_shuffle=False, is_training=False)
    dataset_size = dataset.get_dataset_size()
    print("dataset size is {}".format(dataset_size))
    network = Seq2Seq(config, is_training=False)
    network = GRUInferCell(network)
    network.set_train(False)
    if args.ckpt_file != "":
        parameter_dict = load_checkpoint(args.ckpt_file)
        load_param_into_net(network, parameter_dict)
    model = Model(network)

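    # Decode the dataset batch by batch, collecting predictions and reference target ids.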
    predictions = []
    source_sents = []
    target_sents = []
    eval_text_len = 0
    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
        source_sents.append(batch["source_ids"])
        target_sents.append(batch["target_ids"])
        source_ids = Tensor(batch["source_ids"], mstype.int32)
        target_ids = Tensor(batch["target_ids"], mstype.int32)
        predicted_ids = model.predict(source_ids, target_ids)
        print("predicts is ", predicted_ids.asnumpy())
        print("target_ids is ", target_ids)
        predictions.append(predicted_ids.asnumpy())
        eval_text_len = eval_text_len + 1

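    # Write predicted and reference token ids to the configured output files, one sentence per line.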
    with open(config.output_file, 'w') as f_output, open(config.target_file, 'w') as f_target:
        for batch_out, true_sentence in zip(predictions, target_sents):
            for i in range(config.eval_batch_size):
                target_ids = [str(x) for x in true_sentence[i].tolist()]
                f_target.write(" ".join(target_ids) + "\n")
                token_ids = [str(x) for x in batch_out[i].tolist()]
                f_output.write(" ".join(token_ids) + "\n")