Example #1
# Imports assumed by this snippet (names inferred from usage; the project-local
# modules providing cfg, convert_to_mindrecord, create_dataset and textrcnn are
# not shown in the original, and their import paths may differ per repo layout):
import os
import shutil

import numpy as np
from mindspore import Tensor, context

if __name__ == '__main__':

    context.set_context(mode=context.GRAPH_MODE,
                        save_graphs=False,
                        device_target="Ascend")

    # DEVICE_ID is set by the Ascend launch environment; default to 0 so a
    # single-device run does not crash when the variable is unset.
    device_id = int(os.getenv('DEVICE_ID', '0'))
    context.set_context(device_id=device_id)

    if cfg.preprocess == 'true':
        print("============== Starting Data Pre-processing ==============")
        if os.path.exists(cfg.preprocess_path):
            shutil.rmtree(cfg.preprocess_path)
        os.mkdir(cfg.preprocess_path)
        convert_to_mindrecord(cfg.embed_size, cfg.data_path,
                              cfg.preprocess_path, cfg.emb_path)

    if cfg.cell == "vanilla":
        print(
            "============ Precision is lower than expected with the vanilla RNN cell ==========="
        )

    embedding_table = np.loadtxt(
        os.path.join(cfg.preprocess_path, "weight.txt")).astype(np.float32)

    network = textrcnn(weight=Tensor(embedding_table),
                       vocab_size=embedding_table.shape[0],
                       cell=cfg.cell,
                       batch_size=cfg.batch_size)

    ds_train = create_dataset(cfg.preprocess_path, cfg.batch_size, True)
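
Example #1 ends after building the training dataset. A minimal sketch of how such a script typically continues in MindSpore, assuming a standard cross-entropy loss and Momentum optimizer (cfg.num_epochs, cfg.lr and cfg.momentum are illustrative names, not taken from the original):

    from mindspore import Model, nn
    from mindspore.train.callback import LossMonitor

    # Sparse softmax cross-entropy over the class logits, averaged over the batch.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    opt = nn.Momentum(network.trainable_params(),
                      learning_rate=cfg.lr, momentum=cfg.momentum)
    model = Model(network, loss_fn=loss, optimizer=opt, metrics={'acc'})
    # Print the loss each step; checkpoint callbacks would be added alongside.
    model.train(cfg.num_epochs, ds_train, callbacks=[LossMonitor()])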
Example #2
# Imports and argument parsing reconstructed from usage; the original snippet is
# truncated above this point, so defaults and help strings are best-effort guesses.
# Project-local pieces (lstm_cfg, lstm_cfg_ascend, convert_to_mindrecord,
# SentimentNet) are assumed importable from the surrounding repo.
import argparse
import os

import numpy as np
from mindspore import Tensor, context

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MindSpore LSTM sentiment example')
    parser.add_argument('--preprocess', type=str, default='false',
                        help='whether to preprocess the data. Default: "false".')
    parser.add_argument('--aclimdb_path', type=str, default='./aclImdb',
                        help='path of the ACL-IMDB dataset.')
    parser.add_argument('--glove_path', type=str, default='./glove',
                        help='path of the GloVe embedding files.')
    parser.add_argument('--preprocess_path', type=str, default='./preprocess',
                        help='path where the preprocessed MindRecord data is written.')
    parser.add_argument(
        '--device_target', type=str, default='Ascend',
        help='the target device to run; supports "Ascend", "GPU" and "CPU". Default: "Ascend".')
    args = parser.parse_args()

    context.set_context(mode=context.GRAPH_MODE,
                        save_graphs=False,
                        device_target=args.device_target)

    if args.device_target == 'Ascend':
        cfg = lstm_cfg_ascend
    else:
        cfg = lstm_cfg

    if args.preprocess == "true":
        print("============== Starting Data Pre-processing ==============")
        convert_to_mindrecord(cfg.embed_size, args.aclimdb_path,
                              args.preprocess_path, args.glove_path)

    embedding_table = np.loadtxt(
        os.path.join(args.preprocess_path, "weight.txt")).astype(np.float32)
    # On Ascend, the DynamicRNN op used by this network requires input_size and
    # hidden_size to be multiples of 16; this limitation is expected to be lifted later.
    if args.device_target == 'Ascend':
        pad_num = int(np.ceil(cfg.embed_size / 16) * 16 - cfg.embed_size)
        if pad_num > 0:
            embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)],
                                     'constant')
        cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)
    network = SentimentNet(vocab_size=embedding_table.shape[0],
                           embed_size=cfg.embed_size,
                           num_hiddens=cfg.num_hiddens,
                           num_layers=cfg.num_layers,
                           bidirectional=cfg.bidirectional,
                           num_classes=cfg.num_classes,
                           # The original snippet is truncated here; the last two
                           # arguments are assumed, following Example #1's pattern.
                           weight=Tensor(embedding_table),
                           batch_size=cfg.batch_size)
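
For concreteness, the multiple-of-16 padding with 300-dimensional GloVe vectors (300 is an assumption; the snippet never fixes the dimension) works out as follows:

    # ceil(300 / 16) * 16 = 19 * 16 = 304, so pad_num = 304 - 300 = 4
    # embedding_table grows from (vocab_size, 300) to (vocab_size, 304),
    # and cfg.embed_size is updated to 304 to match.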
Example #3
    # The original snippet is truncated above this point; imports and argument
    # parsing mirror Example #2, with args.data_url in place of args.aclimdb_path.
    context.set_context(
        mode=context.GRAPH_MODE,
        save_graphs=False,
        device_target=args.device_target)

    # On ModelArts, moxing can copy the GloVe files from OBS to the local path:
    # import moxing as mox
    # mox.file.copy_parallel(src_url='s3://zhengnj-course/lstm/glove', dst_url=args.glove_path)

    if args.device_target == 'Ascend':
        cfg = lstm_cfg_ascend
    else:
        cfg = lstm_cfg

    if args.preprocess == "true":
        print("============== Starting Data Pre-processing ==============")
        convert_to_mindrecord(cfg.embed_size, args.data_url, args.preprocess_path, args.glove_path)

    embedding_table = np.loadtxt(os.path.join(args.preprocess_path, "weight.txt")).astype(np.float32)
    # On Ascend, the DynamicRNN op used by this network requires input_size and
    # hidden_size to be multiples of 16; this limitation is expected to be lifted later.
    if args.device_target == 'Ascend':
        pad_num = int(np.ceil(cfg.embed_size / 16) * 16 - cfg.embed_size)
        if pad_num > 0:
            embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)], 'constant')
        cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)
    network = SentimentNet(vocab_size=embedding_table.shape[0],
                           embed_size=cfg.embed_size,
                           num_hiddens=cfg.num_hiddens,
                           num_layers=cfg.num_layers,
                           bidirectional=cfg.bidirectional,
                           num_classes=cfg.num_classes,
                           # The original snippet ends here; the last two arguments
                           # are assumed, following Example #1's pattern.
                           weight=Tensor(embedding_table),
                           batch_size=cfg.batch_size)
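
A hypothetical invocation of the Example #3 script (flag names are taken from the args.* usages above; the script name and paths are placeholders):

    # python train.py --device_target=Ascend --preprocess=true \
    #     --data_url=./aclImdb --glove_path=./glove --preprocess_path=./preprocess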