Code Example #1
File: eval.py Project: shirley18411/book
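    # Load the pretrained word-embedding table produced by preprocessing.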
    embedding_table = np.loadtxt(
        os.path.join(args.preprocess_path, "weight.txt")).astype(np.float32)
    network = SentimentNet(vocab_size=embedding_table.shape[0],
                           embed_size=cfg.embed_size,
                           num_hiddens=cfg.num_hiddens,
                           num_layers=cfg.num_layers,
                           bidirectional=cfg.bidirectional,
                           num_classes=cfg.num_classes,
                           weight=Tensor(embedding_table),
                           batch_size=cfg.batch_size)

    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    opt = nn.Momentum(network.trainable_params(), cfg.learning_rate,
                      cfg.momentum)
    loss_cb = LossMonitor()

    model = Model(network, loss, opt, {'acc': Accuracy()})

    print("============== Starting Testing ==============")
    ds_eval = lstm_create_dataset(args.preprocess_path,
                                  cfg.batch_size,
                                  training=False)
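    # Restore the trained weights from the checkpoint before evaluating.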
    param_dict = load_checkpoint(args.ckpt_path)
    load_param_into_net(network, param_dict)
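    # Dataset sink mode isn't supported on CPU, so disable it there.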
    if args.device_target == "CPU":
        acc = model.eval(ds_eval, dataset_sink_mode=False)
    else:
        acc = model.eval(ds_eval)
    print("============== {} ==============".format(acc))
Code Example #2
File: train.py Project: kungfu-ml/mindspore
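            # Ascend requires the embedding width to be a multiple of 16;
            # zero-pad the table's columns and round embed_size up to match.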
            embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)],
                                     'constant')
        cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)
    network = SentimentNet(vocab_size=embedding_table.shape[0],
                           embed_size=cfg.embed_size,
                           num_hiddens=cfg.num_hiddens,
                           num_layers=cfg.num_layers,
                           bidirectional=cfg.bidirectional,
                           num_classes=cfg.num_classes,
                           weight=Tensor(embedding_table),
                           batch_size=cfg.batch_size)
    # Optionally warm-start the network from a pretrained checkpoint.
    if args.pre_trained:
        load_param_into_net(network, load_checkpoint(args.pre_trained))

    ds_train = lstm_create_dataset(args.preprocess_path, cfg.batch_size, 1)

    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    if cfg.dynamic_lr:
        lr = Tensor(
            get_lr(global_step=cfg.global_step,
                   lr_init=cfg.lr_init,
                   lr_end=cfg.lr_end,
                   lr_max=cfg.lr_max,
                   warmup_epochs=cfg.warmup_epochs,
                   total_epochs=cfg.num_epochs,
                   steps_per_epoch=ds_train.get_dataset_size(),
                   lr_adjust_epoch=cfg.lr_adjust_epoch))
    else:
        lr = cfg.learning_rate
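
`get_lr` is a project-local helper that returns one learning rate per training step; its body isn't shown here. A minimal sketch with the same signature, assuming linear warmup to `lr_max` followed by linear decay to `lr_end` over `lr_adjust_epoch` epochs (the decay shape is an assumption; the actual project may differ):

    import numpy as np

    def get_lr(global_step, lr_init, lr_end, lr_max, warmup_epochs,
               total_epochs, steps_per_epoch, lr_adjust_epoch):
        """Build a per-step learning-rate schedule as a NumPy array."""
        total_steps = steps_per_epoch * total_epochs
        warmup_steps = steps_per_epoch * warmup_epochs
        adjust_steps = max(steps_per_epoch * lr_adjust_epoch, warmup_steps + 1)
        lr_each_step = []
        for i in range(total_steps):
            if i < warmup_steps:
                # Linear warmup from lr_init up to lr_max.
                lr = lr_init + (lr_max - lr_init) * i / warmup_steps
            elif i < adjust_steps:
                # Linear decay from lr_max down to lr_end.
                lr = lr_max - (lr_max - lr_end) * (i - warmup_steps) / (adjust_steps - warmup_steps)
            else:
                lr = lr_end
            lr_each_step.append(lr)
        # Skip steps already taken when resuming from global_step.
        return np.array(lr_each_step, dtype=np.float32)[global_step:]

MindSpore optimizers such as nn.Momentum accept this per-step array wrapped in a Tensor, which is why the snippet passes Tensor(get_lr(...)) as the learning rate.
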
Code Example #3
        cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)
    network = SentimentNet(vocab_size=embedding_table.shape[0],
                           embed_size=cfg.embed_size,
                           num_hiddens=cfg.num_hiddens,
                           num_layers=cfg.num_layers,
                           bidirectional=cfg.bidirectional,
                           num_classes=cfg.num_classes,
                           weight=Tensor(embedding_table),
                           batch_size=cfg.batch_size)
    # Optionally warm-start the network from a pretrained checkpoint.
    if args.pre_trained:
        load_param_into_net(network, load_checkpoint(args.pre_trained))

    ds_train = lstm_create_dataset(args.preprocess_path,
                                   cfg.batch_size,
                                   1,
                                   device_num=device_num,
                                   rank=rank)

    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    if cfg.dynamic_lr:
        lr = Tensor(
            get_lr(global_step=cfg.global_step,
                   lr_init=cfg.lr_init,
                   lr_end=cfg.lr_end,
                   lr_max=cfg.lr_max,
                   warmup_epochs=cfg.warmup_epochs,
                   total_epochs=cfg.num_epochs,
                   steps_per_epoch=ds_train.get_dataset_size(),
                   lr_adjust_epoch=cfg.lr_adjust_epoch))
    else:
        lr = cfg.learning_rate
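
Unlike Code Example #2, this variant shards the dataset across devices via `device_num` and `rank`. Those values are typically obtained from MindSpore's communication API earlier in train.py; a minimal sketch, assuming a distributed Ascend/GPU launch:

    from mindspore.communication.management import init, get_rank, get_group_size

    init()                         # set up the collective-communication backend
    rank = get_rank()              # index of this process within the job
    device_num = get_group_size()  # total number of participating devices
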