Example #1
def part_ps_impl(self, dataset):
    net = Menet(self.in_channels, self.out_channels, self.kernel_size, self.vocab_size,
                self.embedding_size, self.output_channels, self.target, self.sparse)
    # Host the embedding table on the parameter server.
    net.embedding_lookup.set_param_ps()
    # Force the convolution and bias-add primitives to run on CPU.
    net.conv.conv2d.add_prim_attr('primitive_target', 'CPU')
    net.conv.bias_add.add_prim_attr('primitive_target', 'CPU')
    net.set_train()
    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    opt = Adam(params=filter(lambda x: x.requires_grad, net.get_parameters()))
    opt.target = 'CPU'
    model = Model(net, loss, opt)
    model.train(self.epoch_size, dataset, dataset_sink_mode=False)
    input_me = Tensor(self.input_np)
    out_me = model.predict(input_me)
    return out_me.asnumpy()
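
This method is lifted from a larger test class, so its imports and the Menet network definition are not shown. A minimal sketch of the MindSpore imports it relies on (exact module paths may differ between MindSpore versions):

from mindspore import Tensor
from mindspore.nn import SoftmaxCrossEntropyWithLogits, Adam
from mindspore.train import Model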
Example #2
def run(n_iter=1000):
    # Simulation init
    apply = Apply()
    model = Model(apply)
    R = apply.random((1, N, 2))

    # Run the simulation, timing each predict step
    times = []
    for i in range(n_iter):
        time_start = time.perf_counter_ns()
        R = model.predict(R, Tensor(0))
        time_end = time.perf_counter_ns()
        times.append(time_end - time_start)

    # Return the per-iteration timings in nanoseconds
    return times
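
The snippet assumes a surrounding script that defines the Apply network and the point count N, and that imports time and the MindSpore classes it uses. A hedged sketch of that preamble (N here is only a placeholder value):

import time
from mindspore import Tensor
from mindspore.train import Model
N = 1000  # placeholder: number of simulated points

A call such as times = run(100) then returns 100 per-iteration predict latencies in nanoseconds.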
Example #3
def infer(data_dir):
    # Take one batch from the evaluation dataset
    ds = create_dataset(data_dir, training=False).create_dict_iterator()
    data = ds.get_next()
    images = data['image']
    labels = data['label']
    # Restore the trained weights and run inference
    net = LeNet5()
    load_checkpoint(CKPT_2, net=net)
    model = Model(net)
    output = model.predict(Tensor(images))
    preds = np.argmax(output.asnumpy(), axis=1)

    # Show four samples with predicted vs. true labels (a red title marks a wrong prediction)
    for i in range(1, 5):
        plt.subplot(2, 2, i)
        plt.imshow(np.squeeze(images[i]))
        color = 'blue' if preds[i] == labels[i] else 'red'
        plt.title("prediction: {}, truth: {}".format(preds[i], labels[i]),
                  color=color)
        plt.xticks([])
    plt.show()
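
Here create_dataset, LeNet5 and CKPT_2 (the checkpoint path) come from the surrounding project; the function body itself needs roughly these imports (a sketch, module paths may vary by MindSpore version):

import numpy as np
import matplotlib.pyplot as plt
from mindspore import Tensor
from mindspore.train import Model
from mindspore.train.serialization import load_checkpoint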
Example #4
# Print the loss once per epoch (train_size / batch_size steps per epoch)
loss_cb = LossMonitor(per_print_times=int(cfg.train_size / cfg.batch_size))
config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                             keep_checkpoint_max=cfg.keep_checkpoint_max)
ckpoint_cb = ModelCheckpoint(prefix=cfg.output_prefix,
                             directory=cfg.output_directory,
                             config=config_ck)
print("============== Starting Training ==============")
model.train(cfg.epoch_size,
            ds_train,
            callbacks=[ckpoint_cb, loss_cb],
            dataset_sink_mode=True)

# Evaluate the model on the test set and print the overall accuracy
metric = model.eval(ds_test)
print(metric)

# Prediction
test_ = ds_test.create_dict_iterator().get_next()
test = Tensor(test_['x'], mindspore.float32)
predictions = model.predict(test)
softmax = nn.Softmax()
predictions = softmax(predictions)
predictions = predictions.asnumpy()
for i in range(15):
    p_np = predictions[i, :]
    p_list = p_np.tolist()
    print('Sample ' + str(i) + ' prediction:', p_list.index(max(p_list)), '   ground truth:',
          test_['y'][i])

# Copy the locally saved model files to OBS (MoXing file API on ModelArts)
moxing.file.copy_parallel(src_url='model_fashion', dst_url=args.train_url)
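
This example runs at script level and uses objects built earlier (cfg, ds_train, ds_test, model, args). A sketch of the framework imports it depends on; moxing is the MoXing file API available inside Huawei ModelArts:

import mindspore
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
import moxing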