示例#1
0
    def test_input_without_name(self):
        """Building a Model with an anonymous input spec must raise
        ValueError when labels carry a name."""
        network = MyModel(classifier_activation=None)

        # The input spec deliberately has no name.
        input_specs = [InputSpec([None, 10], 'float32')]
        label_specs = [InputSpec([None, 1], 'int64', 'label')]
        with self.assertRaises(ValueError):
            Model(network, input_specs, label_specs)
    def test_visualdl_callback(self):
        """Smoke-test training with the VisualDL logging callback attached."""
        # visualdl does not support python2; skip silently there.
        if sys.version_info < (3, ):
            return

        image_spec = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
        label_spec = [InputSpec([None, 1], 'int64', 'label')]

        preprocess = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        train_set = MnistDataset(mode='train', transform=preprocess)
        eval_set = MnistDataset(mode='test', transform=preprocess)

        network = paddle.vision.models.LeNet()
        model = paddle.Model(network, image_spec, label_spec)

        optimizer = paddle.optimizer.Adam(0.001,
                                          parameters=network.parameters())
        model.prepare(optimizer=optimizer,
                      loss=paddle.nn.CrossEntropyLoss(),
                      metrics=paddle.metric.Accuracy())

        # One fit run with the callback exercises the logging path end to end.
        vdl_callback = paddle.callbacks.VisualDL(log_dir='visualdl_log_dir')
        model.fit(train_set,
                  eval_set,
                  batch_size=64,
                  callbacks=vdl_callback)
示例#3
0
def dygraph_to_static(model, save_dir, cfg):
    """Trace a dygraph ``model`` to a static graph and save it, together
    with its inference config, under ``save_dir``."""
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Use the reader-configured image shape when available; otherwise fall
    # back to 3 channels with dynamic height/width.
    image_shape = None
    reader_cfg = cfg['TestReader']
    if 'inputs_def' in reader_cfg:
        image_shape = reader_cfg['inputs_def'].get('image_shape', None)
    if image_shape is None:
        image_shape = [3, None, None]

    # Write the inference configuration next to the model.
    dump_infer_config(cfg, os.path.join(save_dir, 'infer_cfg.yml'),
                      image_shape, model)

    specs = {
        "image": InputSpec(shape=[None] + image_shape, name='image'),
        "im_shape": InputSpec(shape=[None, 2], name='im_shape'),
        "scale_factor": InputSpec(shape=[None, 2], name='scale_factor'),
    }

    export_model = to_static(model, input_spec=[specs])
    # Persist the traced model.
    paddle.jit.save(export_model, os.path.join(save_dir, 'model'))
示例#4
0
    def test_accumulate(self, ):
        """Gradients accumulate across train_batch calls with update=False
        and are applied/cleared by the final update=True call, for both
        plain fp32 and AMP 'O1' modes."""
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
        net = MyModel()
        sgd = fluid.optimizer.SGD(learning_rate=0.001,
                                  parameter_list=net.parameters())
        input_specs = [InputSpec([None, dim], 'float32', 'x')]
        label_specs = [InputSpec([None, 1], 'int64', 'label')]

        for amp_cfg in [None, 'O1']:
            model = Model(net, input_specs, label_specs)
            model.prepare(
                sgd,
                loss=CrossEntropyLoss(reduction="sum"),
                amp_configs=amp_cfg)
            losses, grads = [], []
            # Two accumulation-only steps, then one step that applies.
            for do_update in [False, False, True]:
                loss, = model.train_batch([data], [label], update=do_update)
                losses.append(loss)
                grads.append([p.grad.numpy() for p in net.parameters()])

            for g_one, g_two, g_final in zip(*grads):
                # The second call doubles the accumulated gradient...
                np.testing.assert_almost_equal(g_one * 2, g_two, decimal=4)
                # ...and the updating call clears it back to zero.
                np.testing.assert_almost_equal(
                    g_final, np.zeros_like(g_final), decimal=4)

            # Same data each step, so the loss never changes.
            np.testing.assert_almost_equal(losses[0], losses[1], decimal=4)
            np.testing.assert_almost_equal(losses[0], losses[2], decimal=4)
示例#5
0
    def test_static_save_dynamic_load(self):
        """Save a model in static-graph mode and reload it in dygraph mode.

        Fix: the cleanup previously removed only the cache sub-directory,
        leaking the parent directory created by ``tempfile.mkdtemp()``;
        remove the whole temporary tree instead.
        """
        tmp_root = tempfile.mkdtemp()
        path = os.path.join(tmp_root, '.cache_test_static_save_dynamic_load')
        if not os.path.exists(path):
            os.makedirs(path)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path)

        # Switch to dygraph mode for the load half of the round trip.
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)  #if dynamic else None

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path)
        # Remove the entire temp tree, not just the cache sub-directory.
        shutil.rmtree(tmp_root)
        fluid.disable_dygraph()
示例#6
0
    def test_static_save_dynamic_load(self):
        """Round-trip: save parameters in static mode, reload in dygraph."""
        path = tempfile.mkdtemp()
        save_path = path + '/test'

        def build_model():
            # Identical fresh model/optimizer pair for both phases.
            net = MyModel(classifier_activation=None)
            inputs = [InputSpec([None, 20], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=net.parameters())
            model = Model(net, inputs, labels)
            model.prepare(optimizer=optim,
                          loss=CrossEntropyLoss(reduction="sum"))
            return model

        build_model().save(save_path)

        # Switch to dynamic mode before loading.
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)

        build_model().load(save_path)
        shutil.rmtree(path)
        fluid.disable_dygraph()
示例#7
0
    def export(self, output_dir='output_inference'):
        """Export the trained model as a static-graph inference model plus
        an ``infer_cfg.yml`` under ``output_dir/<config_file_stem>``."""
        self.model.eval()
        model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
        save_dir = os.path.join(output_dir, model_name)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        # Image shape comes from the reader config when present; otherwise
        # default to 3 channels with dynamic height/width.
        image_shape = None
        if 'inputs_def' in self.cfg['TestReader']:
            image_shape = self.cfg['TestReader']['inputs_def'].get(
                'image_shape', None)
        if image_shape is None:
            image_shape = [3, None, None]

        # Persist the inference configuration alongside the model.
        _dump_infer_config(self.cfg,
                           os.path.join(save_dir, 'infer_cfg.yml'), image_shape,
                           self.model)

        specs = {
            "image": InputSpec(shape=[None] + image_shape, name='image'),
            "im_shape": InputSpec(shape=[None, 2], name='im_shape'),
            "scale_factor": InputSpec(shape=[None, 2], name='scale_factor'),
        }
        input_spec = [specs]

        # Convert dygraph -> static and write the model files.
        static_model = paddle.jit.to_static(self.model, input_spec=input_spec)
        paddle.jit.save(static_model, os.path.join(save_dir, 'model'))
        logger.info("Export model and saved in {}".format(save_dir))
示例#8
0
    def test_concrete_program(self):
        """concrete_program reflects the InputSpec it was built with, and
        redecorating replaces the single cached program."""
        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = to_variable(np.ones([4, 10]).astype('float32'))
            y = to_variable(np.ones([4, 10]).astype('float32') * 2)
            int_val = 4.

            net = SimpleNet()
            # The concrete program comes straight from the InputSpec info,
            # so no fake inputs are needed.
            first_specs = [InputSpec([-1, 10]), InputSpec([-1, 10], name='y')]
            net.add_func = declarative(net.add_func, input_spec=first_specs)
            first_prog = net.add_func.concrete_program
            self.assertTrue(first_prog.inputs[-1].shape == (-1, 10))
            self.assertTrue(first_prog.inputs[-1].name == 'y')

            # Decorate again with different specs to build another program.
            second_specs = [InputSpec([10]), InputSpec([10], name='label')]
            net.add_func = declarative(net.add_func, input_spec=second_specs)
            second_prog = net.add_func.concrete_program
            self.assertTrue(second_prog.inputs[-1].shape == (10, ))
            self.assertTrue(second_prog.inputs[-1].name == 'label')
            # Note(Aurelius84): `declarative(foo)` returns a new instance each
            # time, so the cache still holds exactly one program.
            self.assertTrue(len(net.add_func.program_cache) == 1)
            self.assertTrue(first_prog != second_prog)
示例#9
0
    class ExportModel(nn.Layer):
        """Wrapper layer whose static ``forward`` merges three single-key
        dict inputs and feeds them to the wrapped model's export path."""

        def __init__(self, model):
            super(ExportModel, self).__init__()
            # The model being exported; must provide get_export_model().
            self.model = model

        # NOTE(review): `image_shape` is captured from the enclosing scope
        # at class-definition time — confirm it is bound before this class
        # body executes.
        @to_static(input_spec=[
            {
                'image': InputSpec(
                    shape=[None] + image_shape, name='image')
            },
            {
                'im_shape': InputSpec(
                    shape=[None, 2], name='im_shape')
            },
            {
                'scale_factor': InputSpec(
                    shape=[None, 2], name='scale_factor')
            },
        ])
        def forward(self, image, im_shape, scale_factor):
            # Each argument arrives as a single-key dict; merge them into
            # one mapping for the export model.
            inputs = {}
            inputs_tensor = [image, im_shape, scale_factor]
            for t in inputs_tensor:
                inputs.update(t)
            outs = self.model.get_export_model(inputs)
            return outs
    def test_jit_save_compatible_input_sepc(self):
        """jit.save must accept no input_spec, an identical spec, and a
        compatible narrower spec; reloaded layers behave like the original."""
        layer = InputSepcLayer()
        save_dir = "jit_save_compatible_input_spec"
        path = save_dir + "/model"

        # 1) No explicit spec: the decorated spec is used.
        paddle.jit.save(layer=layer, path=path)
        no_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, no_input_spec_layer)
        shutil.rmtree(save_dir)

        # 2) A spec identical to the decorated one.
        same_specs = [
            InputSpec(shape=[None, 8], dtype='float32', name='x'),
            InputSpec(shape=[None, 1], dtype='float64', name='y')
        ]
        paddle.jit.save(layer=layer, path=path, input_spec=same_specs)
        same_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, same_input_spec_layer)
        shutil.rmtree(save_dir)

        # 3) A compatible spec with more concrete shapes and no names.
        narrower_specs = [
            InputSpec(shape=[8, 8], dtype='float32'),
            InputSpec(shape=[8, -1], dtype='float64')
        ]
        paddle.jit.save(layer=layer, path=path, input_spec=narrower_specs)
        compatible_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer,
                                             compatible_input_spec_layer)
        shutil.rmtree(save_dir)
示例#11
0
    def test_dynamic_load(self):
        """Train briefly in dygraph mode, then save and reload the
        checkpoint for both the new- and old-style Adam optimizers.

        Fix: the cleanup previously removed only the cache sub-directory,
        leaking the parent directory created by ``tempfile.mkdtemp()``;
        remove the whole temporary tree instead.
        """
        mnist_data = MnistDataset(mode='train')

        tmp_root = tempfile.mkdtemp()
        path = os.path.join(tmp_root, '.cache_dynamic_load')
        if not os.path.exists(path):
            os.makedirs(path)

        for new_optimizer in [True, False]:
            paddle.disable_static()
            net = LeNet()
            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            if new_optimizer:
                optim = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=net.parameters())
            else:
                optim = fluid.optimizer.Adam(
                    learning_rate=0.001, parameter_list=net.parameters())
            model = Model(net, inputs, labels)
            model.prepare(
                optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
            model.fit(mnist_data, batch_size=64, verbose=0)
            model.save(path)
            model.load(path)
            paddle.enable_static()
        # Remove the entire temp tree, not just the cache sub-directory.
        shutil.rmtree(tmp_root)
示例#12
0
def infer(args):
    """Run lexical-analysis inference and write '(token, tag)' pairs to
    results.txt.

    Args:
        args: parsed CLI namespace; uses ``device``, ``data_dir``,
            ``batch_size``, ``init_checkpoint``, ``emb_dim`` and
            ``hidden_size``.
    """
    paddle.set_device(args.device)

    # create dataset.
    infer_dataset = LacDataset(args.data_dir, mode='infer')

    # Collation: pad word ids to the batch max length and stack the
    # per-sample sequence lengths. The default-argument trick binds the
    # Tuple instance once at definition time.
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=0, dtype='int64'),  # word_ids
        Stack(dtype='int64'),  # length
    ): fn(samples)

    # Create sampler for dataloader
    infer_sampler = paddle.io.BatchSampler(
        dataset=infer_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        drop_last=False)
    infer_loader = paddle.io.DataLoader(
        dataset=infer_dataset,
        batch_sampler=infer_sampler,
        return_list=True,
        collate_fn=batchify_fn)

    # Define the model network
    network = BiGruCrf(args.emb_dim, args.hidden_size, infer_dataset.vocab_size,
                       infer_dataset.num_labels)
    inputs = InputSpec(shape=(-1, ), dtype="int64", name='inputs')
    lengths = InputSpec(shape=(-1, ), dtype="int64", name='lengths')
    model = paddle.Model(network, inputs=[inputs, lengths])
    model.prepare()

    # Load the model and start predicting
    model.load(args.init_checkpoint)
    emissions, lengths, crf_decodes = model.predict(
        test_data=infer_loader, batch_size=args.batch_size)

    # Post-processing the lexical analysis results
    # Flatten the per-batch lengths/predictions into flat arrays.
    lengths = np.array([l for lens in lengths for l in lens]).reshape([-1])
    preds = np.array(
        [pred for batch_pred in crf_decodes for pred in batch_pred])

    results = parse_lac_result(infer_dataset.word_ids, preds, lengths,
                               infer_dataset.word_vocab,
                               infer_dataset.label_vocab)

    # Render each sentence as '(char, tag)' pairs joined into one line.
    sent_tags = []
    for sent, tags in results:
        sent_tag = ['(%s, %s)' % (ch, tag) for ch, tag in zip(sent, tags)]
        sent_tags.append(''.join(sent_tag))

    file_path = "results.txt"
    with open(file_path, "w", encoding="utf8") as fout:
        fout.write("\n".join(sent_tags))

    # Print some examples
    print(
        "The results have been saved in the file: %s, some examples are shown below: "
        % file_path)
    print("\n".join(sent_tags[:10]))
示例#13
0
    def export(self, output_dir='output_inference'):
        """Export the model for deployment: write ``infer_cfg.yml`` plus a
        static-graph (or QAT-quantized) inference model under
        ``output_dir/<config_file_stem>``.

        Args:
            output_dir (str): root directory for exported models.
        """
        self.model.eval()
        model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
        save_dir = os.path.join(output_dir, model_name)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        image_shape = None
        # MOT architectures read their input definition from a dedicated
        # reader section.
        if self.cfg.architecture in MOT_ARCH:
            test_reader_name = 'TestMOTReader'
        else:
            test_reader_name = 'TestReader'
        if 'inputs_def' in self.cfg[test_reader_name]:
            inputs_def = self.cfg[test_reader_name]['inputs_def']
            image_shape = inputs_def.get('image_shape', None)
        # set image_shape=[3, -1, -1] as default
        if image_shape is None:
            image_shape = [3, -1, -1]

        # NOTE(review): eval() was already called at the top of this method;
        # this second call is redundant but harmless.
        self.model.eval()
        if hasattr(self.model, 'deploy'): self.model.deploy = True

        # Save infer cfg
        _dump_infer_config(self.cfg,
                           os.path.join(save_dir, 'infer_cfg.yml'), image_shape,
                           self.model)

        input_spec = [{
            "image": InputSpec(
                shape=[None] + image_shape, name='image'),
            "im_shape": InputSpec(
                shape=[None, 2], name='im_shape'),
            "scale_factor": InputSpec(
                shape=[None, 2], name='scale_factor')
        }]
        # DeepSORT additionally consumes cropped patches as input.
        if self.cfg.architecture == 'DeepSORT':
            input_spec[0].update({
                "crops": InputSpec(
                    shape=[None, 3, 192, 64], name='crops')
            })

        static_model = paddle.jit.to_static(self.model, input_spec=input_spec)
        # NOTE: dy2st do not pruned program, but jit.save will prune program
        # input spec, prune input spec here and save with pruned input spec
        pruned_input_spec = self._prune_input_spec(
            input_spec, static_model.forward.main_program,
            static_model.forward.outputs)

        # dy2st and save model
        if 'slim' not in self.cfg or self.cfg['slim_type'] != 'QAT':
            paddle.jit.save(
                static_model,
                os.path.join(save_dir, 'model'),
                input_spec=pruned_input_spec)
        else:
            # QAT-slimmed models must be saved through the slim helper.
            self.cfg.slim.save_quantized_model(
                self.model,
                os.path.join(save_dir, 'model'),
                input_spec=pruned_input_spec)
        logger.info("Export model and saved in {}".format(save_dir))
    def test_verify_input_spec(self):
        """FunctionSpec requires input_spec to be a list or tuple."""
        a_spec = InputSpec([None, 10], name='a')
        b_spec = InputSpec([10], name='b')

        # Passing a single spec (not wrapped in a list/tuple) must fail.
        with self.assertRaises(TypeError):
            foo_spec = FunctionSpec(foo_func, input_spec=a_spec)

        # A proper list is accepted and flattened to two specs.
        spec_list = [a_spec, b_spec]
        foo_spec = FunctionSpec(foo_func, input_spec=spec_list)
        self.assertTrue(len(foo_spec.flat_input_spec) == 2)
class InputSepcLayer(paddle.nn.Layer):
    '''
    A layer with InputSpec to test InputSpec compatibility
    '''

    # Declared signature: float32 x of shape [N, 8] and float64 y of shape
    # [N, 1]; forward simply echoes both inputs back.
    @paddle.jit.to_static(input_spec=[
        InputSpec(shape=[None, 8], dtype='float32', name='x'),
        InputSpec(shape=[None, 1], dtype='float64', name='y')
    ])
    def forward(self, x, y):
        return x, y
示例#16
0
    def test_from_tensor(self):
        """InputSpec.from_tensor copies dtype/shape/name from the tensor
        and honors an explicit ``name`` override.

        Fix: the override assertion previously compared ``bool_spec2.name``
        with itself, which passes vacuously; compare against the requested
        name instead.
        """
        x_bool = fluid.layers.fill_constant(shape=[1],
                                            dtype='bool',
                                            value=True)
        bool_spec = InputSpec.from_tensor(x_bool)
        self.assertEqual(bool_spec.dtype, x_bool.dtype)
        self.assertEqual(list(bool_spec.shape), list(x_bool.shape))
        self.assertEqual(bool_spec.name, x_bool.name)

        # An explicit name must take precedence over the tensor's own name.
        bool_spec2 = InputSpec.from_tensor(x_bool, name='bool_spec')
        self.assertEqual(bool_spec2.name, 'bool_spec')
示例#17
0
 def get_model(self, amp_config):
     """Build a LeNet Model prepared with Adam, summed cross-entropy loss
     and the given AMP configuration."""
     network = LeNet()
     image_spec = InputSpec([None, 1, 28, 28], "float32", 'x')
     label_spec = InputSpec([None, 1], "int64", "y")
     model = Model(network, image_spec, label_spec)
     adam = paddle.optimizer.Adam(learning_rate=0.001,
                                  parameters=model.parameters())
     model.prepare(optimizer=adam,
                   loss=CrossEntropyLoss(reduction="sum"),
                   amp_configs=amp_config)
     return model
class LinerNetWithUselessInput(paddle.nn.Layer):
    """Linear net that declares a ``label`` input it never uses — per the
    class name, the unused input is the point of this fixture."""

    def __init__(self, in_size, out_size):
        super(LinerNetWithUselessInput, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative(input_spec=[
        InputSpec(shape=[None, 784], dtype='float32', name="image"),
        InputSpec(shape=[None, 1], dtype='int64', name="label")
    ])
    def forward(self, x, label):
        # `label` is intentionally unused.
        out = self._linear(x)
        return out
示例#19
0
    def test_input_spec(self):
        """declarative() records the given InputSpec, and redecorating
        replaces it."""
        net = SimpleNet()
        net = declarative(net, input_spec=[InputSpec([None, 8, 10])])
        self.assertTrue(len(net.forward.inputs) == 1)
        self.assertTrue(len(net.forward.program_cache) == 1)
        # None in the spec shows up as -1 in the recorded shape.
        self.assertListEqual(list(net.forward.inputs[0].shape), [-1, 8, 10])

        # Decorating again swaps in the new spec.
        net = declarative(net, input_spec=[InputSpec([None, 16, 10])])
        self.assertListEqual(list(net.forward.inputs[0].shape), [-1, 16, 10])
示例#20
0
    def test_shape_raise_error(self):
        """Invalid shape arguments raise the documented error types."""
        # Shape entries must be int or None, nothing else.
        with self.assertRaises(ValueError):
            InputSpec(['None', 4, None], dtype='int8')

        # The shape itself must be a list or tuple.
        with self.assertRaises(TypeError):
            InputSpec(4, dtype='int8')

        # An empty shape is rejected as well.
        with self.assertRaises(ValueError):
            InputSpec([], dtype='int8')
示例#21
0
class TestLayer(paddle.nn.Layer):
    """Layer whose static ``forward`` declares three inputs but only uses
    two; ``z`` is deliberately ignored."""

    def __init__(self):
        super(TestLayer, self).__init__()

    @paddle.jit.to_static(input_spec=[
        InputSpec(shape=[1], dtype='float32', name='x'),
        InputSpec(shape=[1], dtype='float32', name='y'),
        InputSpec(shape=[1], dtype='float32', name='z')
    ])
    def forward(self, x, y, z):
        # z is intentionally unused.
        result = x + y
        return result
示例#22
0
    def test_fit_by_step(self):
        """Fit with a linear-warmup + piecewise-decay LR schedule and check
        the final LR in both dynamic and static modes."""
        base_lr = 1e-3
        boundaries = [5, 8]

        def make_optimizer(parameters=None):
            # Momentum SGD whose LR warms up linearly for 4 steps and then
            # follows a piecewise decay at the boundaries above.
            momentum = 0.9
            weight_decay = 5e-4
            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
            learning_rate = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=boundaries, values=values)
            learning_rate = paddle.optimizer.lr.LinearWarmup(
                learning_rate=learning_rate,
                warmup_steps=4,
                start_lr=base_lr / 5.,
                end_lr=base_lr,
                verbose=True)
            optimizer = paddle.optimizer.Momentum(
                learning_rate=learning_rate,
                weight_decay=weight_decay,
                momentum=momentum,
                parameters=parameters)
            return optimizer

        # dynamic test
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)

        # After 10 epochs the LR must have stepped past every boundary.
        np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,
                                   base_lr * (0.1**len(boundaries)))
        # static test
        paddle.enable_static()

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)

        # Static mode must reach the same final learning rate.
        np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,
                                   base_lr * (0.1**len(boundaries)))
class LinearNetMultiInput1(fluid.dygraph.Layer):
    """Two parallel linear branches over a pair of inputs; returns both
    branch outputs plus the mean of their sum."""

    def __init__(self, in_size, out_size):
        super(LinearNetMultiInput1, self).__init__()
        self._linear1 = Linear(in_size, out_size)
        self._linear2 = Linear(in_size, out_size)

    # NOTE: input_spec is given as a tuple here (not a list).
    @declarative(input_spec=(InputSpec([None, 8], dtype='float32'),
                             InputSpec([None, 8], dtype='float32')))
    def forward(self, x, y):
        x_out = self._linear1(x)
        y_out = self._linear2(y)
        loss = fluid.layers.mean(x_out + y_out)
        return x_out, y_out, loss
示例#24
0
    def test_from_numpy(self):
        """InputSpec.from_numpy mirrors the array's dtype/shape and takes
        an optional name (None when omitted)."""
        x_numpy = np.ones([10, 12])
        x_np_spec = InputSpec.from_numpy(x_numpy)
        expected_dtype = convert_np_dtype_to_dtype_(x_numpy.dtype)
        self.assertEqual(x_np_spec.dtype, expected_dtype)
        self.assertEqual(x_np_spec.shape, x_numpy.shape)
        self.assertEqual(x_np_spec.name, None)

        # With an explicit name the spec carries it verbatim.
        x_numpy2 = np.array([1, 2, 3, 4]).astype('int64')
        x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64')
        expected_dtype2 = convert_np_dtype_to_dtype_(x_numpy2.dtype)
        self.assertEqual(x_np_spec2.dtype, expected_dtype2)
        self.assertEqual(x_np_spec2.shape, x_numpy2.shape)
        self.assertEqual(x_np_spec2.name, 'x_np_int64')
示例#25
0
class LinerNetWithLabel(paddle.nn.Layer):
    """Linear classifier whose ``forward`` also takes a ``label`` input;
    a loss is computed but only ``out`` is returned."""

    def __init__(self, in_size, out_size):
        super(LinerNetWithLabel, self).__init__()
        self._linear = Linear(in_size, out_size)

    @paddle.jit.to_static(input_spec=[
        InputSpec(shape=[None, 784], dtype='float32', name="image"),
        InputSpec(shape=[None, 1], dtype='int64', name="label")
    ])
    def forward(self, x, label):
        out = self._linear(x)
        # The loss is computed but not returned — presumably to exercise
        # output pruning on save; confirm against the callers.
        loss = fluid.layers.cross_entropy(out, label)
        avg_loss = fluid.layers.mean(loss)
        return out
示例#26
0
    def test_eq_and_hash(self):
        """__eq__ keys on (shape, dtype, name) while __hash__ keys only on
        (shape, dtype)."""
        spec_plain_a = InputSpec([10, 16], dtype='float32')
        spec_plain_b = InputSpec([10, 16], dtype='float32')
        spec_named = InputSpec([10, 16], dtype='float32', name='x')
        spec_short = InputSpec([16], dtype='float32', name='x')

        # Equality also compares the name.
        self.assertTrue(spec_plain_a == spec_plain_b)
        self.assertTrue(spec_plain_a != spec_named)  # different name
        self.assertTrue(spec_named != spec_short)  # different shape

        # Hashing ignores the name, so named and unnamed specs collide.
        self.assertTrue(hash(spec_plain_a) == hash(spec_plain_b))
        self.assertTrue(hash(spec_plain_a) == hash(spec_named))
        self.assertTrue(hash(spec_named) != hash(spec_short))
示例#27
0
class LinearNet(nn.Layer):
    """Single-linear-layer net exposing two separately-traced static entry
    points: ``forward`` (one pass) and ``another_forward`` (two passes)."""

    def __init__(self):
        super(LinearNet, self).__init__()
        self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

    @paddle.jit.to_static(
        input_spec=[InputSpec(shape=[None, 784], dtype='float32')])
    def forward(self, x):
        return self._linear(x)

    # A second traced method on the same layer: applies the linear twice.
    @paddle.jit.to_static(
        input_spec=[InputSpec(shape=[None, 784], dtype='float32')])
    def another_forward(self, x):
        tmp = self._linear(x)
        return self._linear(tmp)
def StartLearning():
    """Load the saved zodiac classifier, run prediction over the test set
    and show the result text in the UI label."""
    messagebox.showinfo("Message title", "已经成功运行请耐心等待")
    predict_dataset = predict.ZodiacDataset(mode='test')
    # print('Number of test samples: {}'.format(len(predict_dataset)))
    # Instantiate the network structure
    network = paddle.vision.models.resnet50(num_classes=get('num_classes'))

    # Wrap the network in a high-level Model
    model_2 = paddle.Model(network,
                           inputs=[
                               InputSpec(shape=[-1] + get('image_shape'),
                                         dtype='float32',
                                         name='image')
                           ])

    # Load the trained weights
    model_2.load(get('model_save_dir'))

    # Configure the model for inference
    model_2.prepare()

    # Run prediction
    result = model_2.predict(predict_dataset)
    #print(result)
    # Sample-to-label mapping
    LABEL_MAP = get('LABEL_MAP')
    # NOTE(review): `result` and LABEL_MAP are unused below; the displayed
    # text comes from `end.a()` — confirm this is intentional.
    a = end.a()
    lbl1.configure(text="测试结果:" + a)
示例#29
0
    def infer(self, arch):
        """Predictions from a dygraph-saved model must match the static-mode
        reload of the same checkpoint."""
        ckpt_dir = tempfile.mkdtemp()
        sample = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32)
        outputs = {}
        for dygraph in [True, False]:
            if not dygraph:
                paddle.enable_static()

            net = models.__dict__[arch](pretrained=True)
            specs = [InputSpec([None, 3, 224, 224], 'float32', 'image')]
            model = paddle.Model(network=net, inputs=specs)
            model.prepare()

            if dygraph:
                # First pass: save the weights and keep the dygraph output.
                model.save(ckpt_dir)
                outputs['dygraph'] = model.predict_batch(sample)
            else:
                # Second pass: reload in static mode and predict again.
                model.load(ckpt_dir)
                outputs['static'] = model.predict_batch(sample)

            if not dygraph:
                paddle.disable_static()

        shutil.rmtree(ckpt_dir)
        np.testing.assert_allclose(outputs['dygraph'], outputs['static'])
示例#30
0
    def test_test_batch(self):
        """predict_batch must reproduce the eager eval output in both
        dynamic and static modes."""
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)

        def get_expect():
            # Reference output from a plain eager forward pass.
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_tensor(data))
            fluid.disable_dygraph()
            return output.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            if dynamic:
                fluid.enable_dygraph(device)
            self.set_seed()
            net = MyModel()
            specs = [InputSpec([None, dim], 'float32', 'x')]
            model = Model(net, specs)
            model.prepare()
            out, = model.predict_batch([data])

            np.testing.assert_allclose(out, ref, rtol=1e-6)
            if dynamic:
                fluid.disable_dygraph()