Example #1
 def get_all_places(self):
     p = [fluid.CPUPlace()]
     if fluid.is_compiled_with_cuda():
         p.append(fluid.CUDAPlace(0))
     return p
Example #2
 def setUp(self):
     self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
     ) else fluid.CPUPlace()
     self.len = 100
     self._init_dyfunc()
Example #3
 def test_case(self):
     self._test_case(fluid.CPUPlace())
     if fluid.is_compiled_with_cuda():
         self._test_case(fluid.CUDAPlace(0))
Example #4
 def setUp(self):
     self.input = np.array([3]).astype('int32')
     self.place = paddle.CUDAPlace(
         0) if fluid.is_compiled_with_cuda() else paddle.CPUPlace()
     self._set_test_func()
Example #5
 def setUp(self):
     self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
     ) else fluid.CPUPlace()
     self.x = np.zeros(shape=(1), dtype=np.int32)
     self._init_dyfunc()
Example #6
 def places(self):
     if fluid.is_compiled_with_cuda():
         return [fluid.CPUPlace(), fluid.CUDAPlace(0)]
     else:
         return [fluid.CPUPlace()]
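The helpers in Example #1 and Example #6 return every place the current build supports; tests then run the same check once per place, as Example #3 does. A minimal stand-alone sketch of that pattern (the check callable is hypothetical):

import paddle.fluid as fluid

def get_all_places():
    # CPUPlace is always available; add the first GPU only when this
    # Paddle build was compiled with CUDA support
    places = [fluid.CPUPlace()]
    if fluid.is_compiled_with_cuda():
        places.append(fluid.CUDAPlace(0))
    return places

def run_on_all_places(check):
    # run the same check callable once per available device
    for place in get_all_places():
        check(place)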
Example #7
 def test_cases(self):
     for approximate in [True, False]:
         self._test_case1_cpu(approximate)
         if fluid.is_compiled_with_cuda():
             self._test_case1_gpu(approximate)
Example #8
    def test_accuracy(self):
        image = fluid.layers.data(name='image',
                                  shape=[1, 28, 28],
                                  dtype='float32')
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        model = MobileNet()
        out = model.net(input=image, class_dim=10)
        cost = fluid.layers.cross_entropy(input=out, label=label)
        avg_cost = fluid.layers.mean(x=cost)
        acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
        acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
        optimizer = fluid.optimizer.Momentum(
            momentum=0.9,
            learning_rate=0.01,
            regularization=fluid.regularizer.L2Decay(4e-5))
        optimizer.minimize(avg_cost)
        main_prog = fluid.default_main_program()
        val_prog = main_prog.clone(for_test=True)

        place = fluid.CUDAPlace(
            0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        feeder = fluid.DataFeeder([image, label], place, program=main_prog)
        train_reader = paddle.fluid.io.batch(paddle.dataset.mnist.train(),
                                             batch_size=64)
        eval_reader = paddle.fluid.io.batch(paddle.dataset.mnist.test(),
                                            batch_size=64)

        def train(program):
            iter = 0
            for data in train_reader():
                cost, top1, top5 = exe.run(
                    program,
                    feed=feeder.feed(data),
                    fetch_list=[avg_cost, acc_top1, acc_top5])
                iter += 1
                if iter % 100 == 0:
                    print(
                        'train iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
                        format(iter, cost, top1, top5))

        def test(program, outputs=[avg_cost, acc_top1, acc_top5]):
            iter = 0
            result = [[], [], []]
            for data in eval_reader():
                cost, top1, top5 = exe.run(program,
                                           feed=feeder.feed(data),
                                           fetch_list=outputs)
                iter += 1
                if iter % 100 == 0:
                    print(
                        'eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
                        format(iter, cost, top1, top5))
                result[0].append(cost)
                result[1].append(top1)
                result[2].append(top5)
            print(' avg loss {}, acc_top1 {}, acc_top5 {}'.format(
                np.mean(result[0]), np.mean(result[1]), np.mean(result[2])))
            return np.mean(result[1]), np.mean(result[2])

        train(main_prog)
        top1_1, top5_1 = test(val_prog)
        fluid.io.save_inference_model(
            dirname='./test_quant_post',
            feeded_var_names=[image.name, label.name],
            target_vars=[avg_cost, acc_top1, acc_top5],
            main_program=val_prog,
            executor=exe,
            model_filename='model',
            params_filename='params')

        quant_post_static(exe,
                          './test_quant_post',
                          './test_quant_post_inference',
                          sample_generator=paddle.dataset.mnist.test(),
                          model_filename='model',
                          params_filename='params',
                          batch_nums=10)
        quant_post_prog, feed_target_names, fetch_targets = fluid.io.load_inference_model(
            dirname='./test_quant_post_inference',
            executor=exe,
            model_filename='__model__',
            params_filename='__params__')
        top1_2, top5_2 = test(quant_post_prog, fetch_targets)
        print("before quantization: top1: {}, top5: {}".format(top1_1, top5_1))
        print("after quantization: top1: {}, top5: {}".format(top1_2, top5_2))
Example #9
 def test_cases(self):
     self._test_case1_cpu()
     if fluid.is_compiled_with_cuda():
         self._test_case1_gpu()
Example #10
 def setUp(self):
     self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
     ) else fluid.CPUPlace()
     self.set_input()
     self.set_test_func()
Example #11
 def setUp(self):
     self.input = np.zeros((1)).astype('int32')
     self.place = fluid.CUDAPlace(
         0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
     self.init_dygraph_func()
Example #12
    def test_accuracy(self):
        image = fluid.layers.data(name='image',
                                  shape=[1, 28, 28],
                                  dtype='float32')
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        model = MobileNet()
        out = model.net(input=image, class_dim=10)
        cost = fluid.layers.cross_entropy(input=out, label=label)
        avg_cost = fluid.layers.mean(x=cost)
        acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
        acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
        optimizer = fluid.optimizer.Momentum(
            momentum=0.9,
            learning_rate=0.01,
            regularization=fluid.regularizer.L2Decay(4e-5))
        optimizer.minimize(avg_cost)
        main_prog = fluid.default_main_program()
        val_prog = main_prog.clone(for_test=True)

        place = fluid.CUDAPlace(
            0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        feeder = fluid.DataFeeder([image, label], place, program=main_prog)
        train_reader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=64)
        eval_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)

        def train(program):
            iter = 0
            for data in train_reader():
                cost, top1, top5 = exe.run(
                    program,
                    feed=feeder.feed(data),
                    fetch_list=[avg_cost, acc_top1, acc_top5])
                iter += 1
                if iter % 100 == 0:
                    print(
                        'train iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
                        format(iter, cost, top1, top5))

        def test(program):
            iter = 0
            result = [[], [], []]
            for data in eval_reader():
                cost, top1, top5 = exe.run(
                    program,
                    feed=feeder.feed(data),
                    fetch_list=[avg_cost, acc_top1, acc_top5])
                iter += 1
                if iter % 100 == 0:
                    print(
                        'eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
                        format(iter, cost, top1, top5))
                result[0].append(cost)
                result[1].append(top1)
                result[2].append(top5)
            print(' avg loss {}, acc_top1 {}, acc_top5 {}'.format(
                np.mean(result[0]), np.mean(result[1]), np.mean(result[2])))
            return np.mean(result[1]), np.mean(result[2])

        train(main_prog)
        top1_1, top5_1 = test(main_prog)

        config = {
            'weight_quantize_type': 'channel_wise_abs_max',
            'activation_quantize_type': 'moving_average_abs_max',
            'quantize_op_types': ['depthwise_conv2d', 'mul', 'conv2d'],
        }
        quant_train_prog = quant_aware(main_prog,
                                       place,
                                       config,
                                       for_test=False)
        quant_eval_prog = quant_aware(val_prog, place, config, for_test=True)
        train(quant_train_prog)
        quant_eval_prog, int8_prog = convert(quant_eval_prog,
                                             place,
                                             config,
                                             save_int8=True)
        top1_2, top5_2 = test(quant_eval_prog)
        # values before quantization and after quantization should be close
        print("before quantization: top1: {}, top5: {}".format(top1_1, top5_1))
        print("after quantization: top1: {}, top5: {}".format(top1_2, top5_2))
Example #13
 def __init__(self, loader, loader_ref=None, latent_dim=16, mode=''):
     self.loader = loader  # either a generator or an iterator should work here; this one is a generator
     self.loader_ref = loader_ref
     self.latent_dim = latent_dim
     self.device_bool = fluid.is_compiled_with_cuda()  # not needed in the current version of `paddle`
     self.mode = mode
Example #14
import paddle
import paddle.fluid as fluid
import paddleslim as slim
import numpy as np
import paddle.dataset.mnist as reader
import paddleslim.quant as quant

use_gpu = fluid.is_compiled_with_cuda()
exe, train_program, val_program, inputs, outputs = slim.models.image_classification(
    "MobileNet", [1, 28, 28], 10, use_gpu=use_gpu)
place = fluid.CUDAPlace(
    0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()

train_reader = paddle.batch(reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(reader.test(), batch_size=128, drop_last=True)
data_feeder = fluid.DataFeeder(inputs, place)


def train(prog):
    iter = 0
    loss_list = []
    for data in train_reader():
        acc1, acc5, loss = exe.run(prog,
                                   feed=data_feeder.feed(data),
                                   fetch_list=outputs)
        loss_list.append(loss)
        if iter % 100 == 0:
            print('train iter={}, top1={}, top5={}, loss={}'.format(
                iter, acc1.mean(), acc5.mean(), loss.mean()))
        iter += 1
    avg_loss = np.mean(loss_list)
Example #15
import paddle.fluid as fluid
from paddle.fluid.dygraph import declarative, ProgramTranslator
from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Linear, Pool2D
from paddle.fluid.dygraph.io import VARIABLE_FILENAME

from predictor_utils import PredictorTools

SEED = 2020
IMAGENET1000 = 1281167
base_lr = 0.001
momentum_rate = 0.9
l2_decay = 1e-4
# NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout.
batch_size = 2
epoch_num = 1
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
    else fluid.CPUPlace()
MODEL_SAVE_PATH = "./resnet.inference.model"
DY_STATE_DICT_SAVE_PATH = "./resnet.dygraph"
program_translator = ProgramTranslator()

if fluid.is_compiled_with_cuda():
    fluid.set_flags({'FLAGS_cudnn_deterministic': True})


def optimizer_setting(parameter_list=None):
    optimizer = fluid.optimizer.Momentum(
        learning_rate=base_lr,
        momentum=momentum_rate,
        regularization=fluid.regularizer.L2Decay(l2_decay),
        parameter_list=parameter_list)
Example #16
cfg.anchors = [
    10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373,
    326
]
# anchor mask of each yolo layer
cfg.anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
# IoU threshold to ignore objectness loss of pred box
cfg.ignore_thresh = .7
#
# SOLVER options
#
# batch size
cfg.batch_size = 1 if sys.platform == 'darwin' or os.name == 'nt' else 4
# base learning rate used to derive the final learning rate
cfg.learning_rate = 0.001
# maximum number of iterations
cfg.max_iter = 20 if fluid.is_compiled_with_cuda() else 2
# Disable mixup in last N iter
cfg.no_mixup_iter = 10 if fluid.is_compiled_with_cuda() else 1
# warm up to learning rate
cfg.warm_up_iter = 10 if fluid.is_compiled_with_cuda() else 1
cfg.warm_up_factor = 0.
# lr steps_with_decay
cfg.lr_steps = [400000, 450000]
cfg.lr_gamma = 0.1
# L2 regularization hyperparameter
cfg.weight_decay = 0.0005
# momentum with SGD
cfg.momentum = 0.9
#
# ENV options
#
Example #17
 def setUp(self):
     self.input = numpy.ones(5).astype("int32")
     self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
     ) else fluid.CPUPlace()
     self.init_test_func()
Example #18
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import paddle.fluid as fluid
import unittest

from paddle.fluid.dygraph.jit import dygraph_to_static_func

from ifelse_simple_func import *

np.random.seed(1)

if fluid.is_compiled_with_cuda():
    place = fluid.CUDAPlace(0)
else:
    place = fluid.CPUPlace()


class TestDygraphIfElse(unittest.TestCase):
    """
    TestCase for the transformation from control flow `if/else`
    dependent on tensor in Dygraph into Static `fluid.layers.cond`.
    """
    def setUp(self):
        self.x = np.random.random([10, 16]).astype('float32')
        self.dyfunc = dyfunc_with_if_else

    def _run_static(self):
Example #19

def dynamic_evaluate(model, dataloader):
    with fluid.dygraph.no_grad():
        model.eval()
        cnt = 0
        for inputs, labels in dataloader:
            outputs = model(inputs)

            cnt += (np.argmax(outputs.numpy(), -1)[:, np.newaxis] ==
                    labels.numpy()).astype('int').sum()

    return cnt / len(dataloader.dataset)


@unittest.skipIf(not fluid.is_compiled_with_cuda(),
                 'CPU testing is not supported')
class TestModel(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        if not fluid.is_compiled_with_cuda():
            raise unittest.SkipTest('module not tested when ONLY_CPU compiling')
        cls.device = paddle.set_device('gpu')
        fluid.enable_dygraph(cls.device)

        sp_num = 1280
        cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)
        cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)
        cls.test_dataset = MnistDataset(
            mode='test', return_label=False, sample_num=sp_num)
Example #20
    def setUp(self):
        self.place = fluid.CUDAPlace(
            0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()

        self.init_data()
        self.init_dygraph_func()
Example #21
import os
import tempfile
import unittest
import numpy as np

import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX

from bert_dygraph_model import PretrainModelLayer
from bert_utils import get_bert_config, get_feed_data_reader

from predictor_utils import PredictorTools

program_translator = ProgramTranslator()
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace(
)
SEED = 2020
STEP_NUM = 10
PRINT_STEP = 2


class TestBert(unittest.TestCase):
    def setUp(self):
        self.bert_config = get_bert_config()
        self.data_reader = get_feed_data_reader(self.bert_config)
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_save_dir = os.path.join(self.temp_dir.name, 'inference')
        self.model_save_prefix = os.path.join(self.model_save_dir, 'bert')
        self.model_filename = 'bert' + INFER_MODEL_SUFFIX
        self.params_filename = 'bert' + INFER_PARAMS_SUFFIX
Example #22
 def setUp(self):
     self.input = np.random.random([10, 16]).astype('float32')
     self.place = fluid.CUDAPlace(
         0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
     self.init_test_func()
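Most of the snippets above target the legacy fluid API. In Paddle 2.x the same capability check is exposed as paddle.is_compiled_with_cuda(); a minimal sketch of the equivalent device selection, assuming a Paddle 2.x installation:

import paddle

# pick the first GPU when the wheel was built with CUDA, otherwise fall back to the CPU
if paddle.is_compiled_with_cuda():
    paddle.set_device('gpu:0')
else:
    paddle.set_device('cpu')

x = paddle.zeros([2, 3])
print(x.place)  # shows which device the tensor lives on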