Example #1
def vgg_bn_drop(input):
    def conv_block(ipt, num_filter, groups, dropouts, num_channels=None, param_attr=None):
        return paddle.networks.img_conv_group(
            input=ipt,
            num_channels=num_channels,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act=paddle.activation.Relu(),
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type=paddle.pooling.Max(),
            param_attr=param_attr)
    pa = ParamAttr(update_hooks=Hook('pruning', sparsity_ratio=0.609))
    conv1 = conv_block(input, 64, 2, [0.3, 0], 3, param_attr=pa)
    pa_conv = ParamAttr(update_hooks=Hook('pruning', sparsity_ratio=0.813))
    conv2 = conv_block(conv1, 128, 2, [0.4, 0], param_attr=pa_conv)
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], param_attr=pa_conv)
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], param_attr=pa_conv)
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], param_attr=pa_conv)

    pa_fc = ParamAttr(update_hooks=Hook('pruning', sparsity_ratio=0.985))
    fc1 = paddle.layer.fc(input=conv5,
                          size=512,
                          act=paddle.activation.Linear(),
                          param_attr=pa_fc)
    bn = paddle.layer.batch_norm(
        input=fc1,
        act=paddle.activation.Relu())
    fc2 = paddle.layer.fc(input=bn,
                          size=512,
                          act=paddle.activation.Linear(),
                          param_attr=pa_fc)
    return fc2
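All of these snippets assume the legacy PaddlePaddle v2 API plus pruning-aware parameter attributes. Below is a minimal sketch of the imports they depend on; the exact module path for `Hook` is an assumption and may differ across PaddlePaddle releases.

import sys
import gzip

import paddle.v2 as paddle
# Assumed location: Hook wraps HookAttribute, which attaches pruning to a parameter.
from paddle.v2.attr import ParamAttr, Hook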
Example #2
def create_conv(context_len, hidden_size, prefix):
    # `emb` is the word embedding layer from the enclosing scope.
    key = "%s_%d_%d" % (prefix, context_len, hidden_size)
    conv = paddle.networks.sequence_conv_pool(
        input=emb,
        context_len=context_len,
        hidden_size=hidden_size,
        context_proj_param_attr=ParamAttr(name=key + 'contex_proj.w'),
        fc_param_attr=ParamAttr(name=key + '_fc.w'),
        fc_bias_attr=ParamAttr(name=key + '_fc.b'),
        pool_bias_attr=ParamAttr(name=key + '_pool.b'))
    return conv
Example #3
def create_conv(context_len, hidden_size, prefix):
    # `input_layer` comes from the enclosing scope.
    key = "%s_%d_%d" % (prefix, context_len, hidden_size)
    conv = paddle.networks.sequence_conv_pool(
        input=input_layer,
        context_len=context_len,
        hidden_size=hidden_size,
        # set parameter attr for parameter sharing
        context_proj_param_attr=ParamAttr(name=key + "contex_proj.w"),
        fc_param_attr=ParamAttr(name=key + "_fc.w"),
        fc_bias_attr=ParamAttr(name=key + "_fc.b"),
        pool_bias_attr=ParamAttr(name=key + "_pool.b"))
    return conv
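The comment above points at the key mechanism: every `ParamAttr` carries an explicit `name` built from `key`, so two layers that produce the same `key` reuse the same weights. A minimal, hedged usage sketch, assuming the enclosing scope rebinds `input_layer` to each branch's input before the call:

left = create_conv(3, 128, "match")   # creates parameters named match_3_128_fc.w, ...
right = create_conv(3, 128, "match")  # same key, so the parameters are shared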
Example #4
def _create_dnn(self, input_layer):
    for id, dim in enumerate(self.fc_config):
        name = "fc_%d_%d" % (id, dim)
        logger.info("create fc_layer whose dimension is %d" % dim)
        fc = paddle.layer.fc(input=input_layer,
                             size=dim,
                             act=paddle.activation.Tanh(),
                             param_attr=ParamAttr(name="%s.w" % name),
                             bias_attr=ParamAttr(name="%s.b" % name,
                                                 initial_std=0.))
        input_layer = fc
    return input_layer
Example #5
def create_rnn(self, emb, prefix=""):
    """
    A GRU sentence vector learner.
    """
    gru = paddle.networks.simple_gru(
        input=emb,
        size=self.dnn_dims[1],
        mixed_param_attr=ParamAttr(name="%s_gru_mixed.w" % prefix),
        mixed_bias_param_attr=ParamAttr(name="%s_gru_mixed.b" % prefix),
        gru_param_attr=ParamAttr(name="%s_gru.w" % prefix),
        gru_bias_attr=ParamAttr(name="%s_gru.b" % prefix))
    sent_vec = paddle.layer.last_seq(gru)
    return sent_vec
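A hedged usage sketch: `create_rnn` consumes an embedding sequence and keeps only the GRU's final state as the sentence vector. `model` and `word_ids` are hypothetical names:

emb = model.create_embedding(word_ids, prefix="left")  # word ids -> embedding sequence
sent_vec = model.create_rnn(emb, prefix="left")        # last GRU state = sentence vector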
Example #6
def create_fc(self, emb, prefix=""):
    """
    A sentence vector learner: max pooling over `emb` followed by a
    fully connected layer.
    :param emb: The output of the embedding layer
    :type emb: paddle.layer
    :param prefix: A prefix will be added to the layers' names.
    :type prefix: str
    """
    _input_layer = paddle.layer.pooling(input=emb,
                                        pooling_type=paddle.pooling.Max())
    fc = paddle.layer.fc(input=_input_layer,
                         size=self.dnn_dims[1],
                         param_attr=ParamAttr(name="%s_fc.w" % prefix),
                         bias_attr=ParamAttr(name="%s_fc.b" % prefix,
                                             initial_std=0.))
    return fc
Example #7
def create_dnn(self, sent_vec, prefix):
    # If dnn_dims lists more than one dimension, stack a fc layer for
    # each entry after the first (the first is the embedding size).
    if len(self.dnn_dims) > 1:
        _input_layer = sent_vec
        for id, dim in enumerate(self.dnn_dims[1:]):
            name = "%s_fc_%d_%d" % (prefix, id, dim)
            logger.info("create fc layer [%s] which dim is %d" %
                        (name, dim))
            fc = paddle.layer.fc(name=name,
                                 input=_input_layer,
                                 size=dim,
                                 act=paddle.activation.Tanh(),
                                 param_attr=ParamAttr(name='%s.w' % name),
                                 bias_attr=ParamAttr(name='%s.b' % name))
            _input_layer = fc
    return _input_layer
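In this model `dnn_dims[0]` is the embedding size (see the `create_embedding` snippets below), so `create_dnn` adds one fc layer per remaining entry. A small illustration of the naming scheme, with a hypothetical configuration:

# Hypothetical: dnn_dims = [256, 128, 64] -> fc layers of sizes 128 and 64.
dims = [256, 128, 64]
for id, dim in enumerate(dims[1:]):
    print "%s_fc_%d_%d" % ("left", id, dim)  # left_fc_0_128, left_fc_1_64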
Example #8
def create_dnn(self, sent_vec, prefix):
    # If dnn_dims lists more than one dimension, a fc layer is added
    # for each entry after the first.
    if len(self.dnn_dims) > 1:
        _input_layer = sent_vec
        for id, dim in enumerate(self.dnn_dims[1:]):
            name = "%s_fc_%d_%d" % (prefix, id, dim)
            logger.info("create fc layer [%s] whose dimension is %d" %
                        (name, dim))
            fc = paddle.layer.fc(input=_input_layer,
                                 size=dim,
                                 act=paddle.activation.Tanh(),
                                 param_attr=ParamAttr(name="%s.w" % name),
                                 bias_attr=ParamAttr(name="%s.b" % name,
                                                     initial_std=0.))
            _input_layer = fc
    return _input_layer
Example #9
def create_embedding(self, input, prefix=""):
    logger.info("create embedding table [%s] which dim is %d" %
                (prefix, self.dnn_dims[0]))
    emb = paddle.layer.embedding(input=input,
                                 size=self.dnn_dims[0],
                                 param_attr=ParamAttr(name='%s_emb.w' %
                                                      prefix))
    return emb
Example #10
def basicblock(input, ch_in, ch_out, stride):
    short = shortcut(input, ch_in, ch_out, stride)
    param_attr = ParamAttr(
        update_hooks=Hook('dynamic_pruning', sparsity_upper_bound=0.8))
    conv1 = conv_bn_layer(input, ch_out, 3, stride, 1, param_attr=param_attr)
    conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())
    return paddle.layer.addto(input=[short, conv2],
                              act=paddle.activation.Relu())
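`basicblock` relies on a `shortcut` helper that is not shown. Below is a hedged sketch of what it typically looks like in the PaddlePaddle ResNet examples, with the signature assumed from the call above:

def shortcut(ipt, ch_in, ch_out, stride):
    # Project the input with a 1x1 conv when the channel count changes,
    # otherwise pass it through untouched (the standard ResNet identity shortcut).
    if ch_in != ch_out:
        return conv_bn_layer(ipt, ch_out, 1, stride, 0,
                             paddle.activation.Linear())
    return ipt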
Example #11
def create_embedding(self, input, prefix=''):
    '''
    Create an embedding table whose name has a `prefix`.
    '''
    logger.info("create embedding table [%s] whose dimension is %d" %
                (prefix, self.dnn_dims[0]))
    emb = paddle.layer.embedding(
        input=input,
        size=self.dnn_dims[0],
        param_attr=ParamAttr(name='%s_emb.w' % prefix))
    return emb
Example #12
def create_embedding(self, input, prefix=""):
    """
    Create word embedding. The `prefix` is added in front of the name of
    the embedding's learnable parameter.
    """
    logger.info("Create embedding table [%s] whose dimension is %d." %
                (prefix, self.dnn_dims[0]))
    emb = paddle.layer.embedding(input=input,
                                 size=self.dnn_dims[0],
                                 param_attr=ParamAttr(name="%s_emb.w" %
                                                      prefix))
    return emb
Example #13
def create_embedding(input, emb_dim=256, prefix=""):
    """
    A word embedding vector layer
    :param input: the layer providing word ids
    :param emb_dim: dimension of the embedding vectors
    :param prefix: prefix for the embedding parameter's name
    :return: the embedding layer
    """
    logger.info("create embedding table [%s] which dim is %d" %
                (prefix, emb_dim))
    emb = paddle.layer.embedding(input=input,
                                 size=emb_dim,
                                 param_attr=ParamAttr(name='%s_emb.w' %
                                                      prefix))
    return emb
Example #14
def test_initializer(self):
    def initializer(name):
        assert name == "fc.w"
        mat = numpy.ones((3, 2), dtype=numpy.float32)
        mat[1, 1] = 2
        return mat

    x = layer.data(name="x", type=data_type.dense_vector(3))
    y = layer.fc(x,
                 size=2,
                 bias_attr=False,
                 param_attr=ParamAttr(
                     name="fc.w", initializer=initializer))
    params = parameters.create(y)
    val = params["fc.w"]
    assert val.shape == (3, 2)
    expected = numpy.array([[1, 1], [1, 2], [1, 1]], numpy.float32)
    assert numpy.logical_and.reduce(numpy.reshape(val == expected, 6))
Example #15
def depthwise_separable(input, num_filters1, num_filters2, num_groups, stride):
    """
    A depthwise separable block: a grouped 3x3 convolution followed by a
    1x1 pointwise convolution, each wrapped in conv_bn_layer.
    """
    tmp = conv_bn_layer(input=input,
                        filter_size=3,
                        num_filters=num_filters1,
                        stride=stride,
                        padding=1,
                        num_groups=num_groups)

    pa0 = ParamAttr(
        update_hooks=Hook('dynamic_pruning', sparsity_upper_bound=0.75))
    tmp = conv_bn_layer(input=tmp,
                        filter_size=1,
                        num_filters=num_filters2,
                        stride=1,
                        padding=0,
                        param_attr=pa0)
    return tmp
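A hedged sketch of how these blocks are typically chained in a MobileNet-style body; the channel counts follow the usual MobileNet progression and are assumptions, not part of the snippet above:

# `tmp` is the output of a hypothetical stem convolution.
tmp = depthwise_separable(tmp, 32, 64, 32, stride=1)    # 32 -> 64 channels
tmp = depthwise_separable(tmp, 64, 128, 64, stride=2)   # stride 2 downsamples
tmp = depthwise_separable(tmp, 128, 128, 128, stride=1)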
Example #16
def main():
    datadim = 3 * 224 * 224
    classdim = 102

    # PaddlePaddle init
    paddle.init(use_gpu=True, trainer_count=1, gpu_id=2)

    momentum_optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0005 * BATCH),
        learning_rate=0.001 / BATCH,
        #learning_rate_decay_a=0.1,
        #learning_rate_decay_b=50000 * 50,
        learning_rate_schedule='constant')

    image = paddle.layer.data(name="image",
                              type=paddle.data_type.dense_vector(datadim))

    net = mobile_net(image)
    # option 2. vgg
    #net = vgg_bn_drop(image)

    out = paddle.layer.fc(
        input=net,
        size=classdim,
        act=paddle.activation.Softmax(),
        param_attr=ParamAttr(
            update_hooks=Hook('dynamic_pruning', sparsity_upper_bound=0.8)))
    '''
    out = paddle.layer.img_conv(
                         input=net,
                         filter_size=1,
                         num_filters=classdim,
                         stride=1,
                         act=paddle.activation.Linear())
    '''

    lbl = paddle.layer.data(name="label",
                            type=paddle.data_type.integer_value(classdim))
    cost = paddle.layer.classification_cost(input=out, label=lbl)

    # Create parameters
    parameters = paddle.parameters.create(cost)
    #with gzip.open('Paddle_mobilenet.tar.gz', 'r') as f:
    with gzip.open('mobilenet_flowers102.tar.gz', 'r') as f:
        fparameters = paddle.parameters.Parameters.from_tar(f)
    for param_name in fparameters.names():
        if param_name in parameters.names():
            parameters.set(param_name, fparameters.get(param_name))

    # End batch and end pass event handler
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 50 == 0:
                print "\nPass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
            else:
                sys.stdout.write('.')
                sys.stdout.flush()
        if isinstance(event, paddle.event.EndPass):
            # save parameters
            with gzip.open(
                    'pruning_mobilenet_params_pass_%d.tar.gz' % event.pass_id,
                    'w') as f:
                parameters.to_tar(f)

            result = trainer.test(
                reader=paddle.batch(paddle.dataset.flowers.test(),
                                    batch_size=10),
                feeding={'image': 0, 'label': 1})
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)

    # Create trainer
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=momentum_optimizer)
    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(paddle.dataset.flowers.train(),
                                  buf_size=50000),
            batch_size=BATCH),
        num_passes=100,
        event_handler=event_handler,
        feeding={'image': 0, 'label': 1})
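The handler above writes one parameter archive per pass; here is a hedged sketch of loading such a snapshot back, reusing only API calls that already appear in this example (the pass index is illustrative):

# Reload a snapshot written by event_handler above.
with gzip.open('pruning_mobilenet_params_pass_0.tar.gz', 'r') as f:
    pruned_parameters = paddle.parameters.Parameters.from_tar(f)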
Example #17
def main():
    datadim = 3 * 32 * 32
    classdim = 10

    # PaddlePaddle init
    paddle.init(use_gpu=False, trainer_count=1)

    image = paddle.layer.data(name="image",
                              type=paddle.data_type.dense_vector(datadim))

    # adapt the parameters of momentum_optimizer for your own model
    momentum_optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
        learning_rate=0.005 / 128,
        learning_rate_schedule='constant')

    # Add neural network conf
    net = vgg_bn_drop(image)

    out = paddle.layer.fc(
        input=net,
        size=classdim,
        act=paddle.activation.Softmax(),
        param_attr=ParamAttr(
            update_hooks=Hook('pruning', sparsity_ratio=0.985)))

    lbl = paddle.layer.data(name="label",
                            type=paddle.data_type.integer_value(classdim))
    cost = paddle.layer.classification_cost(input=out, label=lbl)

    with gzip.open('params_120.tar.gz', 'r') as f:
        parameters = paddle.parameters.Parameters.from_tar(f)

    # Alternatively, create fresh parameters instead of loading a snapshot:
    # parameters = paddle.parameters.create(cost)

    # End batch and end pass event handler
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "\nPass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
            else:
                sys.stdout.write('.')
                sys.stdout.flush()
        if isinstance(event, paddle.event.EndPass):
            # save parameters
            with gzip.open(
                    'static_pruning_params_pass_%d.tar.gz' % event.pass_id,
                    'w') as f:
                parameters.to_tar(f)

            result = trainer.test(
                reader=paddle.batch(paddle.dataset.cifar.test10(),
                                    batch_size=128),
                feeding={'image': 0, 'label': 1})
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)

    # Create trainer
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=momentum_optimizer)
    trainer.train(
        reader=paddle.batch(
            # paddle.reader.shuffle(paddle.dataset.cifar.train10(),
            #                       buf_size=50000),
            paddle.reader.buffered(paddle.dataset.cifar.train10(),
                                   size=100000),
            batch_size=128),
        num_passes=200,
        event_handler=event_handler,
        feeding={'image': 0, 'label': 1})
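Two hook variants appear across these examples: 'pruning' with a fixed sparsity_ratio (this example and Example #1) and 'dynamic_pruning' with a sparsity_upper_bound (Examples #10, #15, #16 and #18). Reading the semantics off the argument names is an inference, not documented behavior:

static_hook = Hook('pruning', sparsity_ratio=0.985)               # fixed target sparsity
dynamic_hook = Hook('dynamic_pruning', sparsity_upper_bound=0.8)  # sparsity capped at 0.8
fc_attr = ParamAttr(update_hooks=dynamic_hook)                    # attach to a layer's weights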
Example #18
def main():
    datadim = 3 * 32 * 32
    classdim = 10

    # PaddlePaddle init
    paddle.init(use_gpu=True, trainer_count=1, gpu_id=1)
    momentum_optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
        learning_rate=0.001 / 128.0,
        learning_rate_decay_a=0.1,
        learning_rate_decay_b=50000 * 100,
        learning_rate_schedule='discexp')

    image = paddle.layer.data(
        name="image", type=paddle.data_type.dense_vector(datadim))

    # Add neural network config
    # option 1. resnet
    # net = resnet_cifar10(image, depth=32)
    # option 2. vgg
    net = vgg_bn_drop(image)

    out = paddle.layer.fc(
        input=net,
        size=classdim,
        act=paddle.activation.Softmax(),
        param_attr=ParamAttr(update_hooks=Hook('dynamic_pruning')))

    lbl = paddle.layer.data(
        name="label", type=paddle.data_type.integer_value(classdim))
    cost = paddle.layer.classification_cost(input=out, label=lbl)
    with gzip.open('params_120.tar.gz', 'r') as f:
        parameters = paddle.parameters.Parameters.from_tar(f)

    # Alternatively, create fresh parameters instead of loading a snapshot:
    # parameters = paddle.parameters.create(cost)

    # End batch and end pass event handler
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            sys.stdout.write('.')
            sys.stdout.flush()
        if isinstance(event, paddle.event.EndPass):
            # save parameters
            with gzip.open(
                    'static_pruning_params_pass_%d.tar.gz' % event.pass_id,
                    'w') as f:
                parameters.to_tar(f)

            result = trainer.test(
                reader=paddle.batch(
                    paddle.dataset.cifar.test10(), batch_size=128),
                feeding={'image': 0,
                         'label': 1})
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)

    # Create trainer
    trainer = paddle.trainer.SGD(
        cost=cost, parameters=parameters, update_equation=momentum_optimizer)
    trainer.train(
        reader=paddle.batch(
            # paddle.reader.shuffle(paddle.dataset.cifar.train10(),
            #                       buf_size=50000),
            paddle.reader.buffered(paddle.dataset.cifar.train10(),
                                   size=100000),
            batch_size=128),
        num_passes=200,
        event_handler=event_handler,
        feeding={'image': 0, 'label': 1})