def test_case(self):
        """End-to-end check of ``interpolate`` with BICUBIC resampling.

        Static graph: builds five variants that should all resize the
        input from 6x6 to 12x12 — constant ``out_shape``, mixed
        list/Tensor ``out_shape``, a 1-D shape tensor, an
        ``actual_shape`` tensor (which overrides ``out_shape=[4, 4]``),
        and a scale tensor — runs them in one executor pass and compares
        each against the NumPy reference ``bicubic_interp_np``.
        Dygraph mode is checked with the constant-shape variant only.
        """
        np.random.seed(200)  # fixed seed so the comparison is deterministic
        x_data = np.random.random((2, 3, 6, 6)).astype("float32")
        dim_data = np.array([12]).astype("int32")
        shape_data = np.array([12, 12]).astype("int32")
        actual_size_data = np.array([12, 12]).astype("int32")
        scale_data = np.array([2.0]).astype("float32")

        prog = fluid.Program()
        startup_prog = fluid.Program()
        # Prefer GPU when this build supports CUDA, else fall back to CPU.
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()

        with fluid.program_guard(prog, startup_prog):

            x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")

            # Feedable tensors covering the different ways the output
            # size can be specified.
            dim = fluid.data(name="dim", shape=[1], dtype="int32")
            shape_tensor = fluid.data(name="shape_tensor",
                                      shape=[2],
                                      dtype="int32")
            actual_size = fluid.data(name="actual_size",
                                     shape=[2],
                                     dtype="int32")
            scale_tensor = fluid.data(name="scale_tensor",
                                      shape=[1],
                                      dtype="float32")

            # out1: constant Python-list out_shape.
            out1 = interpolate(x,
                               out_shape=[12, 12],
                               resample='BICUBIC',
                               align_corners=False)
            # out2: mixed out_shape — constant height, Tensor width.
            out2 = interpolate(x,
                               out_shape=[12, dim],
                               resample='BICUBIC',
                               align_corners=False)
            # out3: out_shape given entirely as a 1-D shape tensor.
            out3 = interpolate(x,
                               out_shape=shape_tensor,
                               resample='BICUBIC',
                               align_corners=False)
            # out4: actual_shape takes precedence over out_shape, so the
            # result is still 12x12 despite out_shape=[4, 4] (all five
            # outputs are asserted against the same 12x12 reference).
            out4 = interpolate(x,
                               out_shape=[4, 4],
                               actual_shape=actual_size,
                               resample='BICUBIC',
                               align_corners=False)
            # out5: output size derived from a scale factor (2.0 * 6 = 12).
            out5 = interpolate(x,
                               scale=scale_tensor,
                               resample='BICUBIC',
                               align_corners=False)

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            results = exe.run(fluid.default_main_program(),
                              feed={
                                  "x": x_data,
                                  "dim": dim_data,
                                  "shape_tensor": shape_data,
                                  "actual_size": actual_size_data,
                                  "scale_tensor": scale_data
                              },
                              fetch_list=[out1, out2, out3, out4, out5],
                              return_numpy=True)

            # All five variants must match the same NumPy reference.
            expect_res = bicubic_interp_np(x_data,
                                           out_h=12,
                                           out_w=12,
                                           align_corners=False)
            for res in results:
                self.assertTrue(np.allclose(res, expect_res))

        # Dygraph (imperative) mode must agree with the reference too.
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(x_data)
            interp = interpolate(x,
                                 out_shape=[12, 12],
                                 resample='BICUBIC',
                                 align_corners=False)
            dy_result = interp.numpy()
            expect = bicubic_interp_np(x_data,
                                       out_h=12,
                                       out_w=12,
                                       align_corners=False)
            self.assertTrue(np.allclose(dy_result, expect))
Beispiel #2
0
 def test_axis_max():
     """Every axis index must be smaller than x's rank (4 here); 5 is invalid."""
     bad_x = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
     paddle.nn.functional.dropout(bad_x, axis=[0, 5])
Beispiel #3
0
 def test_axis_len():
     """len(axis) must not exceed x's rank (4 here); five entries is invalid."""
     bad_x = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
     paddle.nn.functional.dropout(bad_x, axis=[0, 1, 2, 3, 4])
Beispiel #4
0
 def test_pdtype():
     """The dropout probability p must be numeric (int/float), not a string."""
     bad_x = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
     paddle.nn.functional.dropout(bad_x, p='0.5')
Beispiel #5
0
 def test_mode():
     """mode must be 'downscale_in_infer' or 'upscale_in_train'; 'abc' is not."""
     bad_x = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
     paddle.nn.functional.dropout(bad_x, mode='abc')
Beispiel #6
0
    def context(self,
                input_image=None,
                trainable=True,
                pretrained=True,
                param_prefix='',
                get_prediction=False,
                variant='d',
                norm_type='bn',
                feature_maps=[3, 4, 5],
                return_c5=False):
        """Distill the Head Features, so as to perform transfer learning.

        :param input_image: image tensor; when None a new 'image' data
            layer of shape [-1, 3, 224, 224] is created.
        :type input_image: <class 'paddle.fluid.framework.Variable'>
        :param trainable: whether to set parameters trainable.
            NOTE(review): this flag is not referenced in the body — confirm.
        :type trainable: bool
        :param pretrained: whether to load default pretrained model.
        :type pretrained: bool
        :param param_prefix: the prefix of parameters in yolo_head and backbone
        :type param_prefix: str
        :param get_prediction: whether to get prediction,
            if True, outputs is {'pred_out': out},
            if False, outputs is {'body_feats': out}.
        :type get_prediction: bool
        :param variant: type of resnet (network depth is fixed at 50 here)
        :type variant: str
        :param norm_type: type of normlization
        :type norm_type: str
        :param feature_maps: stage of output
        :type feature_maps: list
        :param return_c5: if True, immediately return a ResNetC5 backbone
            instead of the (inputs, outputs, program) tuple.
        :type return_c5: bool
        :return: (inputs dict, outputs dict, context program), or a
            ResNetC5 object when return_c5 is True.
        """
        # Reuse the caller's program when an input variable is supplied so
        # the backbone is built into the same graph; otherwise start fresh.
        context_prog = input_image.block.program if input_image else fluid.Program(
        )
        startup_program = fluid.Program()
        with fluid.program_guard(context_prog, startup_program):
            if return_c5:
                return ResNetC5(
                    depth=50,
                    norm_type=norm_type,
                    variant=variant,
                    feature_maps=feature_maps)
            image = input_image if input_image else fluid.data(
                name='image',
                shape=[-1, 3, 224, 224],
                dtype='float32',
                lod_level=0)
            backbone = ResNet(depth=50, variant=variant, norm_type=norm_type,\
                              feature_maps=feature_maps, get_prediction=get_prediction)

            out = backbone(image)
            inputs = {'image': image}
            if get_prediction:
                outputs = {'pred_out': out}
            else:
                outputs = {'body_feats': out}

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            if pretrained:

                # Only restore variables that actually exist on disk under
                # the default pretrained model path.
                def _if_exist(var):
                    return os.path.exists(
                        os.path.join(self.default_pretrained_model_path,
                                     var.name))

                if not param_prefix:
                    fluid.io.load_vars(
                        exe,
                        self.default_pretrained_model_path,
                        main_program=context_prog,
                        predicate=_if_exist)
            else:
                # No pretrained weights: randomly initialize via startup.
                exe.run(startup_program)
            return inputs, outputs, context_prog
Beispiel #7
0
    def check_static_result(self, place):
        """Static-graph checks for ``paddle.nn.functional.dropout``.

        With p=0 every configuration (scalar/list/tuple axis, training or
        inference, both modes, plus the legacy fluid API) must return the
        input unchanged; with p=1 and training=True the output must be all
        zeros.  ``res13`` (p=0.7, training=True) is only executed as a
        smoke test — its output is random, so nothing is asserted on it.
        """
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[-1, -1], dtype="float32")
            # p=0 variants: dropout should be a no-op regardless of
            # axis form, mode, or training flag.
            res1 = paddle.nn.functional.dropout(x=input, p=0., training=False)
            res2 = paddle.nn.functional.dropout(
                x=input, p=0., axis=0, training=True, mode='upscale_in_train')
            res3 = paddle.nn.functional.dropout(
                x=input, p=0., axis=0, training=True, mode='downscale_in_infer')
            res4 = paddle.nn.functional.dropout(
                x=input, p=0., axis=0, training=False, mode='upscale_in_train')
            res5 = paddle.nn.functional.dropout(
                x=input,
                p=0.,
                axis=0,
                training=False,
                mode='downscale_in_infer')
            res6 = paddle.nn.functional.dropout(
                x=input,
                p=0.,
                axis=[0, 1],
                training=True,
                mode='upscale_in_train')
            res7 = paddle.nn.functional.dropout(
                x=input,
                p=0.,
                axis=[0, 1],
                training=True,
                mode='downscale_in_infer')
            res8 = paddle.nn.functional.dropout(
                x=input,
                p=0.,
                axis=[0, 1],
                training=False,
                mode='upscale_in_train')
            res9 = paddle.nn.functional.dropout(
                x=input,
                p=0.,
                axis=[0, 1],
                training=False,
                mode='downscale_in_infer')
            # p=1 while training: everything is dropped -> all zeros.
            res10 = paddle.nn.functional.dropout(x=input, p=1., training=True)
            # Legacy fluid API with probability 0 must also be a no-op.
            res11 = paddle.fluid.layers.dropout(x=input, dropout_prob=0.)
            # axis as a tuple instead of a list.
            res12 = paddle.nn.functional.dropout(
                x=input,
                p=0.,
                axis=(0, 1),
                training=False,
                mode='upscale_in_train')

            res13 = paddle.nn.functional.dropout(
                x=input, p=0.7, axis=1, training=True, mode='upscale_in_train')

            in_np = np.ones([40, 40]).astype("float32")
            res_np = in_np                     # expected for the p=0 cases
            res_np2 = np.zeros_like(in_np)     # expected for the p=1 case

            exe = fluid.Executor(place)
            # res10 and res13 are checked/run separately below.
            res_list = [
                res1, res2, res3, res4, res5, res6, res7, res8, res9, res11,
                res12
            ]
            for res in res_list:
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"input": in_np},
                                  fetch_list=[res])
                self.assertTrue(np.allclose(fetches[0], res_np))
            fetches2 = exe.run(fluid.default_main_program(),
                               feed={"input": in_np},
                               fetch_list=[res10])
            self.assertTrue(np.allclose(fetches2[0], res_np2))
            # Smoke-run only: res13's output is random, so no assertion.
            fetches3 = exe.run(fluid.default_main_program(),
                               feed={"input": in_np},
                               fetch_list=[res13])
Beispiel #8
0
from opts import parser
from model import ECOfull
from config import parse_config, print_configs
from reader import KineticsReader

# Command-line arguments (use_cuda, num_segments, lr, ...).
args = parser.parse_args()

cfg = parse_config('config.txt')
# print_configs(cfg, 'TRAIN')
main_program = fluid.default_main_program()
start_program = fluid.default_startup_program()
place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
with fluid.program_guard(main_program, start_program):

    # data placeholder
    input = fluid.data(name='data', shape=[-1, 3, 224, 224], dtype='float32')
    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
    print(f'label shape:{label.shape}')
    # ECO-full video model built over the sampled segments.
    model = ECOfull(input, num_segments=args.num_segments)
    net_out = model()

    cost = fluid.layers.softmax_with_cross_entropy(net_out, label)
    avg_cost = fluid.layers.mean(cost)
    acc = fluid.layers.accuracy(net_out, label)
    # test program
    # NOTE: cloned before the optimizer is added, so the eval graph
    # contains no backward/optimizer ops.
    eval_program = main_program.clone(for_test=True)
    # optimizer
    fluid.optimizer.SGD(args.lr).minimize(avg_cost)

# Validation-set reader.
val_reader = KineticsReader('eco', 'valid', cfg).create_reader()
Beispiel #9
0
                param_initializer=self.param_initializer)
            self.model['blocks'][i]['feedforward']['out'] = enc_output
            enc_input = enc_output

        enc_output = self.pre_process_layer(
            enc_output,
            self.preprocess_cmd,
            self.prepostprocess_dropout,
            scale_name=self.param_names['post_encoder']['scale'],
            bias_name=self.param_names['post_encoder']['bias'])

        self.model['out'] = enc_output

        return enc_output


if __name__ == "__main__":
    bert = TransformerEncoder('bert_base')

    embedding_input = fluid.data(shape=[-1, 128, 764],
                                 dtype='float32',
                                 name='embedding')
    input_mask = fluid.data(shape=[-1, 128, 1], dtype='float32', name='mask')
    att_mask = fluid.layers.matmul(x=input_mask,
                                   y=input_mask,
                                   transpose_y=True)

    bert.build(embedding_input, att_mask)

    print(bert.model)
Beispiel #10
0
    def test_run(self):
        """Compare ``dynamic_rnn`` + ``LSTMCell`` against ``basic_lstm``.

        Builds both graphs over the same input, copies one randomly drawn
        set of weights/biases into both implementations by variable name,
        runs them in a single pass, and asserts the two outputs are
        numerically close (rtol=1e-4).
        """
        inputs_basic_lstm = fluid.data(
            name='inputs_basic_lstm',
            shape=[None, None, self.input_size],
            dtype='float32')
        sequence_length = fluid.data(
            name="sequence_length", shape=[None], dtype='int64')

        # dynamic_rnn consumes time-major input: [seq, batch, feature].
        inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm, perm=[1, 0, 2])
        cell = LSTMCell(self.hidden_size, name="LSTMCell_for_rnn")
        output, final_state = dynamic_rnn(
            cell=cell,
            inputs=inputs_dynamic_rnn,
            sequence_length=sequence_length,
            is_reverse=False)
        # Back to batch-major so it is comparable with basic_lstm's output.
        output_new = layers.transpose(output, perm=[1, 0, 2])

        rnn_out, last_hidden, last_cell = basic_lstm(inputs_basic_lstm, None, None, self.hidden_size, num_layers=1, \
                batch_first = False, bidirectional=False, sequence_length=sequence_length, forget_bias = 1.0)

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = Executor(place)
        exe.run(framework.default_startup_program())

        inputs_basic_lstm_np = np.random.uniform(
            -0.1, 0.1,
            (self.seq_len, self.batch_size, self.input_size)).astype('float32')
        sequence_length_np = np.ones(
            self.batch_size, dtype='int64') * self.seq_len

        inputs_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
        pre_hidden_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        pre_cell_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')

        # Pairs of (LSTMCell variable name, basic_lstm variable name) that
        # must hold identical parameter values for the comparison to hold.
        param_names = [[
            "LSTMCell_for_rnn/BasicLSTMUnit_0.w_0",
            "basic_lstm_layers_0/BasicLSTMUnit_0.w_0"
        ], [
            "LSTMCell_for_rnn/BasicLSTMUnit_0.b_0",
            "basic_lstm_layers_0/BasicLSTMUnit_0.b_0"
        ]]

        for names in param_names:
            # Draw one random tensor of the right shape and write it into
            # both implementations' parameters.
            param = np.array(fluid.global_scope().find_var(names[0]).get_tensor(
            ))
            param = np.random.uniform(
                -0.1, 0.1, size=param.shape).astype('float32')
            fluid.global_scope().find_var(names[0]).get_tensor().set(param,
                                                                     place)
            fluid.global_scope().find_var(names[1]).get_tensor().set(param,
                                                                     place)

        # The feed includes extra inputs (inputs/pre_hidden/pre_cell) that
        # other data layers in the default program may require.
        out = exe.run(feed={
            'inputs_basic_lstm': inputs_basic_lstm_np,
            'sequence_length': sequence_length_np,
            'inputs': inputs_np,
            'pre_hidden': pre_hidden_np,
            'pre_cell': pre_cell_np
        },
                      fetch_list=[output_new, rnn_out])

        self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4))
import paddle.fluid as fluid

#define operation
w = fluid.data(name='w', shape=[None, 1], dtype='float32')
# Allow gradients to flow back to the data layer.
w.stop_gradient = False
loss = w * w
# d(loss)/dw — appends backward ops and returns the gradient variable(s).
grad = fluid.gradients([loss], w)
print(grad)
#Define Exector
cpu = fluid.core.CPUPlace()
exe = fluid.Executor(cpu)
exe.run(fluid.default_startup_program())
#Prepare data
import numpy

x = numpy.ones((1, 1))
x = x.astype('float32')

#Run computing
# With w = 1: loss = w*w = 1 and grad = 2*w = 2.
outs = exe.run(feed={'w': x}, fetch_list=[loss, grad])

print('loss: {}, grad: {}'.format(outs[0][0], outs[1][0]))
Beispiel #12
0
def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
    """Build the 20-class segmentation graph for one phase.

    Returns, depending on ``phase``:
      * eval:  (data_loader, avg_loss, pred, label, mask)
      * train: (data_loader, avg_loss, poly_lr, pred, label, mask)
      * otherwise falls through and returns None.

    Relies on module-level globals: ``train_shape``, ``cfg``,
    ``all_step`` and the network constructors
    (deeplabv3p / hrnet / ocrnet).
    """
    width = train_shape[0]
    height = train_shape[1]
    image_shape = [-1, 3, height, width]
    grt_shape = [-1, 1, height, width]
    class_num = 20
    with fluid.program_guard(main_prog, start_prog):
        with fluid.unique_name.guard():
            image = fluid.data(name='image',
                               shape=image_shape,
                               dtype='float32')
            label = fluid.data(name='label', shape=grt_shape, dtype='int32')
            mask = fluid.data(name='mask', shape=grt_shape, dtype='int32')

            # Only train/eval feed through a DataLoader; predict feeds
            # the image directly.
            if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
                data_loader = fluid.io.DataLoader.from_generator(
                    feed_list=[image, label, mask],
                    capacity=256,
                    iterable=False,
                    use_double_buffer=True)
            # Select the backbone named in the config.
            net_name = cfg["model"]
            if net_name == "deeplab":
                net = deeplabv3p
            elif net_name == "hrnet":
                net = hrnet
            elif net_name == "ocrnet":
                net = ocrnet
            #logit = hrnet(image,class_num)
            logits = net(image, class_num)
            if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
                # Class 0 weighted 0.5, the other 19 classes weighted 5.0
                # (presumably down-weighting background — TODO confirm).
                weight_softmax = [0.5] + ([5.0] * 19)
                softmax_loss = multi_softmax_with_loss(logits, label, mask,
                                                       class_num,
                                                       weight_softmax)
                if cfg["lovasz_loss"]:
                    # Blend Lovasz-softmax and weighted softmax losses;
                    # the two weights sum to 1.
                    lovasz_loss = multi_lovasz_softmax_loss(
                        logits, label, mask)
                    lovasz_weight = cfg["lovasz_weight"]
                    softmax_weight = 1 - lovasz_weight
                    avg_loss = (lovasz_weight *
                                lovasz_loss) + (softmax_weight * softmax_loss)
                else:
                    avg_loss = softmax_loss

            # Some nets return auxiliary logits; keep only the primary one.
            if isinstance(logits, tuple):
                logit = logits[0]
            else:
                logit = logits

            # Upsample logits to label resolution when they differ.
            if logit.shape[2:] != label.shape[2:]:
                logit = fluid.layers.resize_bilinear(logit, label.shape[2:])
            if ModelPhase.is_predict(phase):
                logit = softmax(logit)
            # NCHW -> NHWC, then argmax over channels -> per-pixel class id.
            out = fluid.layers.transpose(logit, [0, 2, 3, 1])
            pred = fluid.layers.argmax(out, axis=3)
            pred = fluid.layers.unsqueeze(pred, axes=[3])
            if ModelPhase.is_eval(phase):
                return data_loader, avg_loss, pred, label, mask
            if ModelPhase.is_train(phase):
                decay_step = all_step
                power = 0.9

                #decay_lr = fluid.layers.piecewise_decay(cfg["decay_step"],values=cfg["decay_values"])
                #optimizer = fluid.optimizer.Momentum(learning_rate=decay_lr,momentum=0.9,
                #            regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-05))
                # Polynomial learning-rate decay down to 0 over all steps.
                poly_lr = fluid.layers.polynomial_decay(cfg["lr"],
                                                        decay_step,
                                                        end_learning_rate=0,
                                                        power=power)
                optimizer = fluid.optimizer.Adam(
                    learning_rate=poly_lr,
                    beta1=0.9,
                    beta2=0.99,
                    regularization=fluid.regularizer.L2Decay(
                        regularization_coeff=4e-05))

                optimizer.minimize(avg_loss)
                return data_loader, avg_loss, poly_lr, pred, label, mask
Beispiel #13
0
 def test_out_shape():
     """out_shape must give both H and W; a single entry is invalid."""
     data_in = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
     out = interpolate(data_in, out_shape=[12], resample='BICUBIC',
                       align_corners=False)
Beispiel #14
0
 def test_align_corcers():
     """Passing a non-bool value (3) for align_corners should be rejected."""
     data_in = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
     interpolate(data_in, out_shape=[12, 12], resample='BICUBIC',
                 align_corners=3)
Beispiel #15
0
# define a random dataset
class RandomDataset(paddle.io.Dataset):
    """Synthetic dataset yielding random (image, label) pairs."""

    def __init__(self, num_samples):
        # Number of samples reported by __len__.
        self.num_samples = num_samples

    def __getitem__(self, idx):
        # Random float32 feature vector of length IMAGE_SIZE.
        image = np.random.random([IMAGE_SIZE]).astype('float32')
        # NOTE(review): np.random.randint's upper bound is exclusive, so
        # labels fall in [0, CLASS_NUM - 2]; if all CLASS_NUM classes
        # should occur this likely wants randint(0, CLASS_NUM) — confirm.
        label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
        return image, label

    def __len__(self):
        return self.num_samples


image = fluid.data(name='image', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
# Single fully-connected softmax classifier over 10 classes.
pred = fluid.layers.fc(input=image, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=pred, label=label)
avg_loss = fluid.layers.mean(loss)

optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
Beispiel #16
0
        padding=0,
        act=None,
        param_attr=ParamAttr(initializer=MSRA(), name='conv-12_weights'),
        bias_attr=False)
    out4 = fluid.layers.concat([out2, out3], axis=1)
    out4 = fluid.layers.conv2d(
        input=out4,
        num_filters=num_classes,
        filter_size=1,
        stride=1,
        padding=0,
        act=None,
        param_attr=ParamAttr(initializer=MSRA(), name='conv-13_weights'),
        bias_attr=False)

    out4 = fluid.layers.resize_bilinear(out4, input.shape[2:])

    return out4


def post_hrnet(input, num_classes):
    """Thin wrapper: run the high-resolution net and return its logits."""
    return high_resolution_net(input, num_classes)


if __name__ == '__main__':
    # Smoke test: build the network on a 769x769 input with 4 classes
    # and report the resulting logit shape.
    image_shape = [-1, 3, 769, 769]
    image = fluid.data(name='image', shape=image_shape, dtype='float32')
    logit = post_hrnet(image, 4)
    print("logit:", logit.shape)
Beispiel #17
0
            yield train_data, label_data

    return reader


# Shuffle with a small buffer, then batch into groups of 4 samples.
train_reader = fluid.io.batch(paddle.reader.shuffle(my_reader(), buf_size=8),
                              4)

#DEV_COUNT = fluid.core.get_cuda_device_count()
use_cuda = False
place = fluid.cuda_places() if use_cuda else fluid.cpu_places()

if not use_cuda:
    os.environ['CPU_NUM'] = str(2)

data = fluid.data(name="char", shape=[None, 50], dtype="int64", lod_level=0)
#data = fluid.data(name="char", shape=[None, 50], dtype="float32", lod_level=0)
label = fluid.data(name="label", shape=[None, 1], dtype="int64", lod_level=0)

# Iterable PyReader feeding (data, label) batches from train_reader.
reader = fluid.io.PyReader(feed_list=[data, label],
                           capacity=40,
                           iterable=True,
                           return_list=False)
reader.decorate_sample_list_generator(train_reader, place)

# Embedding lookup followed by a 2-class softmax classifier.
emb = fluid.embedding(data, size=[10, 64])
prob = fluid.layers.fc(emb, size=2, act='softmax')
#prob = fluid.layers.fc(data, size=2, act='softmax')
ce = fluid.layers.cross_entropy(prob, label)
loss = fluid.layers.mean(ce)
import shutil

import paddle
import paddle.fluid as fluid

import create_data
import text_reader
import bilstm_net
try:
    # Compatibility with PaddlePaddle 2.0: switch to static-graph mode.
    paddle.enable_static()
except:
    pass

# Define the input data; a non-zero lod_level marks the input as
# variable-length sequence (LoD) data.
words = fluid.data(name='words', shape=[None, 1], dtype='int64', lod_level=1)
label = fluid.data(name='label', shape=[None, 1], dtype='int64')

# Size of the data dictionary (vocabulary).
dict_dim = create_data.get_dict_len('datasets/dict_txt.txt')
# Build the bidirectional LSTM network.
model = bilstm_net.bilstm_net(words, dict_dim, 15)

# Loss and accuracy.
cost = fluid.layers.cross_entropy(input=model, label=label)
avg_cost = fluid.layers.mean(cost)
acc = fluid.layers.accuracy(input=model, label=label)

# Clone the test/inference program before any optimizer is added.
test_program = fluid.default_main_program().clone(for_test=True)
Beispiel #19
0
    def loss_scaling_check_inf(self, use_npu=True, scope=fluid.Scope()):
        """Check ``update_loss_scaling`` behaviour when an inf is present.

        A single inf is injected into ``a``, so the op must: zero out all
        data tensors, keep ``found_inf`` set, scale the loss scaling down
        by ``decr_ratio`` (2048 * 0.8), and reset both step counters to
        zero.

        NOTE(review): the mutable default ``scope=fluid.Scope()`` is
        created once at definition time and shared across calls —
        acceptable in a test, but worth confirming.
        """
        a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
        b = fluid.data(name="b", shape=[512, 128], dtype='float32')
        x = [a, b]
        found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool')
        prev_loss_scaling = fluid.data(name="prev_loss_scaling",
                                       shape=[1],
                                       dtype='float32')
        num_good_steps = fluid.data(name="num_good_steps",
                                    shape=[1],
                                    dtype='int32')
        num_bad_steps = fluid.data(name="num_bad_steps",
                                   shape=[1],
                                   dtype='int32')

        a_v = np.random.random([1024, 1024]).astype('float32')
        b_v = np.random.random([512, 128]).astype('float32')
        # Inject a single inf at a random position of a.
        i = np.random.randint(0, 1024, 1)
        j = np.random.randint(0, 1024, 1)
        a_v[i[0]][j[0]] = np.inf
        found_inf_v = np.array([True]).astype('bool')
        prev_loss_scaling_v = np.array([2048]).astype('float32')
        num_good_steps_v = np.array([999], dtype=np.int32)
        num_bad_steps_v = np.array([1], dtype=np.int32)

        incr_every_n_steps = 1000
        decr_every_n_nan_or_inf = 2
        incr_ratio = 2
        decr_ratio = 0.8

        result = amp_nn.update_loss_scaling(x,
                                            found_inf,
                                            prev_loss_scaling,
                                            num_good_steps,
                                            num_bad_steps,
                                            incr_every_n_steps,
                                            decr_every_n_nan_or_inf,
                                            incr_ratio,
                                            decr_ratio,
                                            name="update_loss_scaling")

        place = paddle.NPUPlace(0) if use_npu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        with fluid.scope_guard(scope):
            exe.run(fluid.default_startup_program())
            result_v = exe.run(feed={
                'a': a_v,
                'b': b_v,
                'found_inf': found_inf_v,
                'prev_loss_scaling': prev_loss_scaling_v,
                'num_good_steps': num_good_steps_v,
                'num_bad_steps': num_bad_steps_v
            },
                               fetch_list=[
                                   result, x, found_inf, prev_loss_scaling,
                                   num_good_steps, num_bad_steps
                               ])
        # Data tensors are zeroed both in the result and in-place.
        assert np.array_equal(result_v[0], np.zeros_like(a_v))
        assert np.array_equal(result_v[1], np.zeros_like(b_v))
        assert np.array_equal(result_v[2], np.zeros_like(a_v))
        assert np.array_equal(result_v[3], np.zeros_like(b_v))
        assert np.array_equal(result_v[4], found_inf_v)
        # Loss scaling is decreased by decr_ratio; counters are reset.
        assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio)
        assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
        assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
Beispiel #20
0
        os.makedirs(frame_path_interpolated)
    if not os.path.exists(frame_path_combined):
        os.makedirs(frame_path_combined)
    if not os.path.exists(video_path_input):
        os.makedirs(video_path_input)
    if not os.path.exists(video_path_output):
        os.makedirs(video_path_output)

    args.KEY_FRAME_THREAD = 0.
    saved_model = args.saved_model

    timestep = args.time_step
    num_frames = int(1.0 / timestep) - 1

    image = fluid.data(name='image',
                       shape=[2, 1, args.channels, -1, -1],
                       dtype='float32')
    DAIN = networks.__dict__["DAIN_slowmotion"](channel=args.channels,
                                                filter_size=args.filter_size,
                                                timestep=args.time_step,
                                                training=False)
    out = DAIN(image)
    out = out[0][1]

    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    fetch_list = [out.name]

    inference_program = fluid.default_main_program().clone(for_test=True)
Beispiel #21
0
 def test_dtype():
     """Dropout input must be float32/float64 (float16 on GPU only); int32 fails."""
     int_x = fluid.data(name='xr', shape=[3, 4, 5, 6], dtype="int32")
     paddle.nn.functional.dropout(int_x, p=0.5)
Beispiel #22
0
    def create_network(self, is_infer=False):
        """Create data layers and model network.

        :param is_infer: Whether to create a network for inference
            (True) instead of training (False).
        :type is_infer: bool
        :return reader: Reader for input — a DataLoader when training,
            a DataFeeder when inferring.
        :rtype reader: read generator
        :return log_probs: An output unnormalized log probability layer.
        :rtype log_probs: Variable
        :return loss: A ctc loss layer.
        :rtype loss: Variable
        """

        if not is_infer:
            # Training/eval: audio, transcript, sequence lengths and
            # convolution masks are all fed through one DataLoader.
            input_fields = {
                'names': ['audio_data', 'text_data', 'seq_len_data', 'masks'],
                'shapes': [[None, 161, None], [None, 1], [None, 1],
                           [None, 32, 81, None]],
                'dtypes': ['float32', 'int32', 'int64', 'float32'],
                'lod_levels': [0, 1, 0, 0]
            }

            inputs = [
                fluid.data(name=input_fields['names'][i],
                           shape=input_fields['shapes'][i],
                           dtype=input_fields['dtypes'][i],
                           lod_level=input_fields['lod_levels'][i])
                for i in range(len(input_fields['names']))
            ]

            reader = fluid.io.DataLoader.from_generator(feed_list=inputs,
                                                        capacity=64,
                                                        iterable=False,
                                                        use_double_buffer=True)

            (audio_data, text_data, seq_len_data, masks) = inputs
        else:
            # Inference: no transcript; data is fed directly via a
            # DataFeeder on self._place.
            audio_data = fluid.data(name='audio_data',
                                    shape=[None, 161, None],
                                    dtype='float32',
                                    lod_level=0)
            seq_len_data = fluid.data(name='seq_len_data',
                                      shape=[None, 1],
                                      dtype='int64',
                                      lod_level=0)
            masks = fluid.data(name='masks',
                               shape=[None, 32, 81, None],
                               dtype='float32',
                               lod_level=0)
            text_data = None
            reader = fluid.DataFeeder([audio_data, seq_len_data, masks],
                                      self._place)

        log_probs, loss = deep_speech_v2_network(
            audio_data=audio_data,
            text_data=text_data,
            seq_len_data=seq_len_data,
            masks=masks,
            dict_size=self._vocab_size,
            num_conv_layers=self._num_conv_layers,
            num_rnn_layers=self._num_rnn_layers,
            rnn_size=self._rnn_layer_size,
            use_gru=self._use_gru,
            share_rnn_weights=self._share_rnn_weights)
        return reader, log_probs, loss
Beispiel #23
0
 def test_pvalue():
     """p must satisfy 0. <= p <= 1.; 1.2 is out of range."""
     bad_x = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
     paddle.nn.functional.dropout(bad_x, p=1.2)
Beispiel #24
0
 def test_dtype():
     """fluid.layers.unique should reject float16 input."""
     fp16_data = fluid.data(shape=[10], dtype="float16", name="input")
     fluid.layers.unique(fp16_data)
Beispiel #25
0
 def test_axis():
     """axis must be an int or a list of ints; a float (1.2) is invalid."""
     bad_x = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
     paddle.nn.functional.dropout(bad_x, axis=1.2)
Beispiel #26
0
def build_model(main_prog=None,
                start_prog=None,
                phase=ModelPhase.TRAIN,
                **kwargs):
    """Build the segmentation graph for the given model phase.

    Args:
        main_prog: main ``fluid.Program`` the graph is built into (only used
            directly when cloning teacher inputs for knowledge distillation).
        start_prog: startup ``fluid.Program`` passed to the solver.
        phase: one of the ``ModelPhase`` values (TRAIN / EVAL / PREDICT /
            VISUAL).
        **kwargs: when ``cfg.SLIM.KNOWLEDGE_DISTILL_IS_TEACHER`` is set,
            must contain the student's 'image', 'label' and 'mask' variables,
            which are cloned into ``main_prog``.

    Returns:
        Phase dependent:
            PREDICT: (origin_image, logit)
            VISUAL:  (pred, logit)
            EVAL:    (data_loader, avg_loss, pred, label, mask)
            TRAIN:   (data_loader, avg_loss, decayed_lr, pred, label, mask,
                      image)

    Raises:
        ValueError: if ``phase`` is not a valid ``ModelPhase``.
        Exception: if ``cfg.SOLVER.LOSS`` is empty/invalid or combines
            incompatible losses.
    """
    if not ModelPhase.is_valid_phase(phase):
        raise ValueError("ModelPhase {} is not valid!".format(phase))
    if ModelPhase.is_train(phase):
        width = cfg.TRAIN_CROP_SIZE[0]
        height = cfg.TRAIN_CROP_SIZE[1]
    else:
        width = cfg.EVAL_CROP_SIZE[0]
        height = cfg.EVAL_CROP_SIZE[1]

    image_shape = [-1, cfg.DATASET.DATA_DIM, height, width]
    grt_shape = [-1, 1, height, width]
    class_num = cfg.DATASET.NUM_CLASSES

    # When exporting a model for deployment, image normalization is folded
    # into the graph itself, so serving code only has to add a batch
    # dimension to the raw input image.
    if cfg.SLIM.KNOWLEDGE_DISTILL_IS_TEACHER:
        # Teacher network in knowledge distillation: reuse the student's
        # input variables instead of declaring new feed targets.
        image = main_prog.global_block()._clone_variable(
            kwargs['image'], force_persistable=False)
        label = main_prog.global_block()._clone_variable(
            kwargs['label'], force_persistable=False)
        mask = main_prog.global_block()._clone_variable(
            kwargs['mask'], force_persistable=False)
    else:
        if ModelPhase.is_predict(phase):
            # NHWC input with dynamic spatial size; export_preprocess
            # converts it to the network layout and records the valid and
            # original shapes for post-processing.
            origin_image = fluid.data(name='image',
                                      shape=[-1, -1, -1, cfg.DATASET.DATA_DIM],
                                      dtype='float32')
            image, valid_shape, origin_shape = export_preprocess(origin_image)

        else:
            image = fluid.data(name='image',
                               shape=image_shape,
                               dtype='float32')
        label = fluid.data(name='label', shape=grt_shape, dtype='int32')
        mask = fluid.data(name='mask', shape=grt_shape, dtype='int32')

    # Use DataLoader.from_generator when doing training and evaluation.
    if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
        data_loader = None
        if not cfg.SLIM.KNOWLEDGE_DISTILL_IS_TEACHER:
            data_loader = fluid.io.DataLoader.from_generator(
                feed_list=[image, label, mask],
                capacity=cfg.DATALOADER.BUF_SIZE,
                iterable=False,
                use_double_buffer=True)

    loss_type = cfg.SOLVER.LOSS
    if isinstance(loss_type, str):
        # Bug fix: a bare string such as "softmax_loss" must be wrapped in a
        # one-element list. The previous list(loss_type) split the string
        # into single characters, so every membership test below silently
        # failed and an "is set wrong" exception was raised.
        loss_type = [loss_type]
    elif not isinstance(loss_type, list):
        # Preserve the old behavior for other iterables (e.g. tuples).
        loss_type = list(loss_type)

    # dice_loss / bce_loss are only defined for binary segmentation.
    if class_num > 2 and (("dice_loss" in loss_type) or
                          ("bce_loss" in loss_type)):
        raise Exception(
            "dice loss and bce loss is only applicable to binary classfication"
        )

    # For binary segmentation with dice_loss or bce_loss, the final logit is
    # reduced to a single channel.
    if ("dice_loss" in loss_type) or ("bce_loss" in loss_type):
        class_num = 1
        if "softmax_loss" in loss_type:
            raise Exception(
                "softmax loss can not combine with dice loss or bce loss")
    logits = seg_model(image, class_num)

    # Accumulate the losses selected in the configuration.
    if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
        loss_valid = False
        avg_loss_list = []
        valid_loss = []
        if "softmax_loss" in loss_type:
            weight = cfg.SOLVER.CROSS_ENTROPY_WEIGHT
            avg_loss_list.append(
                multi_softmax_with_loss(logits, label, mask, class_num,
                                        weight))
            loss_valid = True
            valid_loss.append("softmax_loss")
        if "dice_loss" in loss_type:
            avg_loss_list.append(multi_dice_loss(logits, label, mask))
            loss_valid = True
            valid_loss.append("dice_loss")
        if "bce_loss" in loss_type:
            avg_loss_list.append(multi_bce_loss(logits, label, mask))
            loss_valid = True
            valid_loss.append("bce_loss")
        if not loss_valid:
            raise Exception(
                "SOLVER.LOSS: {} is set wrong. it should "
                "include one of (softmax_loss, bce_loss, dice_loss) at least"
                " example: ['softmax_loss'], ['dice_loss'], ['bce_loss', 'dice_loss']"
                .format(cfg.SOLVER.LOSS))

        invalid_loss = [x for x in loss_type if x not in valid_loss]
        if len(invalid_loss) > 0:
            print(
                "Warning: the loss {} you set is invalid. it will not be included in loss computed."
                .format(invalid_loss))

        # Element-wise sum of the selected loss tensors (starts from 0).
        avg_loss = sum(avg_loss_list)

    # Get the prediction result at the label's spatial resolution.
    if isinstance(logits, tuple):
        logit = logits[0]
    else:
        logit = logits

    if logit.shape[2:] != label.shape[2:]:
        logit = fluid.layers.resize_bilinear(logit, label.shape[2:])

    # Return image input and logit output for inference graph pruning.
    if ModelPhase.is_predict(phase):
        # With dice_loss/bce_loss the logit is single-channel; expand it to
        # a two-channel softmax so downstream argmax works uniformly.
        if class_num == 1:
            logit = sigmoid_to_softmax(logit)
        else:
            logit = softmax(logit)

        # Slice out the valid (non-padded) region before resizing back.
        logit = fluid.layers.slice(logit,
                                   axes=[2, 3],
                                   starts=[0, 0],
                                   ends=valid_shape)

        logit = fluid.layers.resize_bilinear(logit,
                                             out_shape=origin_shape,
                                             align_corners=False,
                                             align_mode=0)
        logit = fluid.layers.argmax(logit, axis=1)
        return origin_image, logit

    if class_num == 1:
        out = sigmoid_to_softmax(logit)
        out = fluid.layers.transpose(out, [0, 2, 3, 1])
    else:
        out = fluid.layers.transpose(logit, [0, 2, 3, 1])

    # NHWC argmax over the channel axis, then restore a trailing unit dim.
    pred = fluid.layers.argmax(out, axis=3)
    pred = fluid.layers.unsqueeze(pred, axes=[3])
    if ModelPhase.is_visual(phase):
        if class_num == 1:
            logit = sigmoid_to_softmax(logit)
        else:
            logit = softmax(logit)
        return pred, logit

    if ModelPhase.is_eval(phase):
        return data_loader, avg_loss, pred, label, mask

    if ModelPhase.is_train(phase):
        decayed_lr = None
        # Under knowledge distillation the optimizer is built elsewhere.
        if not cfg.SLIM.KNOWLEDGE_DISTILL:
            optimizer = solver.Solver(main_prog, start_prog)
            decayed_lr = optimizer.optimise(avg_loss)
        return data_loader, avg_loss, decayed_lr, pred, label, mask, image
Beispiel #27
0
 def test_axis_min():
     """Every entry of dropout's ``axis`` list must be >= 0."""
     feats = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
     # -1 violates the minimum-axis constraint and should raise.
     paddle.nn.functional.dropout(feats, axis=[0, -1])
def infer(args):
    """Load a trained generator and run inference for the selected GAN.

    Builds the generator graph for ``args.model_net`` (CycleGAN, Pix2pix,
    StarGAN, STGAN, AttGAN, CGAN, DCGAN or SPADE), restores parameters from
    ``args.init_model`` and writes generated images into ``args.output``.

    Args:
        args: parsed command-line namespace; relevant fields include
            ``model_net``, ``image_size``, ``c_dim``, ``n_samples``,
            ``noise_size``, ``use_gpu``, ``init_model``, ``output``,
            ``dataset_dir``, ``test_list`` and ``selected_attrs``.

    Raises:
        NotImplementedError: if ``args.model_net`` (or an unsupported
            CycleGAN ``input_style``) is not recognized.
    """
    data_shape = [None, 3, args.image_size, args.image_size]
    input = fluid.data(name='input', shape=data_shape, dtype='float32')
    # Source / target attribute labels for the attribute-editing models.
    label_org_ = fluid.data(name='label_org_',
                            shape=[None, args.c_dim],
                            dtype='float32')
    label_trg_ = fluid.data(name='label_trg_',
                            shape=[None, args.c_dim],
                            dtype='float32')
    # Integer ids that map back to image file names via the reader.
    image_name = fluid.data(name='image_name',
                            shape=[None, args.n_samples],
                            dtype='int32')

    model_name = 'net_G'

    # Build the generator graph (and, where the model consumes a dataset,
    # the input DataLoader) for the requested architecture.
    if args.model_net == 'CycleGAN':
        loader = fluid.io.DataLoader.from_generator(
            feed_list=[input, image_name],
            capacity=4,  ## batch_size * 4
            iterable=True,
            use_double_buffer=True)

        from network.CycleGAN_network import CycleGAN_model
        model = CycleGAN_model()
        if args.input_style == "A":
            fake = model.network_G(input, name="GA", cfg=args)
        elif args.input_style == "B":
            fake = model.network_G(input, name="GB", cfg=args)
        else:
            # Bug fix: raising a plain string is a TypeError in Python 3
            # ("exceptions must derive from BaseException"); raise a real
            # exception, matching the unsupported-model branch below.
            raise NotImplementedError(
                "Input with style [%s] is not supported." % args.input_style)
    elif args.model_net == 'Pix2pix':
        loader = fluid.io.DataLoader.from_generator(
            feed_list=[input, image_name],
            capacity=4,  ## batch_size * 4
            iterable=True,
            use_double_buffer=True)

        from network.Pix2pix_network import Pix2pix_model
        model = Pix2pix_model()
        fake = model.network_G(input, "generator", cfg=args)
    elif args.model_net == 'StarGAN':

        loader = fluid.io.DataLoader.from_generator(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)

        from network.StarGAN_network import StarGAN_model
        model = StarGAN_model()
        fake = model.network_G(input, label_trg_, name="g_main", cfg=args)
    elif args.model_net == 'STGAN':
        from network.STGAN_network import STGAN_model

        loader = fluid.io.DataLoader.from_generator(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)

        model = STGAN_model()
        fake, _ = model.network_G(input,
                                  label_org_,
                                  label_trg_,
                                  cfg=args,
                                  name='generator',
                                  is_test=True)
    elif args.model_net == 'AttGAN':
        from network.AttGAN_network import AttGAN_model

        loader = fluid.io.DataLoader.from_generator(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)

        model = AttGAN_model()
        fake, _ = model.network_G(input,
                                  label_org_,
                                  label_trg_,
                                  cfg=args,
                                  name='generator',
                                  is_test=True)
    elif args.model_net == 'CGAN':
        # CGAN/DCGAN sample from noise, so no DataLoader is needed.
        noise = fluid.data(name='noise',
                           shape=[None, args.noise_size],
                           dtype='float32')
        conditions = fluid.data(name='conditions',
                                shape=[None, 1],
                                dtype='float32')

        from network.CGAN_network import CGAN_model
        model = CGAN_model(args.n_samples)
        fake = model.network_G(noise, conditions, name="G")
    elif args.model_net == 'DCGAN':
        noise = fluid.data(name='noise',
                           shape=[None, args.noise_size],
                           dtype='float32')

        from network.DCGAN_network import DCGAN_model
        model = DCGAN_model(args.n_samples)
        fake = model.network_G(noise, name="G")
    elif args.model_net == 'SPADE':
        label_shape = [None, args.label_nc, args.crop_height, args.crop_width]
        spade_data_shape = [None, 1, args.crop_height, args.crop_width]
        from network.SPADE_network import SPADE_model
        model = SPADE_model()
        input_label = fluid.data(name='input_label',
                                 shape=label_shape,
                                 dtype='float32')
        input_ins = fluid.data(name='input_ins',
                               shape=spade_data_shape,
                               dtype='float32')
        # Semantic label map and instance map are concatenated channel-wise.
        input_ = fluid.layers.concat([input_label, input_ins], 1)
        fake = model.network_G(input_, "generator", cfg=args, is_test=True)
    else:
        raise NotImplementedError("model_net {} is not support".format(
            args.model_net))

    def _compute_start_end(image_name):
        # Derive the output file name from the first image id in the batch.
        image_name_start = np.array(image_name)[0].astype('int32')
        image_name_end = image_name_start + args.n_samples - 1
        image_name_save = str(np.array(image_name)[0].astype('int32')) + '.jpg'
        print("read {}.jpg ~ {}.jpg".format(image_name_start, image_name_end))
        return image_name_save

    # Prepare the execution environment and restore generator weights.
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    for var in fluid.default_main_program().all_parameters():
        print(var.name)
    print(args.init_model + '/' + model_name)
    fluid.load(fluid.default_main_program(),
               os.path.join(args.init_model, model_name))
    print('load params done')
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    attr_names = args.selected_attrs.split(',')

    if args.model_net == 'AttGAN' or args.model_net == 'STGAN':
        test_reader = celeba_reader_creator(image_dir=args.dataset_dir,
                                            list_filename=args.test_list,
                                            args=args,
                                            mode="VAL")
        reader_test = test_reader.make_reader(return_name=True)
        loader.set_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        for data in loader():
            real_img, label_org, label_trg, image_name = data[0][
                'input'], data[0]['label_org_'], data[0]['label_trg_'], data[
                    0]['image_name']
            image_name_save = _compute_start_end(image_name)
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            # Flip each selected attribute in turn and generate the edit.
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_trg))
                for j in range(len(label_trg_tmp)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                # Rescale labels from {0, 1} to {-0.5, 0.5}.
                label_org_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, np.array(label_org)))
                label_trg_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, label_trg_tmp))
                if args.model_net == 'AttGAN':
                    # AttGAN doubles the intensity of the edited attribute.
                    for k in range(len(label_trg_tmp)):
                        label_trg_tmp[k][i] = label_trg_tmp[k][i] * 2.0
                tensor_label_org_ = fluid.LoDTensor()
                tensor_label_trg_ = fluid.LoDTensor()
                tensor_label_org_.set(label_org_tmp, place)
                tensor_label_trg_.set(label_trg_tmp, place)
                out = exe.run(feed={
                    "input": real_img,
                    "label_org_": tensor_label_org_,
                    "label_trg_": tensor_label_trg_
                },
                              fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(np.array(label_org)) > 1:
                images_concat = np.concatenate(images_concat, 1)
            # Map outputs from [-1, 1] back to [0, 255] before saving.
            fake_image = Image.fromarray(
                ((images_concat + 1) * 127.5).astype(np.uint8))
            fake_image.save(
                os.path.join(args.output, "fake_image_" + image_name_save))
    elif args.model_net == 'StarGAN':
        test_reader = celeba_reader_creator(image_dir=args.dataset_dir,
                                            list_filename=args.test_list,
                                            args=args,
                                            mode="VAL")
        reader_test = test_reader.make_reader(return_name=True)
        loader.set_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        for data in loader():
            real_img, label_org, label_trg, image_name = data[0][
                'input'], data[0]['label_org_'], data[0]['label_trg_'], data[
                    0]['image_name']
            image_name_save = _compute_start_end(image_name)
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_org))
                for j in range(len(np.array(label_org))):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                tensor_label_trg_ = fluid.LoDTensor()
                tensor_label_trg_.set(label_trg_tmp, place)
                out = exe.run(feed={
                    "input": real_img,
                    "label_trg_": tensor_label_trg_
                },
                              fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(np.array(label_org)) > 1:
                images_concat = np.concatenate(images_concat, 1)
            fake_image = Image.fromarray(
                ((images_concat + 1) * 127.5).astype(np.uint8))
            fake_image.save(
                os.path.join(args.output, "fake_image_" + image_name_save))

    elif args.model_net == 'Pix2pix' or args.model_net == 'CycleGAN':
        test_reader = reader_creator(image_dir=args.dataset_dir,
                                     list_filename=args.test_list,
                                     shuffle=False,
                                     batch_size=args.n_samples,
                                     mode="VAL")
        reader_test = test_reader.make_reader(args, return_name=True)
        loader.set_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        id2name = test_reader.id2name
        for data in loader():
            real_img, image_name = data[0]['input'], data[0]['image_name']
            image_names = []
            for name in image_name:
                image_names.append(id2name[np.array(name).astype('int32')[0]])
            print("read: ", image_names)
            fake_temp = exe.run(fetch_list=[fake.name],
                                feed={"input": real_img})
            fake_temp = save_batch_image(fake_temp[0])
            input_temp = save_batch_image(np.array(real_img))

            # save_batch_image drops the batch dim for single images;
            # restore it so the per-image loop below works uniformly.
            if len(image_names) == 1:
                fake_temp = np.expand_dims(fake_temp, axis=0)
                input_temp = np.expand_dims(input_temp, axis=0)
            for i, name in enumerate(image_names):
                fake_image = Image.fromarray(
                    ((fake_temp[i] + 1) * 127.5).astype(np.uint8))
                fake_image.save(os.path.join(args.output, "fake_" + name))
                input_image = Image.fromarray(
                    ((input_temp[i] + 1) * 127.5).astype(np.uint8))
                input_image.save(os.path.join(args.output, "input_" + name))
    elif args.model_net == 'SPADE':
        test_reader = triplex_reader_creator(image_dir=args.dataset_dir,
                                             list_filename=args.test_list,
                                             shuffle=False,
                                             batch_size=1,
                                             mode="TEST")
        id2name = test_reader.id2name
        reader_test = test_reader.make_reader(args, return_name=True)
        for data in zip(reader_test()):
            # Each sample is (label map, real image, instance map, name id).
            data_A, data_B, data_C, name = data[0]
            name = id2name[np.array(name).astype('int32')[0]]
            print("read: ", name)
            tensor_A = fluid.LoDTensor()
            tensor_C = fluid.LoDTensor()
            tensor_A.set(data_A, place)
            tensor_C.set(data_C, place)
            fake_B_temp = exe.run(fetch_list=[fake.name],
                                  feed={
                                      "input_label": tensor_A,
                                      "input_ins": tensor_C
                                  })
            # CHW -> HWC for image saving.
            fake_B_temp = np.squeeze(fake_B_temp[0]).transpose([1, 2, 0])
            input_B_temp = np.squeeze(data_B[0]).transpose([1, 2, 0])

            fakeB_image = Image.fromarray(
                ((fake_B_temp + 1) * 127.5).astype(np.uint8))
            fakeB_image.save(os.path.join(args.output, "fakeB_" + name))
            real_image = Image.fromarray(
                ((input_B_temp + 1) * 127.5).astype(np.uint8))
            real_image.save(os.path.join(args.output, "real_" + name))

    elif args.model_net == 'CGAN':
        noise_data = np.random.uniform(low=-1.0,
                                       high=1.0,
                                       size=[args.n_samples, args.noise_size
                                             ]).astype('float32')
        # Random class conditions in [0, 9) as float labels.
        label = np.random.randint(0, 9, size=[args.n_samples,
                                              1]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        conditions_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        conditions_tensor.set(label, place)
        fake_temp = exe.run(fetch_list=[fake.name],
                            feed={
                                "noise": noise_tensor,
                                "conditions": conditions_tensor
                            })[0]
        fake_image = np.reshape(fake_temp, (args.n_samples, -1))

        fig = utility.plot(fake_image)
        plt.savefig(os.path.join(args.output, 'fake_cgan.png'),
                    bbox_inches='tight')
        plt.close(fig)

    elif args.model_net == 'DCGAN':
        noise_data = np.random.uniform(low=-1.0,
                                       high=1.0,
                                       size=[args.n_samples, args.noise_size
                                             ]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        fake_temp = exe.run(fetch_list=[fake.name],
                            feed={"noise": noise_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.n_samples, -1))

        fig = utility.plot(fake_image)
        plt.savefig(os.path.join(args.output, 'fake_dcgan.png'),
                    bbox_inches='tight')
        plt.close(fig)
    else:
        raise NotImplementedError("model_net {} is not support".format(
            args.model_net))
Beispiel #29
0
 def test_xdim():
     """dropout2d only accepts 4-D inputs; feed a 5-D tensor to trigger it."""
     five_d = fluid.data(name='x1', shape=[2, 3, 4, 5, 6], dtype="int32")
     paddle.nn.functional.dropout2d(five_d)
Beispiel #30
0
        output = fluid.layers.elementwise_mul(
            mask, x) + fluid.layers.elementwise_mul((1.0 - mask), y)
        return output


def SlimFaceNet_A_x0_60(class_dim=None, scale=0.6, arch=None):
    """SlimFaceNet variant A with width multiplier 0.60.

    The ``scale`` and ``arch`` parameters are kept for signature
    compatibility but are always overridden by this variant's fixed values.
    """
    return SlimFaceNet(
        class_dim=class_dim,
        scale=0.6,
        arch=[0, 1, 5, 1, 0, 2, 1, 2, 0, 1, 2, 1, 1, 0, 1])


def SlimFaceNet_B_x0_75(class_dim=None, scale=0.6, arch=None):
    """SlimFaceNet variant B with width multiplier 0.75.

    The ``scale`` and ``arch`` parameters are kept for signature
    compatibility but are always overridden by this variant's fixed values.
    """
    return SlimFaceNet(
        class_dim=class_dim,
        scale=0.75,
        arch=[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 3, 2, 2, 3])


def SlimFaceNet_C_x0_75(class_dim=None, scale=0.6, arch=None):
    """SlimFaceNet variant C with width multiplier 0.75.

    The ``scale`` and ``arch`` parameters are kept for signature
    compatibility but are always overridden by this variant's fixed values.
    """
    return SlimFaceNet(
        class_dim=class_dim,
        scale=0.75,
        arch=[1, 1, 2, 1, 0, 2, 1, 0, 1, 0, 1, 1, 2, 2, 3])


if __name__ == "__main__":
    # Smoke test: build the static graph once over a dummy 112x112 RGB
    # input placeholder and run the network definition.
    paddle.enable_static()
    input_var = fluid.data(name='x', shape=[-1, 3, 112, 112], dtype='float32')
    print(input_var.shape)
    face_net = SlimFaceNet(10000,
                           arch=[1, 3, 3, 1, 1, 0, 0, 1, 0, 1, 1, 0, 5, 5, 3])
    logits = face_net.net(input_var)