Example #1
    def __init__(self,
                 base_encoder,
                 dim=128,
                 queue_size=65536,
                 momentum=0.999,
                 scale=50,
                 margin=0.3):
        super(DCQ, self).__init__()

        self.queue_size = queue_size
        self.momentum = momentum
        self.scale = scale
        self.margin = margin

        # create the encoders
        # num_classes is the output fc dimension
        self.encoder_q = base_encoder(num_classes=dim, name_prefix='q')
        self.encoder_k = base_encoder(num_classes=dim, name_prefix='k')

        # make both encoders start from identical weights; the key encoder is
        # frozen here and is meant to be updated by momentum, not by gradient
        for param_q, param_k in zip(
                self.encoder_q.parameters(include_sublayers=True),
                self.encoder_k.parameters(include_sublayers=True)):
            param_k.stop_gradient = True
            param_q.set_value(param_k)

        self.register_buffer("weight_queue", paddle.randn([dim, queue_size]))
        self.weight_queue = normalize(self.weight_queue, axis=0)

        self.register_buffer("label_queue", paddle.randn([1, queue_size]))
        self.register_buffer("queue_ptr", paddle.zeros([
            1,
        ], dtype='int64'))
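For context, a minimal sketch of the momentum update that usually accompanies this MoCo-style encoder pair (an assumption about the surrounding class, not part of the original snippet; the helper name is hypothetical):

    @paddle.no_grad()
    def _momentum_update_key_encoder(self):
        # hypothetical helper (MoCo-style): drift the frozen key encoder
        # toward the query encoder at rate (1 - momentum)
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.set_value(param_k * self.momentum +
                              param_q * (1.0 - self.momentum))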
Example #2
    def setUp(self):
        self.in_num = 16
        self.out_num = 16
        self.x_spec = paddle.static.InputSpec([-1, 16], name='x')
        self.y_spec = paddle.static.InputSpec([16], name='y')
        self.x = paddle.randn([4, 16])
        self.y = paddle.randn([16])
Example #3
def style_mixing(generator, mean_style, n_source, n_target):
    source_code = paddle.randn([n_source, generator.style_dim])
    target_code = paddle.randn([n_target, generator.style_dim])

    resolution = 2**((generator.n_latent + 2) // 2)

    images = [paddle.ones([1, 3, resolution, resolution]) * -1]

    source_image = generator([source_code],
                             truncation_latent=mean_style,
                             truncation=0.7)[0]
    target_image = generator([target_code],
                             truncation_latent=mean_style,
                             truncation=0.7)[0]

    images.append(source_image)

    for i in range(n_target):
        image = generator(
            [target_code[i].unsqueeze(0).tile([n_source, 1]), source_code],
            truncation_latent=mean_style,
            truncation=0.7,
        )[0]
        images.append(target_image[i].unsqueeze(0))
        images.append(image)

    images = paddle.concat(images, 0)

    return images
Example #4
    def test_save_load_finetune_load(self):
        model_path = "test_jit_save_load_finetune_load/model"
        IMAGE_SIZE = 224
        inps0 = paddle.randn([1, IMAGE_SIZE])
        inps1 = paddle.randn([2, IMAGE_SIZE])
        # Use new namespace
        with unique_name.guard():
            layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
        layer_save(inps0)
        #save
        paddle.jit.save(layer_save, model_path)
        #load
        with unique_name.guard():
            layer_load = LayerLoadFinetune(IMAGE_SIZE, IMAGE_SIZE, model_path)
        #train
        train(layer_load, input_size=IMAGE_SIZE)
        result_00 = layer_load(inps0)
        result_01 = layer_load(inps1)
        #save
        paddle.jit.save(layer_load, model_path)
        #load
        layer_finetune = paddle.jit.load(model_path)
        result_10 = layer_finetune(inps0)
        result_11 = layer_finetune(inps1)

        self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
        self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)
Example #5
def test_forward_reshape():
    @paddle.jit.to_static
    def reshape1(inputs, x):
        new_shape = paddle.shape(x)
        return paddle.reshape(inputs, new_shape)

    @paddle.jit.to_static
    def reshape2(inputs):
        return inputs.reshape([-1])

    @paddle.jit.to_static
    def reshape3(inputs):
        data_shape = inputs.shape
        return inputs.reshape([data_shape[0] * data_shape[1], data_shape[2]])

    @paddle.jit.to_static
    def reshape4(inputs, x):
        new_shape = paddle.shape(x)
        return paddle.reshape(inputs, [new_shape[2], 2, -1])

    input_shape = [2, 1, 10, 1, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    input_data2 = paddle.randn([2, 1, 10, 10])
    verify_model(reshape1, input_data=[input_data, input_data2])
    verify_model(reshape2, input_data=input_data)
    verify_model(reshape3, input_data=paddle.randn((2, 3, 4)))
    verify_model(reshape4, input_data=[input_data, input_data2])
Example #6
def train(print_result=False):
    # 1. initialize parallel environment
    dist.init_parallel_env()

    # 2. create data parallel layer & optimizer
    layer = LinearNet()
    dp_layer = paddle.DataParallel(layer)

    loss_fn = nn.MSELoss()
    adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())

    # 3. run layer
    inputs = paddle.randn([10, 10], 'float32')
    outputs = dp_layer(inputs)
    labels = paddle.randn([10, 1], 'float32')
    loss = loss_fn(outputs, labels)

    if print_result is True:
        print("Rank:", int(os.getenv("PADDLE_TRAINER_ID")))

    loss.backward()
    adam.step()
    adam.clear_grad()

    return int(os.getenv("PADDLE_TRAINER_ID"))
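In practice, a train function like this is launched across devices with paddle.distributed.spawn; a minimal launcher sketch (assuming two visible devices and that this file defines train):

if __name__ == '__main__':
    # spawn two worker processes, each running train(print_result=True)
    dist.spawn(train, args=(True,), nprocs=2)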
Example #7
    def test_save_in_eval(self, with_training=True):
        paddle.jit.ProgramTranslator().enable(True)
        net = Net(12, 2)
        x = paddle.randn((2, 10, 12))
        if with_training:
            x.stop_gradient = False
            dygraph_out = net(x)
            loss = paddle.mean(dygraph_out)
            sgd = paddle.optimizer.SGD(learning_rate=0.001,
                                       parameters=net.parameters())
            loss.backward()
            sgd.step()
        # switch eval mode firstly
        net.eval()
        x = paddle.randn((2, 10, 12))
        net = paddle.jit.to_static(
            net, input_spec=[paddle.static.InputSpec(shape=[-1, 10, 12])])
        model_path = os.path.join(self.temp_dir.name, 'simple_lstm')
        paddle.jit.save(net, model_path)

        dygraph_out = net(x)
        # load saved model
        load_net = paddle.jit.load(model_path)

        static_out = load_net(x)
        self.assertTrue(np.allclose(dygraph_out.numpy(), static_out.numpy()),
                        msg='dygraph_out is {}\n static_out is \n{}'.format(
                            dygraph_out, static_out))
        # switch back into train mode.
        net.train()
        train_out = net(x)
        self.assertTrue(np.allclose(dygraph_out.numpy(), train_out.numpy()),
                        msg='dygraph_out is {}\n train_out is \n{}'.format(
                            dygraph_out, train_out))
Example #8
    def test_save_load_finetune_load(self):
        model_path = "test_jit_save_load_save_without_running/model"
        IMAGE_SIZE = 224
        inps0 = paddle.randn([1, IMAGE_SIZE])
        inps1 = paddle.randn([2, IMAGE_SIZE])
        # Use new namespace
        with unique_name.guard():
            layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
        #save
        paddle.jit.save(layer_save,
                        model_path,
                        input_spec=[
                            paddle.static.InputSpec(shape=[None, IMAGE_SIZE],
                                                    dtype='float32')
                        ])
        result_00 = layer_save(inps0)
        result_01 = layer_save(inps1)
        #load and save without running
        with unique_name.guard():
            layer_load = paddle.jit.load(model_path)
            paddle.jit.save(layer_load,
                            model_path,
                            input_spec=[
                                paddle.static.InputSpec(
                                    shape=[None, IMAGE_SIZE], dtype='float32')
                            ])
        #reload
        layer_reload = paddle.jit.load(model_path)
        result_10 = layer_reload(inps0)
        result_11 = layer_reload(inps1)

        self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
        self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)
Example #9
def train(print_result=False):
    # 1. enable dynamic mode
    paddle.disable_static()
    
    # 2. initialize parallel environment
    dist.init_parallel_env()

    # 3. create data parallel layer & optimizer
    layer = LinearNet()
    dp_layer = paddle.DataParallel(layer)

    loss_fn = nn.MSELoss()
    adam = opt.Adam(
        learning_rate=0.001, parameters=dp_layer.parameters())

    # 4. run layer
    inputs = paddle.randn([10, 10], 'float32')
    outputs = dp_layer(inputs)
    labels = paddle.randn([10, 1], 'float32')
    loss = loss_fn(outputs, labels)
    
    if print_result is True:
        print("loss:", loss.numpy())
    
    loss.backward()

    adam.step()
    adam.clear_grad()
Example #10
    def test_save_in_eval(self):
        paddle.jit.ProgramTranslator().enable(True)
        net = LinearNet()
        x = paddle.randn((2, 10))
        x.stop_gradient = False
        dygraph_out = net(x)
        loss = paddle.mean(dygraph_out)
        sgd = paddle.optimizer.SGD(learning_rate=0.001,
                                   parameters=net.parameters())
        loss.backward()
        sgd.step()
        # switch eval mode firstly
        net.eval()
        # save directly
        net = paddle.jit.to_static(
            net, input_spec=[paddle.static.InputSpec(shape=[-1, 10])])

        model_path = os.path.join(self.temp_dir.name, 'linear_net')
        paddle.jit.save(net, model_path)
        # load saved model
        load_net = paddle.jit.load(model_path)

        x = paddle.randn((2, 10))
        eval_out = net(x)

        infer_out = load_net(x)
        self.assertTrue(np.allclose(eval_out.numpy(), infer_out.numpy()),
                        msg='eval_out is {}\n infer_out is \n{}'.format(
                            eval_out, infer_out))
Example #11
    def test_api(self):
        shape = [1000, 784]
        train_program = Program()
        startup_program = Program()
        with program_guard(train_program, startup_program):
            x1 = paddle.randn(shape, 'float32')
            x2 = paddle.randn(shape, 'float64')

            dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
            dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
            x3 = paddle.randn([dim_1, dim_2, 784])

            var_shape = paddle.static.data('X', [2], 'int32')
            x4 = paddle.randn(var_shape)

        place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        res = exe.run(train_program,
                      feed={'X': np.array(shape, dtype='int32')},
                      fetch_list=[x1, x2, x3, x4])

        for out in res:
            self.assertAlmostEqual(np.mean(out), .0, delta=0.1)
            self.assertAlmostEqual(np.std(out), 1., delta=0.1)
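The same API is much simpler in dynamic (eager) mode; a minimal sketch:

import paddle

x = paddle.randn([1000, 784], dtype='float32')  # samples from N(0, 1)
print(float(x.mean()), float(x.std()))          # roughly 0.0 and 1.0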
Example #12
def train(print_result=True):
    """train"""
    # 1. initialize parallel environment
    train_data_list1 = []
    train_data_list2 = []
    dist.init_parallel_env()

    # 2. create data parallel layer & optimizer
    layer = LinearNet()
    dp_layer = paddle.DataParallel(layer)

    loss_fn = nn.MSELoss()
    adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())

    # 3. run layer
    inputs = paddle.randn([10, 10], 'float32')
    outputs = dp_layer(inputs)
    labels = paddle.randn([10, 1], 'float32')
    loss = loss_fn(outputs, labels)
    assert len(loss) == 1
    if print_result is True:
        train_data_list1.append(loss.numpy())
    assert len(train_data_list1)

    loss.backward()

    adam.step()
    adam.clear_grad()
Example #13
def train():
    # 1. enable dynamic mode
    paddle.disable_static()

    # 2. initialize parallel environment
    dist.init_parallel_env()

    # 3. create data parallel layer & optimizer
    layer = LinearNet()
    dp_layer = paddle.DataParallel(layer)

    loss_fn = nn.MSELoss()
    adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())

    # 4. run layer
    inputs = paddle.randn([10, 10], 'float32')
    outputs = dp_layer(inputs)
    labels = paddle.randn([10, 1], 'float32')
    loss = loss_fn(outputs, labels)

    loss = dp_layer.scale_loss(loss)
    loss.backward()
    dp_layer.apply_collective_grads()

    adam.step()
    adam.clear_grad()
Example #14
    def make_noise(self):
        noises = [paddle.randn((1, 1, 2**2, 2**2))]

        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(paddle.randn((1, 1, 2**i, 2**i)))

        return noises
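To see what this produces, a minimal sketch (model is a hypothetical instance of the surrounding generator with log_size == 8, i.e. 256x256 output):

noises = model.make_noise()
print(len(noises))        # 1 + 2 * (log_size - 2) = 13 noise maps
print(noises[0].shape)    # [1, 1, 4, 4]
print(noises[-1].shape)   # [1, 1, 256, 256]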
Example #15
    def make_noise(self, batch, num_noise):
        if num_noise == 1:
            noises = paddle.randn([batch, self.num_style_feat])
        else:
            noises = []
            for _ in range(num_noise):
                noises.append(paddle.randn([batch, self.num_style_feat]))

        return noises
Example #16
    def sample_prior(self, prob_decode=False):
        """
        Sample a molecule from prior distribution.
        Args:
            prob_decode(bool): using bernoulli distribution in graph decode if prob_decode=true.

        Returns:
            smiles.
        """
        z_tree = paddle.randn([1, self.latent_size])
        z_mol = paddle.randn([1, self.latent_size])
        return self.decode(z_tree, z_mol, prob_decode)
Example #17
def test_GRU_base():
    """
    api: paddle.nn.GRU
    op version: 9, 10, 11, 12
    """
    op = Net()
    op.eval()
    # net, name, ver_list, delta=1e-10, rtol=1e-11
    obj = APIOnnx(op, 'nn_GRU', [9, 10, 11, 12])
    obj.set_input_data("input_data", paddle.randn((4, 23, 16)),
                       paddle.randn((2, 4, 32)))
    obj.run()
Example #18
def train():
    """bergin train"""
    arr1 = []
    arr2 = []
    dist.init_parallel_env()
    set_seed(2021)
    layer = LinearNet()

    if dist.get_world_size() > 1:
        dp_layer = paddle.DataParallel(layer)
    else:
        dp_layer = layer

    layer2 = LinearNet()

    if dist.get_world_size() > 1:
        dp_layer2 = paddle.DataParallel(layer2)
    else:
        dp_layer2 = layer2

    dp_layer2.set_state_dict(dp_layer.state_dict())

    loss_fn = nn.MSELoss()
    adam = opt.Adam(
        learning_rate=0.001, parameters=dp_layer.parameters())

    adam2 = opt.Adam(
        learning_rate=0.001, parameters=dp_layer2.parameters())

    for i in range(2):
        batch_size = 10
        shard = int(batch_size / dist.get_world_size())
        start_no = shard * dist.get_rank()
        end_no = start_no + shard
        inputs = paddle.randn([10, 10], 'float32')[start_no:end_no]
        outputs = dp_layer(inputs)
        labels = paddle.randn([10, 1], 'float32')[start_no:end_no]
        loss = loss_fn(outputs, labels)
        if dist.get_rank() == 0:
            arr1.append(loss.numpy()[0])
        loss.backward()
        adam.step()
        adam.clear_grad()

        outputs = dp_layer2(inputs)
        loss = loss_fn(outputs, labels)
        loss.backward()
        if dist.get_rank() == 0:
            arr2.append(loss.numpy()[0])
        adam2.step()
        adam2.clear_grad()
    check_data(arr1, arr2)
Example #19
def train():
    # 1. initialize parallel environment
    dist.init_parallel_env()
    set_seed(2021)
    # 2. create data parallel layer & optimizer
    layer = LinearNet()

    if dist.get_world_size() > 1:
        dp_layer = paddle.DataParallel(layer)
    else:
        dp_layer = layer

    layer2 = LinearNet()

    if dist.get_world_size() > 1:
        dp_layer2 = paddle.DataParallel(layer2)
    else:
        dp_layer2 = layer2

    dp_layer2.set_state_dict(dp_layer.state_dict())

    loss_fn = nn.MSELoss()
    adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())

    adam2 = opt.Adam(learning_rate=0.001, parameters=dp_layer2.parameters())
    # 3. run layer

    print("Start")
    for i in range(10):
        batch_size = 10
        shard = int(batch_size / dist.get_world_size())
        start_no = shard * dist.get_rank()
        end_no = start_no + shard
        inputs = paddle.randn([10, 10], 'float32')[start_no:end_no]
        outputs = dp_layer(inputs)
        labels = paddle.randn([10, 1], 'float32')[start_no:end_no]
        loss = loss_fn(outputs, labels)
        if dist.get_rank() == 0:
            print("Loss1", loss.numpy()[0])
            print(dp_layer.parameters())
        loss.backward()
        adam.step()
        adam.clear_grad()

        outputs = dp_layer2(inputs)
        loss = loss_fn(outputs, labels)
        loss.backward()
        if dist.get_rank() == 0:
            print("Loss2", loss.numpy()[0])
            print(dp_layer2.parameters())
        adam2.step()
        adam2.clear_grad()
Example #20
    def test_dygraph(self):
        with paddle.fluid.dygraph.base.guard():
            x = paddle.randn([10, 10], dtype='float32')
            y = paddle.poisson(x)
            self.assertTrue(np.min(y.numpy()) >= 0)

            with _test_eager_guard():
                x = paddle.randn([10, 10], dtype='float32')
                x.stop_gradient = False
                y = paddle.poisson(x)
                y.backward()
                self.assertTrue(np.min(y.numpy()) >= 0)
                self.assertTrue(np.array_equal(np.zeros_like(x), x.gradient()))
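A standalone sketch of the same property outside the test harness: paddle.poisson treats its input as per-element rates, and sampling is non-differentiable, so the gradient with respect to the rates is all zeros (assumes eager mode):

import numpy as np
import paddle

rate = paddle.rand([10, 10]) * 5.0  # non-negative Poisson rates
rate.stop_gradient = False
samples = paddle.poisson(rate)      # one integer-valued sample per rate
samples.sum().backward()
assert np.min(samples.numpy()) >= 0
assert np.array_equal(rate.gradient(), np.zeros_like(rate.gradient()))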
Example #21
    def setUp(self):
        model = ModelInputDict()

        sp_net_config = supernet(expand_ratio=[0.5, 1.0])
        self.model = Convert(sp_net_config).convert(model)
        self.images = paddle.randn(shape=[2, 3, 32, 32], dtype='float32')
        self.images2 = {
            'data': paddle.randn(shape=[2, 12, 32, 32], dtype='float32')
        }
        default_run_config = {'skip_layers': ['conv1.0', 'conv2.0']}
        self.run_config = RunConfig(**default_run_config)

        self.ofa_model = OFA(self.model, run_config=self.run_config)
        self.ofa_model._clear_search_space(self.images, data=self.images2)
Example #22
    def setup_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Args:
            input (dict): include the data itself and its metadata information.

        The option 'direction' can be used to swap images in domain A and domain B.
        """
        self.input = input
        self.input['z_trg'] = paddle.randn(
            (input['src'].shape[0], self.latent_dim))
        self.input['z_trg2'] = paddle.randn(
            (input['src'].shape[0], self.latent_dim))
Example #23
    def test_cuda_rng_tracker(self):
        seed_1 = 2021
        seed_2 = 1024

        size = [20, 15]

        paddle.seed(seed_1)
        target_11 = paddle.randn(size, "float32")
        target_12 = paddle.randn(size, "float32")

        paddle.seed(seed_2)
        target_21 = paddle.randn(size, "float32")
        target_22 = paddle.randn(size, "float32")

        paddle.seed(seed_1)

        fleet.meta_parallel.get_rng_state_tracker().add("test", seed_2)

        result_11 = paddle.randn(size, "float32")

        with fleet.meta_parallel.get_rng_state_tracker().rng_state("test"):
            result_21 = paddle.randn(size, "float32")

        result_12 = paddle.randn(size, "float32")

        with fleet.meta_parallel.get_rng_state_tracker().rng_state("test"):
            result_22 = paddle.randn(size, "float32")

        np.testing.assert_allclose(result_11.numpy(), target_11.numpy())
        np.testing.assert_allclose(result_12.numpy(), target_12.numpy())
        np.testing.assert_allclose(result_21.numpy(), target_21.numpy())
        np.testing.assert_allclose(result_22.numpy(), target_22.numpy())
Example #24
    def test_api(self):
        input = paddle.randn([6, 4, 2, 2])
        out = paddle.fluid.layers.temporal_shift(x=input,
                                                 seg_num=2,
                                                 shift_ratio=0.2)

        out_from_function = paddle.nn.functional.temporal_shift(
            x=input, seg_num=2, shift_ratio=0.2)

        # dygraph
        with paddle.fluid.dygraph.guard():
            input = paddle.randn([6, 4, 2, 2])
            out = paddle.nn.functional.temporal_shift(x=input,
                                                      seg_num=2,
                                                      shift_ratio=0.2)
Example #25
    def __init__(self, num_classes=5013, feat_dim=2048):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.centers = paddle.randn(
            shape=[self.num_classes, self.feat_dim]).astype(
                "float64")  # randomly initialized class centers
Example #26
def ddpm_steps(x, seq, model, b, **kwargs):
    with paddle.no_grad():
        n = x.shape[0]
        seq_next = [-1] + list(seq[:-1])
        xs = [x]
        x0_preds = []
        betas = b
        for i, j in zip(reversed(seq), reversed(seq_next)):
            t = (paddle.ones([n]) * i)
            next_t = (paddle.ones([n]) * j)
            at = compute_alpha(betas, t.astype('int64'))
            atm1 = compute_alpha(betas, next_t.astype('int64'))
            beta_t = 1 - at / atm1
            x = xs[-1]

            output = model(x, t.astype('float32'))
            e = output

            x0_from_e = (1.0 / at).sqrt() * x - (1.0 / at - 1).sqrt() * e
            x0_from_e = paddle.clip(x0_from_e, -1, 1)
            x0_preds.append(x0_from_e)
            mean_eps = ((atm1.sqrt() * beta_t) * x0_from_e +
                        ((1 - beta_t).sqrt() * (1 - atm1)) * x) / (1.0 - at)

            mean = mean_eps
            noise = paddle.randn(x.shape)
            mask = 1 - (t == 0).astype('float32')
            mask = mask.reshape([-1, 1, 1, 1])
            logvar = beta_t.log()
            sample = mean + mask * paddle.exp(0.5 * logvar) * noise
            xs.append(sample)
    return xs, x0_preds
Example #27
    def __init__(self,
                 dim,
                 in_dim,
                 head_cnt=1,
                 kernel_ratio=0.5,
                 dp1=0.1,
                 dp2=0.1):
        super().__init__()
        self.emb = in_dim * head_cnt  # head_cnt is 1 in practice, so no multi-head handling is needed here
        self.kqv = nn.Linear(dim, 3 * self.emb)
        self.dp = nn.Dropout(dp1)
        self.proj = nn.Linear(self.emb, self.emb)
        self.head_cnt = head_cnt
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(self.emb)
        self.epsilon = 1e-8  # for numerical stability in division

        self.mlp = nn.Sequential(
            nn.Linear(self.emb, 1 * self.emb),
            nn.GELU(),
            nn.Linear(1 * self.emb, self.emb),
            nn.Dropout(dp2),
        )

        self.m = int(self.emb * kernel_ratio)
        self.w = paddle.randn((self.m, self.emb))

        self.w = add_parameter(self, orthogonal_(self.w) * math.sqrt(self.m))
Example #28
def build_program():
    main_program = paddle.static.Program()
    startup_program = paddle.static.Program()

    with paddle.static.program_guard(main_program, startup_program):
        with paddle.static.device_guard('cpu'):
            data = paddle.ones([4, 64], dtype='float32', name='data')

        # data -> [memcpy_h2d] -> data' -> [matmul] -> out ->[add] -> add_out
        with paddle.static.device_guard('gpu'):
            weight = paddle.randn([64, 64], name='weight')  # gpu
            matmul_out = paddle.matmul(data, weight, name='matmul_out')  # gpu
            bias = paddle.ones([4, 64], dtype='float32', name='bias')
            add_out = paddle.add(matmul_out, bias, name='add_out')

        # add_out -> [memcpy_d2h] -> add_out' -> [sub] -> sub_out -> [tanh] -> tanh_out
        with paddle.static.device_guard('cpu'):
            sub_out = paddle.subtract(add_out, data, name='sub_out')
            tanh_out = paddle.tanh(sub_out, name='tanh_out')

        with paddle.static.device_guard('gpu'):
            bias_1 = paddle.add(bias, sub_out, name='bias_1')
            out_before = paddle.tanh(bias_1, name='out_before')
            out_last = paddle.subtract(tanh_out, data, name='out_last')

            out = paddle.add(out_before, out_last, name='out')
            mean = paddle.mean(out, name='mean_out')

    return main_program, startup_program, [mean]
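A minimal sketch of executing the program built above (an assumption about the surrounding harness; since the program pins some ops to 'gpu', this needs a CUDA build):

paddle.enable_static()
main_program, startup_program, fetch_targets = build_program()
exe = paddle.static.Executor()  # uses the default place (GPU if available)
exe.run(startup_program)
mean_out, = exe.run(main_program, fetch_list=fetch_targets)
print(mean_out)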
Example #29
    def test_error(self):
        x = paddle.randn([2, 3, 4, 5])
        # src must have the same number with dst
        with self.assertRaises(AssertionError):
            paddle.moveaxis(x, [1, 0], [2])

        # each element of src must be unique
        with self.assertRaises(ValueError):
            paddle.moveaxis(x, [1, 1], [0, 2])

        # each element of dst must be unique
        with self.assertRaises(ValueError):
            paddle.moveaxis(x, [0, 1], [2, 2])

        # each element of src must be integer
        with self.assertRaises(AssertionError):
            paddle.moveaxis(x, [0.5], [1])

        # each element of dst must be integer
        with self.assertRaises(AssertionError):
            paddle.moveaxis(x, [0], [1.5])

        # each element of src must be in the range of [-4, 3)
        with self.assertRaises(AssertionError):
            paddle.moveaxis(x, [-10, 1], [2, 3])

        # each element of dst must be in the range of [-4, 3)
        with self.assertRaises(AssertionError):
            paddle.moveaxis(x, [2, 1], [10, 3])
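For contrast, a valid call to the same API; a minimal sketch:

x = paddle.randn([2, 3, 4, 5])
y = paddle.moveaxis(x, [0, 1], [1, 0])  # swap the first two axes
print(y.shape)  # [3, 2, 4, 5]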
Example #30
    def setUp(self):
        self.in_num = 16
        self.out_num = 16
        self.x = paddle.randn([4, 16])
        self.spec = [
            paddle.static.InputSpec(shape=[None, 16], dtype='float32')
        ]