Example #1
    def forward(self, x, condition):
        """Compute output for a whole folded sequence.
        
        Parameters
        ----------
        x : Tensor [shape=(batch_size, channel, height, width)]
            The input.
            
        condition : Tensor [shape=(batch_size, condition_channel, height, width)]
            The local condition.

        Returns
        -------
        res : Tensor [shape=(batch_size, channel, height, width)]
            The residual output.
            
        skip : Tensor [shape=(batch_size, channel, height, width)]
            The skip output.
        """
        x_in = x
        x = self.conv(x)
        x += self.condition_proj(condition)

        content, gate = paddle.chunk(x, 2, axis=1)
        x = paddle.tanh(content) * F.sigmoid(gate)

        x = self.out_proj(x)
        res, skip = paddle.chunk(x, 2, axis=1)
        res = x_in + res
        return res, skip
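
The tanh × sigmoid product above is the WaveNet-style gated activation: the convolution emits twice the channels and paddle.chunk splits them into a content half and a gate half. A minimal standalone sketch of just that pattern (toy shapes, no conditioning):

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([2, 8, 4, 4])              # (batch, 2 * channel, height, width)
    content, gate = paddle.chunk(x, 2, axis=1)  # two (2, 4, 4, 4) halves
    y = paddle.tanh(content) * F.sigmoid(gate)  # gated activation, (2, 4, 4, 4)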
Example #2
    def get_score(self, head, rel, tail):
        re_head, im_head = paddle.chunk(head, chunks=2, axis=-1)
        re_tail, im_tail = paddle.chunk(tail, chunks=2, axis=-1)

        phase_rel = rel / (self.emb_init / np.pi)
        re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)
        re_score = re_rel * re_tail + im_rel * im_tail
        im_score = re_rel * im_tail - im_rel * re_tail
        re_score = re_score - re_head
        im_score = im_score - im_head

        score = paddle.stack([re_score, im_score], axis=0)
        score = self.gamma - paddle.sum(paddle.norm(score, p=2, axis=0),
                                        axis=-1)
        return score
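
The chunk along axis=-1 splits each entity embedding into real and imaginary halves, and the relation embedding is read as a rotation phase (the RotatE construction). A toy sketch of the same split; emb_init and the dimensions here are made up for illustration:

    import numpy as np
    import paddle

    emb_init = 0.1                               # hypothetical embedding-init range
    head = paddle.randn([3, 4])                  # dim 4 = real half + imaginary half
    rel = paddle.randn([3, 2])                   # relations live in half the dimension
    re_head, im_head = paddle.chunk(head, chunks=2, axis=-1)  # each (3, 2)
    phase_rel = rel / (emb_init / np.pi)         # relation as a rotation angle
    re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)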
Example #3
    def forward(self, x):
        """Compute the stft transform.

        Parameters
        ------------
        x : Tensor [shape=(B, T)]
            The input waveform.

        Returns
        ------------
        real : Tensor [shape=(B, C, 1, frames)] 
            The real part of the spectrogram.
            
        imag : Tensor [shape=(B, C, 1, frames)] 
            The imaginary part of the spectrogram.
        """
        # x(batch_size, time_steps)
        # pad it first with reflect mode
        # TODO(chenfeiyu): report an issue on paddle.flip
        pad_start = paddle.reverse(x[:, 1:1 + self.n_fft // 2], axis=[1])
        pad_stop = paddle.reverse(x[:, -(1 + self.n_fft // 2):-1], axis=[1])
        x = paddle.concat([pad_start, x, pad_stop], axis=-1)

        # to BC1T, C=1
        x = paddle.unsqueeze(x, axis=[1, 2])
        out = F.conv2d(x, self.weight, stride=(1, self.hop_length))
        real, imag = paddle.chunk(out, 2, axis=1)  # BC1T
        return real, imag
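
The reflection padding is built by hand (the TODO notes a problem with paddle.flip at the time), the strided conv2d applies fixed DFT weights, and paddle.chunk separates the stacked real/imaginary output channels. A shape-level sketch with random weights standing in for the DFT basis, assuming a Paddle version where paddle.flip behaves correctly:

    import paddle
    import paddle.nn.functional as F

    n_fft, hop_length = 8, 4
    x = paddle.randn([2, 32])                                   # (B, T)
    pad_start = paddle.flip(x[:, 1:1 + n_fft // 2], axis=[1])   # reflect padding by hand
    pad_stop = paddle.flip(x[:, -(1 + n_fft // 2):-1], axis=[1])
    x = paddle.concat([pad_start, x, pad_stop], axis=-1)        # (2, 40)
    x = paddle.unsqueeze(x, axis=[1, 2])                        # to BC1T, C=1
    weight = paddle.randn([2 * (n_fft // 2 + 1), 1, 1, n_fft])  # stand-in for the DFT basis
    out = F.conv2d(x, weight, stride=(1, hop_length))
    real, imag = paddle.chunk(out, 2, axis=1)                   # (2, 9, 1, 9) each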
Example #4
 def forward(self, input, mask=None):
     """
     Args:
         input (obj: `paddle.Tensor`) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence.
         mask (obj: `paddle.Tensor`, optional, defaults to `None`) of shape (batch, seq_len):
             A bool tensor in which each element indicates whether the corresponding input token is a pad token.
     """
     forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2)
     # elementwise sum of forward_input and backward_input
     # Shape: (batch_size, max_seq_len, hidden_size)
     h = paddle.add_n([forward_input, backward_input])
     # Shape: (batch_size, hidden_size, 1)
     att_weight = self.att_weight.tile(
         repeat_times=(paddle.shape(h)[0], 1, 1))
     # Shape: (batch_size, max_seq_len, 1)
     att_score = paddle.bmm(paddle.tanh(h), att_weight)
     if mask is not None:
         # mask, remove the effect of 'PAD'
         mask = paddle.cast(mask, dtype='float32')
         mask = mask.unsqueeze(axis=-1)
         inf_tensor = paddle.full(
             shape=mask.shape, dtype='float32', fill_value=-INF)
         att_score = paddle.multiply(att_score, mask) + paddle.multiply(
             inf_tensor, (1 - mask))
     # Shape: (batch_size, max_seq_len, 1)
     att_weight = F.softmax(att_score, axis=1)
     # Shape: (batch_size, lstm_hidden_size)
     reps = paddle.bmm(h.transpose(perm=(0, 2, 1)),
                       att_weight).squeeze(axis=-1)
     reps = paddle.tanh(reps)
     return reps, att_weight
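
The opening chunk splits the concatenated BiLSTM output into its forward and backward halves, which are then summed elementwise before attention is applied. That step in isolation (toy shapes):

    import paddle

    out = paddle.randn([2, 5, 8])                   # (batch, seq_len, 2 * hidden)
    fwd, bwd = paddle.chunk(out, chunks=2, axis=2)  # forward / backward halves, (2, 5, 4) each
    h = paddle.add_n([fwd, bwd])                    # elementwise sum, (2, 5, 4)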
Example #5
    def get_neg_score(self,
                      heads,
                      relations,
                      tails,
                      batch_size,
                      mini_batch_size,
                      neg_sample_size,
                      neg_head=True):
        mini_batch_num = int(batch_size / mini_batch_size)
        if neg_head:
            hidden_dim = heads.shape[-1]
            re_tail, im_tail = paddle.chunk(tails, chunks=2, axis=-1)

            phase_rel = relations / (self.emb_init / np.pi)
            re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)
            real = re_tail * re_rel + im_tail * im_rel
            imag = -re_tail * im_rel + im_tail * re_rel

            emb_complex = paddle.concat([real, imag], axis=-1)
            score = emb_complex.reshape([mini_batch_num, -1, 1, hidden_dim])
            heads = heads.reshape([mini_batch_num, 1, -1, hidden_dim])
            score = score - heads
            re_score, im_score = paddle.chunk(score, chunks=2, axis=-1)  # real/imag were concatenated on the last axis
            score = paddle.stack(
                [re_score, im_score], axis=-1).norm(
                    p=2, axis=-1)
            return self.gamma - score.sum(-1)
        else:
            hidden_dim = heads.shape[-1]
            re_head, im_head = paddle.chunk(heads, chunks=2, axis=-1)

            phase_rel = relations / (self.emb_init / np.pi)
            re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)
            real = re_head * re_rel - im_head * im_rel
            imag = re_head * im_rel + im_head * re_rel

            emb_complex = paddle.concat([real, imag], axis=-1)
            score = emb_complex.reshape([mini_batch_num, -1, 1, hidden_dim])
            tails = tails.reshape([mini_batch_num, 1, -1, hidden_dim])
            score = score - tails
            re_score, im_score = paddle.chunk(score, chunks=2, axis=-1)  # real/imag were concatenated on the last axis
            score = paddle.stack(
                [re_score, im_score], axis=-1).norm(
                    p=2, axis=-1)

            return self.gamma - score.sum(-1)
Example #6
 def forward(self, x):
     y = []
     for i, x_i in enumerate(paddle.chunk(x, self.scale, axis=1)):
         if i == 0:
             y_i = x_i
         elif i == 1:
             y_i = self.blocks[i - 1](x_i)
         else:
             y_i = self.blocks[i - 1](x_i + y_i)
         y.append(y_i)
     y = paddle.concat(y, axis=1)
     return y
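
paddle.chunk divides the channel axis into self.scale groups that are filtered hierarchically, each group's output feeding the next (a Res2Net-style pattern), and the results are re-concatenated, so the channel count is preserved. A minimal check of that split/concat round trip:

    import paddle

    x = paddle.randn([2, 16, 8, 8])      # 16 channels -> 4 groups of 4
    parts = paddle.chunk(x, 4, axis=1)   # list of four (2, 4, 8, 8) tensors
    assert paddle.concat(parts, axis=1).shape == x.shape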
Example #7
    def add_input(self, x_row, condition_row):
        """Compute the output for a row and update the buffer.

        Parameters
        ----------
        x_row : Tensor [shape=(batch_size, channel, 1, width)]
            A row of the input.
            
        condition_row : Tensor [shape=(batch_size, condition_channel, 1, width)]
            A row of the condition.

        Returns
        -------
        res : Tensor [shape=(batch_size, channel, 1, width)]
            A row of the residual output.
            
        skip : Tensor [shape=(batch_size, channel, 1, width)]
            A row of the skip output.
        """
        x_row_in = x_row
        if self._conv_buffer is None:
            self._init_buffer(x_row)
        self._update_buffer(x_row)

        rw = self.rw
        x_row = F.conv2d(
            self._conv_buffer,
            self.conv.weight,
            self.conv.bias,
            padding=[0, 0, rw // 2, (rw - 1) // 2],
            dilation=self.dilations)
        x_row += self.condition_proj(condition_row)

        content, gate = paddle.chunk(x_row, 2, axis=1)
        x_row = paddle.tanh(content) * F.sigmoid(gate)

        x_row = self.out_proj(x_row)
        res, skip = paddle.chunk(x_row, 2, axis=1)
        res = x_row_in + res
        return res, skip
Example #8
 def test_out1(self):
     with fluid.dygraph.guard():
         input_1 = np.random.random([4, 6, 6]).astype("int32")
         # input is a variable which shape is [4, 6, 6]
         input = fluid.dygraph.to_variable(input_1)
         x0, x1, x2 = paddle.chunk(input, chunks=3, axis=1)
         x0_out = x0.numpy()
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.array_split(input_1, 3, axis=1)
     self.assertTrue(np.allclose(ex_x0, x0_out))
     self.assertTrue(np.allclose(ex_x1, x1_out))
     self.assertTrue(np.allclose(ex_x2, x2_out))
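
The same paddle.chunk vs. numpy.array_split equivalence can be checked without the legacy fluid guard; a sketch assuming the Paddle 2.x eager API:

    import numpy as np
    import paddle

    arr = np.random.random([4, 6, 6]).astype("float32")
    x0, x1, x2 = paddle.chunk(paddle.to_tensor(arr), chunks=3, axis=1)
    ex_x0, ex_x1, ex_x2 = np.array_split(arr, 3, axis=1)
    assert np.allclose(ex_x1, x1.numpy())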
Example #9
 def test_out(self):
     with fluid.program_guard(fluid.Program(), fluid.Program()):
         data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64')
         x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2)
         place = paddle.CPUPlace()
         exe = paddle.static.Executor(place)
         input1 = np.random.random([4, 6, 6]).astype('float64')
         r0, r1, r2 = exe.run(feed={"data1": input1},
                              fetch_list=[x0, x1, x2])
         ex_x0, ex_x1, ex_x2 = np.array_split(input1, 3, axis=2)
         self.assertTrue(np.allclose(ex_x0, r0))
         self.assertTrue(np.allclose(ex_x1, r1))
         self.assertTrue(np.allclose(ex_x2, r2))
Example #10
    def get_test_score(self, entity_embedding, head, rel, tail):
        re_entity_embedding, im_entity_embedding = paddle.chunk(
            entity_embedding, chunks=2, axis=-1)
        re_head, im_head = paddle.chunk(head, chunks=2, axis=-1)
        re_tail, im_tail = paddle.chunk(tail, chunks=2, axis=-1)
        phase_rel = rel / (self.emb_init / np.pi)
        re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)

        re_score = re_rel * re_tail + im_rel * im_tail
        im_score = re_rel * im_tail - im_rel * re_tail
        re_score = re_entity_embedding - re_score
        im_score = im_entity_embedding - im_score

        re_score = re_score * re_score
        im_score = im_score * im_score
        head_score = re_score + im_score
        head_score += self.epsilon
        head_score = paddle.sqrt(head_score)
        head_score = paddle.sum(head_score, axis=-1)

        re_score = re_head * re_rel - im_head * im_rel
        im_score = re_head * im_rel + im_head * re_rel
        re_score = re_entity_embedding - re_score
        im_score = im_entity_embedding - im_score

        re_score = re_score * re_score
        im_score = im_score * im_score
        tail_score = re_score + im_score
        tail_score += self.epsilon
        tail_score = paddle.sqrt(tail_score)
        tail_score = paddle.sum(tail_score, axis=-1)

        head_score = log_sigmoid(head_score)
        tail_score = log_sigmoid(tail_score)

        return head_score, tail_score
Example #11
 def compute_qkv(self, hidden_states):
     if self.fast_qkv:
         qkv = self.qkv_linear(hidden_states)
         q, k, v = paddle.chunk(qkv, 3, axis=-1)
         if q.ndimension() == self.q_bias.ndimension():
             q = q + self.q_bias
             v = v + self.v_bias
         else:
             _sz = (1, ) * (q.ndimension() - 1) + (-1, )
             q = q + self.q_bias.reshape(_sz)
             v = v + self.v_bias.reshape(_sz)
     else:
         q = self.query(hidden_states)
         k = self.key(hidden_states)
         v = self.value(hidden_states)
     return q, k, v
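
The fast path computes one fused projection of width 3 × hidden and lets paddle.chunk slice it into q, k, and v, trading three matmul launches for one. A standalone sketch of the fused projection with hypothetical sizes:

    import paddle
    import paddle.nn as nn

    hidden = 8
    qkv_linear = nn.Linear(hidden, 3 * hidden)         # one matmul instead of three
    h = paddle.randn([2, 5, hidden])                   # (batch, seq_len, hidden)
    q, k, v = paddle.chunk(qkv_linear(h), 3, axis=-1)  # each (2, 5, hidden)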
Example #12
    def forward(self, h, attn_mask=None, mems=None):
        if mems is not None:
            c = paddle.concat([mems, h], axis=1)
        else:
            c = h

        if self.normalize_before:
            c = self.layer_norm(c)

        head_q = self.q_proj(h)
        head_k, head_v = paddle.chunk(self.kv_proj(c), chunks=2, axis=-1)

        head_q = paddle.reshape(
            head_q, shape=[h.shape[0], h.shape[1], self.n_head, self.d_head])
        head_k = paddle.reshape(
            head_k, shape=[c.shape[0], c.shape[1], self.n_head, self.d_head])
        head_v = paddle.reshape(
            head_v, shape=[c.shape[0], c.shape[1], self.n_head, self.d_head])

        attn_score = paddle.einsum('bind,bjnd->bnij', head_q, head_k)
        attn_score = attn_score * self.scale
        if attn_mask is not None:
            attn_score = attn_score - 1e30 * attn_mask  # large finite value; float('inf') * 0 gives NaN

        attn_prob = F.softmax(attn_score, axis=-1)
        attn_prob = self.attn_drop(attn_prob)

        attn_vec = paddle.einsum('bnij,bjnd->bind', attn_prob, head_v)
        attn_vec = paddle.reshape(
            attn_vec,
            shape=[
                attn_vec.shape[0], attn_vec.shape[1], self.n_head * self.d_head
            ])

        attn_out = self.o_proj(attn_vec)
        attn_out = self.drop(attn_out)
        if self.normalize_before:
            output = h + attn_out
        else:
            output = self.layer_norm(h + attn_out)

        return output
Example #13
 def apply_rotary_position_embeddings(sinusoidal_pos,
                                      query_layer,
                                      key_layer,
                                      value_layer=None):
     # https://kexue.fm/archives/8265
     # sin [batch_size, num_heads, sequence_length, embed_size_per_head//2]
     # cos [batch_size, num_heads, sequence_length, embed_size_per_head//2]
     sin, cos = paddle.chunk(sinusoidal_pos, 2, axis=-1)
     # sin [θ0,θ1,θ2......θd/2-1] -> sin_pos [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
     sin_pos = paddle.reshape(paddle.stack([sin, sin], axis=-1),
                              sinusoidal_pos.shape)
     # cos [θ0,θ1,θ2......θd/2-1] -> cos_pos [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
     cos_pos = paddle.reshape(paddle.stack([cos, cos], axis=-1),
                              sinusoidal_pos.shape)
     # rotate_half_query_layer [-q1,q0,-q3,q2......,-qd-1,qd-2]
     rotate_half_query_layer = paddle.reshape(
         paddle.stack(
             [-query_layer[:, :, :, 1::2], query_layer[:, :, :, 0::2]],
             axis=-1),
         query_layer.shape,
     )
     query_layer = query_layer * cos_pos + rotate_half_query_layer * sin_pos
     # rotate_half_key_layer [-k1,k0,-k3,k2......,-kd-1,kd-2]
     rotate_half_key_layer = paddle.reshape(
         paddle.stack([-key_layer[:, :, :, 1::2], key_layer[:, :, :, 0::2]],
                      axis=-1),
         key_layer.shape,
     )
     key_layer = key_layer * cos_pos + rotate_half_key_layer * sin_pos
     if value_layer is not None:
         # rotate_half_value_layer [-v1,v0,-v3,v2......,-vd-1,vd-2]
         rotate_half_value_layer = paddle.reshape(
             paddle.stack(
                 [-value_layer[:, :, :, 1::2], value_layer[:, :, :, 0::2]],
                 axis=-1),
             value_layer.shape,
         )
         value_layer = value_layer * cos_pos + rotate_half_value_layer * sin_pos
         return query_layer, key_layer, value_layer
     return query_layer, key_layer
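
The stack-and-reshape idiom builds the rotate-half vector [-x1, x0, -x3, x2, ...] by interleaving the negated odd-indexed entries with the even-indexed ones. A toy check of the same mechanics, with the rank reduced to 2 for readability:

    import paddle

    x = paddle.to_tensor([[1., 2., 3., 4.]])
    rotate_half = paddle.reshape(
        paddle.stack([-x[:, 1::2], x[:, 0::2]], axis=-1), x.shape)
    # rotate_half is [[-2., 1., -4., 3.]]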
Example #14
    def forward(self, x):
        b, c, h, w = x.shape

        x = paddle.reshape(x, [b * self.groups, -1, h, w])
        x_0, x_1 = paddle.chunk(x, 2, axis=1)

        # channel attention
        xn = self.avg_pool(x_0)
        xn = self.cweight * xn + self.cbias
        xn = x_0 * self.sigmoid(xn)

        # spatial attention
        xs = self.gn(x_1)
        xs = self.sweight * xs + self.sbias
        xs = x_1 * self.sigmoid(xs)

        # concatenate along channel axis
        out = paddle.concat([xn, xs], axis=1)
        out = paddle.reshape(out, [b, -1, h, w])

        out = self.channel_shuffle(out, 2)
        return out
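
self.channel_shuffle is not shown above; a common implementation (a sketch under that assumption, not necessarily the author's) permutes a grouped view of the channel axis so the two attention branches mix:

    import paddle

    def channel_shuffle(x, groups):
        b, c, h, w = x.shape
        x = paddle.reshape(x, [b, groups, c // groups, h, w])
        x = paddle.transpose(x, perm=[0, 2, 1, 3, 4])  # swap group and channel axes
        return paddle.reshape(x, [b, c, h, w])

    out = channel_shuffle(paddle.randn([2, 8, 4, 4]), 2)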
Example #15
 def _predict_parameters(self, x, condition):
     x = self.input_proj(x)
     x = self.resnet(x, condition)
     bijection_params = self.output_proj(x)
     logs, b = paddle.chunk(bijection_params, 2, axis=1)
     return logs, b
Example #16
 def test_axis_variable_type():
     x2 = paddle.data(shape=[4], dtype='float16', name='x9')
     x3 = paddle.data(shape=[1], dtype='float16', name='x10')
     paddle.chunk(input=x2, chunks=2, axis=x3)
Example #17
    def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
        qlen, bsz = w.shape[1], w.shape[0]

        if mems is not None:
            cat = paddle.concat([mems, w], 1)
            if self.normalize_before:
                w_heads = self.qkv_proj(self.layer_norm(cat))
            else:
                w_heads = self.qkv_proj(cat)
            w_head_q, w_head_k, w_head_v = paddle.chunk(
                w_heads, chunks=3, axis=-1)

            w_head_q = w_head_q[-qlen:]
        else:
            if self.normalize_before:
                w_heads = self.qkv_proj(self.layer_norm(w))
            else:
                w_heads = self.qkv_proj(w)
            w_head_q, w_head_k, w_head_v = paddle.chunk(
                w_heads, chunks=3, axis=-1)

        klen = w_head_k.shape[1]

        w_head_q = paddle.reshape(
            w_head_q,
            shape=[
                w_head_q.shape[0], w_head_q.shape[1], self.n_head, self.d_head
            ])
        w_head_k = paddle.reshape(
            w_head_k,
            shape=[
                w_head_k.shape[0], w_head_k.shape[1], self.n_head, self.d_head
            ])
        w_head_v = paddle.reshape(
            w_head_v,
            shape=[
                w_head_v.shape[0], w_head_v.shape[1], self.n_head, self.d_head
            ])

        if klen > r_emb.shape[0]:
            r_emb_pad = r_emb[0:1].expand([klen - r_emb.shape[0], -1, -1])  # Paddle's expand takes a shape list
            r_emb = paddle.concat([r_emb_pad, r_emb], 0)
            r_bias_pad = r_bias[0:1].expand([klen - r_bias.shape[0], -1])
            r_bias = paddle.concat([r_bias_pad, r_bias], 0)
        else:
            r_emb = r_emb[-klen:]
            r_bias = r_bias[-klen:]

        rw_head_q = w_head_q + r_w_bias.unsqueeze([0])

        AC = paddle.einsum('bind,bjnd->bnij', rw_head_q, w_head_k)
        r_emb = r_emb.unsqueeze([0]).expand([bsz, -1, -1, -1])
        B_ = paddle.einsum('bind,bjnd->bnij', w_head_q, r_emb)
        D_ = r_bias.unsqueeze([0, 2])
        BD = self._rel_shift(B_ + D_)

        attn_score = AC + BD
        attn_score = attn_score * self.scale

        if attn_mask is not None:
            attn_score = attn_score - 1e30 * attn_mask  # large finite value; float('inf') * 0 gives NaN

        attn_prob = F.softmax(attn_score, axis=-1)
        attn_prob = self.attn_drop(attn_prob)

        attn_vec = paddle.einsum('bnij,bjnd->bind', attn_prob, w_head_v)

        attn_vec = paddle.reshape(
            attn_vec,
            shape=[
                attn_vec.shape[0], attn_vec.shape[1], self.n_head * self.d_head
            ])

        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)

        if self.normalize_before:
            output = w + attn_out
        else:
            output = self.layer_norm(w + attn_out)

        return output
Example #18
 def _predict_row_parameters(self, x_row, condition_row):
     x_row = self.input_proj(x_row)
     x_row = self.resnet.add_input(x_row, condition_row)
     bijection_params = self.output_proj(x_row)
     logs, b = paddle.chunk(bijection_params, 2, axis=1)
     return logs, b
Example #19
    def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
        qlen, rlen, bsz = w.shape[1], r.shape[1], w.shape[0]

        if mems is not None:
            cat = paddle.concat([mems, w], axis=1)
            if self.normalize_before:
                w_heads = self.qkv_proj(self.layer_norm(cat))
            else:
                w_heads = self.qkv_proj(cat)
            r_head_k = self.r_proj(r)

            w_head_q, w_head_k, w_head_v = paddle.chunk(
                w_heads, chunks=3, axis=-1)

            w_head_q = w_head_q[:, -qlen:, :]
        else:
            if self.normalize_before:
                w_heads = self.qkv_proj(self.layer_norm(w))
            else:
                w_heads = self.qkv_proj(w)
            r_head_k = self.r_proj(r)

            w_head_q, w_head_k, w_head_v = paddle.chunk(
                w_heads, chunks=3, axis=-1)

        klen = w_head_k.shape[1]

        w_head_q = paddle.reshape(
            w_head_q, shape=[bsz, qlen, self.n_head, self.d_head])
        w_head_k = paddle.reshape(
            w_head_k, shape=[bsz, klen, self.n_head, self.d_head])
        w_head_v = paddle.reshape(
            w_head_v, shape=[bsz, klen, self.n_head, self.d_head])

        r_head_k = paddle.reshape(
            r_head_k, shape=[bsz, rlen, self.n_head, self.d_head])

        rw_head_q = w_head_q + r_w_bias

        AC = paddle.einsum('bind,bjnd->bnij', rw_head_q, w_head_k)
        rr_head_q = w_head_q + r_r_bias

        BD = paddle.einsum('bind,bjnd->bnij', rr_head_q, r_head_k)
        BD = self._rel_shift(BD)

        attn_score = AC + BD
        attn_score = attn_score * self.scale

        if attn_mask is not None:
            attn_score = attn_score - 1e30 * attn_mask

        attn_prob = F.softmax(attn_score, axis=-1)
        attn_prob = self.attn_drop(attn_prob)

        attn_vec = paddle.einsum('bnij,bjnd->bind', attn_prob, w_head_v)

        attn_vec = paddle.reshape(
            attn_vec,
            shape=[
                attn_vec.shape[0], attn_vec.shape[1], self.n_head * self.d_head
            ])

        attn_out = self.o_proj(attn_vec)
        attn_out = self.drop(attn_out)

        if self.normalize_before:
            output = w + attn_out
        else:
            output = self.layer_norm(w + attn_out)

        return output
Example #20
 def chunk(self, chunks, dim=0):
     return paddle.chunk(self, chunks, axis=dim)

 def forward(self, x, s):
     h = self.fc(s)
     # h = h.view(h.size(0), h.size(1), 1, 1)
     h = paddle.reshape(h, (h.shape[0], h.shape[1], 1, 1))
     gamma, beta = paddle.chunk(h, chunks=2, axis=1)
     return (1 + gamma) * self.norm(x) + beta
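
The style code s is projected to 2 × C values, reshaped for broadcasting, and chunked into a per-channel gamma/beta pair that modulates the normalized features (an AdaIN-style affine). The modulation step in isolation (toy shapes):

    import paddle

    x = paddle.randn([2, 4, 8, 8])                   # normalized features
    h = paddle.randn([2, 8, 1, 1])                   # projected style, 2 * C channels
    gamma, beta = paddle.chunk(h, chunks=2, axis=1)  # each (2, 4, 1, 1)
    y = (1 + gamma) * x + beta                       # broadcast over H and W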
Example #22
 def test_axis_type():
     x1 = paddle.data(shape=[4], dtype='float16', name='x3')
     paddle.chunk(x=x1, chunks=2, axis=3.2)
Example #23
 def test_axis_type_tensor():
     x5 = paddle.data(shape=[4], dtype='float16', name='x6')
     paddle.chunk(input=x5, chunks=2, axis=3.2)
Example #24
    def forward(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        f3_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        f4_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        f5_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.fc6(h))
        h = F.relu(self.fc7(h))
        ffc7 = h
        h = F.relu(self.conv6_1(h))
        h = F.relu(self.conv6_2(h))
        f6_2 = h
        h = F.relu(self.conv7_1(h))
        h = F.relu(self.conv7_2(h))
        f7_2 = h

        f3_3 = self.conv3_3_norm(f3_3)
        f4_3 = self.conv4_3_norm(f4_3)
        f5_3 = self.conv5_3_norm(f5_3)

        cls1 = self.conv3_3_norm_mbox_conf(f3_3)
        reg1 = self.conv3_3_norm_mbox_loc(f3_3)
        cls2 = self.conv4_3_norm_mbox_conf(f4_3)
        reg2 = self.conv4_3_norm_mbox_loc(f4_3)
        cls3 = self.conv5_3_norm_mbox_conf(f5_3)
        reg3 = self.conv5_3_norm_mbox_loc(f5_3)
        cls4 = self.fc7_mbox_conf(ffc7)
        reg4 = self.fc7_mbox_loc(ffc7)
        cls5 = self.conv6_2_mbox_conf(f6_2)
        reg5 = self.conv6_2_mbox_loc(f6_2)
        cls6 = self.conv7_2_mbox_conf(f7_2)
        reg6 = self.conv7_2_mbox_loc(f7_2)

        # max-out background label
        chunk = paddle.chunk(cls1, 4, 1)
        tmp_max = paddle.where(chunk[0] > chunk[1], chunk[0], chunk[1])
        bmax = paddle.where(tmp_max > chunk[2], tmp_max, chunk[2])
        cls1 = paddle.concat([bmax, chunk[3]], axis=1)

        return [
            cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6,
            reg6
        ]
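
The max-out step keeps the largest of the first three background logits and concatenates it with the remaining logit, collapsing cls1 from four channels to two (the S3FD max-out-background trick). The chained paddle.where calls compute an elementwise maximum; paddle.maximum expresses the same thing directly (a sketch):

    import paddle

    cls1 = paddle.randn([2, 4, 8, 8])                  # 3 background logits + 1 face logit
    c0, c1, c2, c3 = paddle.chunk(cls1, 4, axis=1)
    bmax = paddle.maximum(paddle.maximum(c0, c1), c2)  # elementwise max of the background maps
    cls1 = paddle.concat([bmax, c3], axis=1)           # (2, 2, 8, 8)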
Example #25
 def test_chunks_type():
     x4 = paddle.data(shape=[4], dtype='float16', name='x4')
     paddle.chunk(input=x4, chunks=2.1, axis=3)