Example 1
 def forward(self, input, indices, output_size=None):
     if output_size is None:
         n, c, h, w = input.shape
         out_h = (h - 1) * self.stride[0] - 2 * self.padding[0] + self.kernel_size[0]
         out_w = (w - 1) * self.stride[1] - 2 * self.padding[1] + self.kernel_size[1]
         output_size = (n, c, out_h, out_w)
     else:
         if len(output_size) == len(self.kernel_size) + 2:
             output_size = output_size[2:]
     t = str(input.dtype).lower().strip().split(".")[-1]
     t = TYPE_MAPPER[t]
     out = paddle.zeros(output_size, dtype=t)
     flatten_out = paddle.flatten(out)
     # convert per-window argmax indices into flat offsets over the whole output
     for i in range(indices.shape[0]):
         for j in range(indices.shape[1]):
             for k in range(indices.shape[2]):
                 for m in range(indices.shape[3]):
                     indices[i, j, k, m] = (out.shape[1] * out.shape[2] * out.shape[3]) * i + \
                                           (out.shape[2] * out.shape[3]) * j + indices[i, j, k, m]
     flatten_indices = paddle.flatten(indices)
     flatten_input = paddle.flatten(input)
     for i in range(flatten_indices.shape[0]):
         flatten_out[flatten_indices[i].tolist()] = flatten_input[i].tolist()
     out = paddle.reshape(flatten_out, out.shape)
     return out
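All of these examples center on paddle.flatten(x, start_axis=0, stop_axis=-1), which merges the axes from start_axis through stop_axis into a single one. A minimal shape-only sketch:

import paddle

x = paddle.rand([2, 3, 4, 5])
print(paddle.flatten(x).shape)                             # [120]
print(paddle.flatten(x, start_axis=1).shape)               # [2, 60]
print(paddle.flatten(x, start_axis=1, stop_axis=2).shape)  # [2, 12, 5]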
Example 2
    def __call__(self, x, index):
        # normalize the axis locally instead of mutating self.dim on every call
        dim = self.dim if self.dim >= 0 else self.dim + len(x.shape)
        x_range = list(range(len(x.shape)))
        x_range[0] = dim
        x_range[dim] = 0
        x_swaped = paddle.transpose(x, perm=x_range)
        index_range = list(range(len(index.shape)))
        index_range[0] = dim
        index_range[dim] = 0
        index_swaped = paddle.transpose(index, perm=index_range)
        dtype = index.dtype

        x_shape = paddle.shape(x_swaped)
        index_shape = paddle.shape(index_swaped)

        prod = paddle.cast(paddle.prod(x_shape), dtype=dtype) / x_shape[0]

        x_swaped_flattend = paddle.flatten(x_swaped)
        index_swaped_flattend = paddle.flatten(index_swaped)
        index_swaped_flattend *= prod

        bias = paddle.arange(start=0, end=prod, dtype=dtype)
        bias = paddle.reshape(bias, x_shape[1:])
        bias = paddle.crop(bias, index_shape[1:])
        bias = paddle.flatten(bias)
        bias = paddle.tile(bias, [index_shape[0]])
        index_swaped_flattend += bias

        gathered = paddle.index_select(x_swaped_flattend, index_swaped_flattend)
        gathered = paddle.reshape(gathered, index_swaped.shape)

        out = paddle.transpose(gathered, perm=x_range)

        return out
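The flatten-plus-offset trick above emulates a gather along an arbitrary axis. On Paddle versions that provide paddle.take_along_axis (2.3 and later), the behavior can be sanity-checked against the built-in; a small sketch under that assumption:

import paddle

x = paddle.to_tensor([[1., 2.], [3., 4.]])
idx = paddle.to_tensor([[0, 0], [1, 0]])
# gather along axis 1: out[i][j] = x[i][idx[i][j]]
print(paddle.take_along_axis(x, idx, axis=1))  # [[1., 1.], [4., 3.]]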
Example 3
 def test_type():
     # dtype must be float32, float64, int8, int32 or int64;
     # float16 input should be rejected by paddle.flatten
     image_shape = [3, 2, 4, 5]
     x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                    image_shape[3]).reshape(image_shape) / 100.
     x2 = x2.astype('float16')
     x2_var = paddle.fluid.data(
         name='x2', shape=image_shape, dtype='float16')
     paddle.flatten(x2_var)
Example 4
    def forward(self, input, label):
        feature = input["features"]
        logits = input["logits"]

        dist = paddle.sum(paddle.square(
            (paddle.unsqueeze(feature, 1) - paddle.unsqueeze(feature, 0))),
                          axis=2)

        # convert label to one-hot
        label = paddle.flatten(label)
        n_class = logits.shape[1]
        label = paddle.nn.functional.one_hot(label, n_class).astype("float32")

        s = (paddle.matmul(label, label,
                           transpose_y=True) == 0).astype("float32")
        margin = 2 * feature.shape[1]
        Ld = (1 - s) / 2 * dist + s / 2 * (margin - dist).clip(min=0)
        Ld = Ld.mean()

        if self.multi_label:
            # multi-label classification loss
            Lc = (logits - label * logits +
                  ((1 + (-logits).exp()).log())).sum(axis=1).mean()
        else:
            # single-label classification loss
            Lc = (-paddle.nn.functional.softmax(logits).log() *
                  label).sum(axis=1).mean()

        return {"dshsdloss": Lc + Ld * self.alpha}
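The flatten call above collapses a possibly [N, 1]-shaped label tensor to [N] before one-hot encoding; roughly:

import paddle

label = paddle.to_tensor([[0], [2], [1]])                        # [N, 1]
onehot = paddle.nn.functional.one_hot(paddle.flatten(label), 3)  # [N, n_class]
print(onehot.shape)                                              # [3, 3]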
Example 5
 def forward(self, rois_feat, stage=0):
     rois_feat = paddle.flatten(rois_feat, start_axis=1, stop_axis=-1)
     fc6 = self.fc6_list[stage](rois_feat)
     fc6_relu = self.fc6_relu_list[stage](fc6)
     fc7 = self.fc7_list[stage](fc6_relu)
     fc7_relu = self.fc7_relu_list[stage](fc7)
     return fc7_relu
Example 6
    def forward(self, x):
        batch = x.shape[0]
        x = self.cnn0(x)

        y = self.cnn1(x)
        y1 = self.avg(y)

        y = self.cnn2(y)
        y2 = self.avg(y)

        y = self.cnn3(y)
        y3 = self.avg(y)

        # print('CNN:', y1.shape, y2.shape, y3.shape)

        r, t = self.rnn0(x)

        x = paddle.concat([y1, y2, y3, r], axis=-1)

        x = self.rnn1(x)
        x = self.rnn2(x)
        x = paddle.flatten(x, start_axis=1)

        x = self.cls(x)
        return x
Example 7
    def forward(self, x):
        x = x.transpose([0, 3, 2, 1])
        x = self.bn0(x)
        x = x.transpose([0, 3, 2, 1])

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.drop1(x)
        x = self.layer2(x)
        x = self.drop2(x)
        x = self.layer3(x)
        x = self.drop3(x)
        x = self.layer4(x)
        x = self.drop4(x)

        if self.with_pool:
            x = self.avgpool(x)

        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.drop(x)

            x = self.extra_fc(x)
            x = self.relu2(x)
            x = self.fc(x)

        return x
Example 8
    def forward(self, x):

        x = self.conv_1(x)
        x = self.BN_1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 1st max-pool layer

        x = self.conv_2(x)
        x = self.BN_2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 2nd max-pool layer

        x = self.conv_3(x)
        x = self.BN_3(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 3rd max-pool layer

        x = self.conv_4(x)
        x = self.BN_4(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 4th max-pool layer

        x = paddle.flatten(x, 1, -1)  # flatten
        x = self.linear(x)  # linear

        output = x

        return output
Example 9
    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias
        relative_position_bias_table = self.create_parameter(
            shape=((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads), default_initializer=nn.initializer.Constant(value=0))  # 2*Wh-1 * 2*Ww-1, nH
        self.add_parameter("relative_position_bias_table", relative_position_bias_table)

        # get pair-wise relative position index for each token inside the window
        coords_h = paddle.arange(self.window_size[0])
        coords_w = paddle.arange(self.window_size[1])
        coords = paddle.stack(paddle.meshgrid([coords_h, coords_w]))                   # 2, Wh, Ww
        coords_flatten = paddle.flatten(coords, 1)                                     # 2, Wh*Ww
        relative_coords = coords_flatten.unsqueeze(-1) - coords_flatten.unsqueeze(1)   # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.transpose([1, 2, 0])                         # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1                            # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)                               # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.softmax = nn.Softmax(axis=-1)
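In this Swin-style attention module, paddle.flatten(coords, 1) turns the [2, Wh, Ww] coordinate grid into [2, Wh*Ww] so that pairwise differences can be broadcast. A self-contained sketch for a 2x2 window, mirroring the lines above:

import paddle

Wh, Ww = 2, 2
coords = paddle.stack(paddle.meshgrid(
    [paddle.arange(Wh), paddle.arange(Ww)]))    # 2, Wh, Ww
flat = paddle.flatten(coords, 1)                # 2, Wh*Ww
rel = flat.unsqueeze(-1) - flat.unsqueeze(1)    # 2, Wh*Ww, Wh*Ww
rel = rel.transpose([1, 2, 0])                  # Wh*Ww, Wh*Ww, 2
rel[:, :, 0] += Wh - 1                          # shift to start from 0
rel[:, :, 1] += Ww - 1
rel[:, :, 0] *= 2 * Ww - 1
print(rel.sum(-1))                              # [4, 4] table of bias indices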
Example 10
    def forward(self, x):
        x = self.relu(self.conv1_1(x))
        x = self.relu(self.conv1_2(x))
        x = self.pool(x)

        x = self.relu(self.conv2_1(x))
        x = self.relu(self.conv2_2(x))
        x = self.pool(x)

        x = self.relu(self.conv3_1(x))
        x = self.relu(self.conv3_2(x))
        x = self.relu(self.conv3_3(x))
        x = self.pool(x)

        x = self.relu(self.conv4_1(x))
        x = self.relu(self.conv4_2(x))
        x = self.relu(self.conv4_3(x))
        x = self.pool(x)

        x = self.relu(self.conv5_1(x))
        x = self.relu(self.conv5_2(x))
        x = self.relu(self.conv5_3(x))
        x = self.pool(x)

        x = paddle.flatten(x, 1, -1)
        x = self.dropout1(self.relu(self.fc1(x)))
        x = self.dropout2(self.relu(self.fc2(x)))
        x = self.fc3(x)
        return x
Example 11
 def forward(self, rois_feat):
     rois_feat = paddle.flatten(rois_feat, start_axis=1, stop_axis=-1)
     fc6 = self.fc6(rois_feat)
     fc6 = F.relu(fc6)
     fc7 = self.fc7(fc6)
     fc7 = F.relu(fc7)
     return fc7
Example 12
 def test_quant_flatten(self):
     start_axis = 1
     end_axis = 2
     out_1 = paddle.flatten(self.x, start_axis, end_axis)
     out_2 = paddle.nn.quant.flatten()(self.x.clone(), start_axis, end_axis)
     self.check(out_1, out_2)
     self.assertTrue(out_1.shape == out_2.shape)
Example 13
    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.

        Returns:
            dict[str->Tensor]: names and the corresponding features
        """
        assert x.dim() == 4, \
            f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!"
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for name, stage in zip(self.stage_names, self.stages):
            x = stage(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = paddle.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs
Example 14
 def forward(self, img):
     hidden = self.conv1(img)
     hidden = paddle.flatten(hidden, start_axis=1)
     hidden = self.linear1(hidden)
     hidden = self.linear2(hidden)
     prediction = self.linear3(hidden)
     return prediction
Example 15
    def forward(self, inputs):
        x = self.features(inputs['x1'])

        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.fc(x + inputs['x2'])
        return x
Example 16
def generate_flatten_contiguous_range(name: str, x, start_axis, stop_axis,
                                      in_dtype):
    import sys  # saveModel's target_dir comes from sys.argv[1]
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype)
        out = paddle.flatten(node_x, start_axis, stop_axis)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])

        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())
        outs = exe.run(feed={'x': x}, fetch_list=[out])
        saveModel(name,
                  exe,
                  feedkeys=['x'],
                  fetchlist=[out],
                  inputs=[x],
                  outputs=[outs[0]],
                  target_dir=sys.argv[1])

    return outs[0]
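Stripped of the saveModel export, a minimal static-graph run of paddle.flatten looks roughly like this:

import numpy as np
import paddle

paddle.enable_static()
x = np.random.rand(2, 3, 4).astype('float32')
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
    out = paddle.flatten(node_x, 1, 2)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
res, = exe.run(main_prog, feed={'x': x}, fetch_list=[out])
print(res.shape)  # (2, 12)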
Example 17
    def forward(self, inputs):
        x = self._conv(inputs)
        x = self._pool(x)
        x = self._conv_1(x)
        x = self._conv_2(x)
        x = self._pool(x)

        x = self._ince3a(x)
        x = self._ince3b(x)
        x = self._pool(x)

        ince4a = self._ince4a(x)
        x = self._ince4b(ince4a)
        x = self._ince4c(x)
        ince4d = self._ince4d(x)
        x = self._ince4e(ince4d)
        x = self._pool(x)

        x = self._ince5a(x)
        ince5b = self._ince5b(x)

        out, out1, out2 = ince5b, ince4a, ince4d

        if self.with_pool:
            out = self._pool_5(out)
            out1 = self._pool_o1(out1)
            out2 = self._pool_o2(out2)

        if self.num_classes > 0:
            out = self._drop(out)
            out = paddle.squeeze(out, axis=[2, 3])
            out = self._fc_out(out)

            out1 = self._conv_o1(out1)
            out1 = paddle.flatten(out1, start_axis=1, stop_axis=-1)
            out1 = self._fc_o1(out1)
            out1 = F.relu(out1)
            out1 = self._drop_o1(out1)
            out1 = self._out1(out1)

            out2 = self._conv_o2(out2)
            out2 = paddle.flatten(out2, start_axis=1, stop_axis=-1)
            out2 = self._fc_o2(out2)
            out2 = self._drop_o2(out2)
            out2 = self._out2(out2)

        return [out, out1, out2]
Example 18
 def __call__(self, x, pad):
     pad = paddle.reshape(pad, shape=[2, -1])
     pad = paddle.transpose(pad, perm=[1, 0])
     pad = paddle.flip(pad, axis=[0])  # paddle.flip supersedes the legacy paddle.reverse
     pad = paddle.flatten(pad)
     pad = paddle.cast(pad, dtype="int32")
     out = paddle.nn.functional.pad(x=x, pad=pad, **self.layer_attrs)
     return out
Example 19
 def forward(self, x):
     """
     forward
     """
     x = paddle.flatten(x,
                        start_axis=self.config["start_axis"],
                        stop_axis=self.config["stop_axis"])
     return x
Example 20
 def forward(self, x):
     x = self.pool0(x)
     x = self.conv0(x)
     x = self.conv1(x)
     x = self.pool1(x)
     # paddle.flatten takes start_axis/stop_axis, not an `axis` argument
     x = paddle.flatten(x, start_axis=1)
     x = paddle.fluid.layers.fc(x, size=self.num_classes)
     return x
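Note the difference from the legacy API this snippet was likely ported from: fluid.layers.flatten(x, axis=k) always returns a 2-D tensor [prod(dims[:k]), prod(dims[k:])], while paddle.flatten only merges the start_axis..stop_axis range. The two coincide for the common flatten-before-classifier case:

import paddle

x = paddle.rand([4, 8, 2, 2])
# legacy fluid.layers.flatten(x, axis=1) would also give [4, 32]
print(paddle.flatten(x, start_axis=1, stop_axis=-1).shape)  # [4, 32]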
Example 21
 def forward(self, x):
     y = self.conv1(x)
     y = self.bn1(y)
     out = self.conv2(x)
     out = self.bn2(out) + y
     out = self.relu(out)
     out = paddle.flatten(out, 1)
     return out
Example 22
 def forward(self, inputs):
     y = self.conv1(inputs)
     for block in self.block_list:
         y = block(y)
     y = self.pool2d_avg(y)
     y = paddle.flatten(y, start_axis=1, stop_axis=-1)
     y = self.out(y)
     return y
Example 23
    def forward(self, inputs):
        x = inputs[0]
        x = self.features(x)

        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.fc(x + inputs[1])
        return x
Example 24
    def forward(self, input_data):

        user_input = input_data[0]
        item_input = input_data[1]
        label = input_data[2]

        user_embedding_mf = self.MF_Embedding_User(user_input)
        mf_user_latent = paddle.flatten(x=user_embedding_mf,
                                        start_axis=1,
                                        stop_axis=2)
        item_embedding_mf = self.MF_Embedding_Item(item_input)
        mf_item_latent = paddle.flatten(x=item_embedding_mf,
                                        start_axis=1,
                                        stop_axis=2)
        mf_vector = paddle.multiply(mf_user_latent, mf_item_latent)
        prediction = self.prediction(mf_vector)
        prediction = self.sigmoid(prediction)
        return prediction
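Here flatten squeezes the length-1 sequence axis out of the [batch, 1, latent_dim] embedding lookups. A minimal sketch with made-up vocabulary and latent sizes:

import paddle

emb = paddle.nn.Embedding(100, 8)   # hypothetical vocab / latent sizes
ids = paddle.to_tensor([[3], [7]])  # [batch, 1]
latent = paddle.flatten(emb(ids), start_axis=1, stop_axis=2)
print(latent.shape)                 # [2, 8]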
Example 25
    def _forward_impl(self, x):
        x = self.features(x)

        x = self.avgpool(x)
        x = paddle.flatten(x, 1)

        x = self.classifier(x)

        return x
Example 26
def gather_op(x, dim, index):
    # emulate gathering along `dim` by flattening x and adding linear offsets
    dtype_mapping = {
        "VarType.INT32": "int32",
        "VarType.INT64": "int64",
        "paddle.int32": "int32",
        "paddle.int64": "int64"
    }
    if dim < 0:
        dim += len(x.shape)

    x_range = list(range(len(x.shape)))
    x_range[0] = dim
    x_range[dim] = 0
    x_swaped = paddle.transpose(x, perm=x_range)

    index_range = list(range(len(index.shape)))
    index_range[0] = dim
    index_range[dim] = 0
    index_swaped = paddle.transpose(index, perm=index_range)

    dtype = dtype_mapping[str(index.dtype)]
    x_shape = paddle.shape(x_swaped)
    index_shape = paddle.shape(index_swaped)
    prod = paddle.prod(x_shape, dtype=dtype) / x_shape[0]

    x_swaped_flattend = paddle.flatten(x_swaped)
    index_swaped_flattend = paddle.flatten(index_swaped)
    index_swaped_flattend *= prod

    bias = paddle.arange(start=0, end=prod, dtype=dtype)
    bias = paddle.reshape(bias, x_shape[1:])
    bias = paddle.crop(bias, index_shape[1:])
    bias = paddle.flatten(bias)
    bias = paddle.tile(bias, [index_shape[0]])

    index_swaped_flattend += bias

    gathered = paddle.index_select(x_swaped_flattend, index_swaped_flattend)
    gathered = paddle.reshape(gathered, index_swaped.shape)

    out = paddle.transpose(gathered, perm=x_range)

    return out
Example 27
    def forward(self, pred, label):
        one_hot = label > 0.5
        sample_weight = label != self._ignore_label

        sample_weight = sample_weight.astype('float32')

        if not self._from_logits:
            pred = F.sigmoid(pred)
        alpha = paddle.where(one_hot, self._alpha * sample_weight,
                             (1 - self._alpha) * sample_weight)
        pt = paddle.where(sample_weight.astype('bool'),
                          1.0 - paddle.abs(label - pred),
                          paddle.ones_like(pred))
        beta = (1 - pt)**self._gamma
        sw_sum = paddle.sum(sample_weight, axis=(-2, -1), keepdim=True)
        beta_sum = paddle.sum(beta, axis=(-2, -1), keepdim=True)
        mult = sw_sum / (beta_sum + self._eps)

        if self._detach_delimeter:
            mult = mult.detach()
        beta = beta * mult
        with paddle.no_grad():
            ignore_area = paddle.sum(
                (label == self._ignore_label).astype('float32'),
                axis=tuple(range(1, len(label.shape)))).numpy()
            sample_mult = paddle.mean(
                mult, axis=tuple(range(1, len(mult.shape)))).numpy()
            if np.any(ignore_area == 0):
                self._k_sum = 0.9 * self._k_sum + 0.1 * sample_mult[
                    ignore_area == 0].mean()
                beta_pmax = paddle.max(paddle.flatten(beta, 1), axis=1)
                beta_pmax = float(paddle.mean(beta_pmax))
                self._m_max = 0.8 * self._m_max + 0.2 * beta_pmax

        loss_mask = pt + self._eps < 1
        loss_mask = loss_mask.astype('float32')
        pt_mask = (pt + self._eps) * loss_mask + (1 - loss_mask) * paddle.ones(
            pt.shape)
        loss = -alpha * beta * paddle.log(pt_mask)
        loss = self._weight * (loss * sample_weight)

        if self._size_average:
            bsum = paddle.sum(sample_weight,
                              axis=misc.get_dims_with_exclusion(
                                  len(sample_weight.shape), self._batch_axis))
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape),
                                  self._batch_axis)) / (bsum + self._eps)
        else:
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape), self._batch_axis))

        return loss
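In the no_grad block above, paddle.flatten(beta, 1) reduces each sample's spatial map to a vector so paddle.max can take a per-image maximum; schematically:

import paddle

beta = paddle.rand([4, 16, 16])
per_image_max = paddle.max(paddle.flatten(beta, 1), axis=1)
print(per_image_max.shape)  # [4]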
Example 28
    def forward(self, x):
        x = self.features(x)

        if self.with_pool:
            x = self.pool2d_avg(x)

        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.classifier(x)
        return x
Example 29
 def forward(self, inputs):
     y = self._conv1(inputs)
     y = self._max_pool(y)
     for inv in self._block_list:
         y = inv(y)
     y = self._last_conv(y)
     y = self._pool2d_avg(y)
     y = paddle.flatten(y, start_axis=1, stop_axis=-1)
     y = self._fc(y)
     return y
Example 30
 def forward(self, x):
     out = self.stage0(x)
     out = self.stage1(out)
     out = self.stage2(out)
     out = self.stage3(out)
     out = self.stage4(out)
     out = self.gap(out)
     out = paddle.flatten(out, start_axis=1)
     out = self.linear(out)
     return out