def _forward(self, x):
     x = self.Conv2d_1a_3x3(x)
     x = self.Conv2d_2a_3x3(x)
     x = self.Conv2d_2b_3x3(x)
     x = nn.pool(x, 3, "maximum", stride=2)
     x = self.Conv2d_3b_1x1(x)
     x = self.Conv2d_4a_3x3(x)
     x = nn.pool(x, 3, "maximum", stride=2)
     x = self.Mixed_5b(x)
     x = self.Mixed_5c(x)
     x = self.Mixed_5d(x)
     x = self.Mixed_6a(x)
     x = self.Mixed_6b(x)
     x = self.Mixed_6c(x)
     x = self.Mixed_6d(x)
     x = self.Mixed_6e(x)
     aux_defined = self.aux_logits
     if aux_defined:
         aux = self.AuxLogits(x)
     else:
         aux = None
     x = self.Mixed_7a(x)
     x = self.Mixed_7b(x)
     x = self.Mixed_7c(x)
     x = nn.AdaptiveAvgPool2d(1)(x)
     x = nn.Dropout()(x)
     x = jt.reshape(x, (x.shape[0], -1))
     x = self.fc(x)
     return (x, aux)
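The stem returns the main logits together with the auxiliary logits (or None). A minimal sketch of how such a pair is usually consumed, assuming an Inception-style jittor Module whose call returns (x, aux) as above; inception_loss is an illustrative name, and the 0.4 weight follows the Inception v3 paper:

from jittor import nn

def inception_loss(model, images, labels):
    logits, aux = model(images)                   # model returns (x, aux)
    loss = nn.cross_entropy_loss(logits, labels)
    if aux is not None:
        # the auxiliary classifier is conventionally down-weighted
        loss = loss + 0.4 * nn.cross_entropy_loss(aux, labels)
    return loss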
Example #2
 def execute(self, inputs):
     assert len(inputs) == len(self.in_channels)
     outs = []
     outs.append(inputs[0])
     for i in range(1, len(inputs)):
         outs.append(
             nn.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
     out = jt.contrib.concat(outs, dim=1)
     # (disabled) gradient-checkpointed variant of the reduction conv:
     # if out.requires_grad and self.with_checkpoint:
     #     out = checkpoint(self.reduction_conv, out)
     # else:
     #     out = self.reduction_conv(out)
     out = self.reduction_conv(out)
     outs = [out]
     for i in range(1, self.num_level):
         outs.append(
             nn.pool(out, kernel_size=2**i, stride=2**i, op=self.pooling))
     outputs = []
     if self.share_conv:
         for i in range(self.num_level):
             outputs.append(self.fpn_conv(outs[i]))
     else:
         for i in range(self.num_level):
             if not outs[i].is_stop_grad() and self.with_checkpoint:
                 tmp_out = checkpoint(self.fpn_conv[i], outs[i])
             else:
                 tmp_out = self.fpn_conv[i](outs[i])
             outputs.append(tmp_out)
     return tuple(outputs)
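The execute above upsamples every level to the finest resolution, merges them, and then pools the merged map back into a pyramid. A standalone sketch of that gather-and-redistribute pattern; the level count and channel sizes are illustrative assumptions, not this module's configuration:

import jittor as jt
from jittor import nn

levels = [jt.randn(1, 16, 32 // 2**i, 32 // 2**i) for i in range(3)]
# upsample every level to the finest resolution and merge along channels
outs = [levels[0]] + [
    nn.interpolate(levels[i], scale_factor=2**i, mode='bilinear')
    for i in range(1, 3)]
merged = jt.contrib.concat(outs, dim=1)                # (1, 48, 32, 32)
# redistribute into a pyramid by pooling with growing strides
pyramid = [merged] + [
    nn.pool(merged, kernel_size=2**i, stride=2**i, op='maximum')
    for i in range(1, 3)]
print([p.shape for p in pyramid])                      # 32x32, 16x16, 8x8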
 def _forward(self, x):
     branch3x3 = self.branch3x3(x)
     branch3x3dbl = self.branch3x3dbl_1(x)
     branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
     branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
     branch_pool = nn.pool(x, 3, "maximum", stride=2)
     outputs = [branch3x3, branch3x3dbl, branch_pool]
     return outputs
Example #4
 def test_index_pool2(self):
     # return_indices=True also yields, for every pooling window, the
     # row-major index of the maximum *within that window*: each corner 1
     # below sits at a different local position, hence [0, 1, 2, 3].
     pool = jt.nn.Pool(2, return_indices=True)
     a = jt.array([1,0,0,1,
                   0,0,0,0,
                   0,0,0,0,
                   1,0,0,1]).reshape((1,1,4,4))
     b, idx = pool(a)
     assert (idx.data.reshape((4,)) == [0,1,2,3]).all()
    def execute(self, x):
        x = nn.pool(x, kernel_size=5, op="mean", stride=3)
        x = self.conv0(x)
        x = self.conv1(x)

        x = nn.AdaptiveAvgPool2d(1)(x)
        x = jt.reshape(x, (x.shape[0], -1))
        x = self.fc(x)
        return x
Example #6
 def execute(self, x, mask):
     mask_pool = nn.pool(mask, kernel_size=2, stride=2, op='maximum')
     x = jt.contrib.concat((x, mask_pool), 1)
     for layer_name in self.blocks:
         x = nn.relu(getattr(self, layer_name)(x))
     x = x.reshape(x.shape[0], -1)
     x = nn.relu(self.maskiou_fc1(x))
     x = nn.relu(self.maskiou_fc2(x))
     return x
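A standalone sketch of the fusion step at the top of this execute: the mask is max-pooled down to the feature map's resolution and appended as an extra input channel. The shapes are illustrative assumptions:

import jittor as jt
from jittor import nn

x = jt.randn(2, 256, 14, 14)       # backbone features
mask = jt.randn(2, 1, 28, 28)      # mask prediction at twice the resolution
mask_pool = nn.pool(mask, kernel_size=2, stride=2, op='maximum')
fused = jt.contrib.concat((x, mask_pool), 1)
print(fused.shape)                 # (2, 257, 14, 14)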
Example #7
    def execute(self, convouts: List[jt.Var]):
        """
        Args:
            - convouts (list): A list of convouts for the corresponding layers in in_channels.
        Returns:
            - A list of FPN convouts in the same order as x with extra downsample layers if requested.
        """

        out = []
        x = jt.zeros((1, ))
        for i in range(len(convouts)):
            out.append(x)

        # For backward compatibility, the conv layers are stored in reverse, while the
        # input and output are in the usual shallow-to-deep order; the loop below
        # therefore walks j from len(convouts)-1 down to 0.
        j = len(convouts)
        for lat_layer in self.lat_layers.layers.values():
            j -= 1

            if j < len(convouts) - 1:
                _, _, h, w = convouts[j].shape
                x = nn.interpolate(x,
                                   size=(h, w),
                                   mode=self.interpolation_mode,
                                   align_corners=False)

            x = x + lat_layer(convouts[j])
            out[j] = x

        # This second loop is kept separate for script-compilation reasons
        # (TorchScript in the original implementation).
        j = len(convouts)
        for pred_layer in self.pred_layers.layers.values():
            j -= 1
            out[j] = pred_layer(out[j])

            if self.relu_pred_layers:
                out[j] = nn.relu(out[j])

        cur_idx = len(out)

        # In the original paper, this takes care of P6
        if self.use_conv_downsample:
            for downsample_layer in self.downsample_layers.layers.values():
                out.append(downsample_layer(out[-1]))
        else:
            for idx in range(self.num_downsample):
                # Note: this is an untested alternative to out.append(out[-1][:, :, ::2, ::2]),
                # kept from the original implementation's TorchScript workaround.
                out.append(nn.pool(out[-1], 1, stride=2, op='maximum'))

        if self.relu_downsample_layers:
            for idx in range(len(out) - cur_idx):
                out[idx] = nn.relu(out[idx + cur_idx])

        return out
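Because the lateral and prediction layers are stored deepest-first while convouts arrive shallowest-first, both loops walk j downwards. A tiny plain-Python illustration of that pairing, with made-up names:

convouts = ['C3', 'C4', 'C5']                  # shallow -> deep
lat_layers = ['lat_C5', 'lat_C4', 'lat_C3']    # stored in reverse
j = len(convouts)
for lat in lat_layers:
    j -= 1
    print(lat, '->', convouts[j])   # lat_C5 -> C5, lat_C4 -> C4, lat_C3 -> C3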
 def _forward(self, x):
     branch3x3 = self.branch3x3_1(x)
     branch3x3 = self.branch3x3_2(branch3x3)
     branch7x7x3 = self.branch7x7x3_1(x)
     branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
     branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
     branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
     branch_pool = nn.pool(x, kernel_size=3, op="maximum", stride=2)
     outputs = [branch3x3, branch7x7x3, branch_pool]
     return outputs
 def _forward(self, x):
     branch1x1 = self.branch1x1(x)
     branch5x5 = self.branch5x5_1(x)
     branch5x5 = self.branch5x5_2(branch5x5)
     branch3x3dbl = self.branch3x3dbl_1(x)
     branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
     branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
     branch_pool = nn.pool(x, 3, "mean", stride=1, padding=1)
     branch_pool = self.branch_pool(branch_pool)
     outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
     return outputs
 def execute(self, x):
     out = nn.relu(self.bn1(self.conv1(x)))
     out = self.layer1(out)
     out = self.layer2(out)
     out = self.layer3(out)
     out = self.layer4(out)
     out = nn.pool(out, size=4, op="mean", padding=0)
     out = jt.reshape(out, [out.shape[0], -1])
     out = self.linear1(out)
     out = self.linear2(out)
     return out
    def execute(self, mesh, mode=None):
        image_size = self.image_size * (2 if self.anti_aliasing else 1)
        images = srf.soft_rasterize(mesh.face_vertices, mesh.face_textures,
                                    image_size, self.background_color,
                                    self.near, self.far, self.fill_back,
                                    self.eps, self.sigma_val, self.dist_func,
                                    self.dist_eps, self.gamma_val,
                                    self.aggr_func_rgb, self.aggr_func_alpha,
                                    self.texture_type)

        if self.anti_aliasing:
            images = nn.pool(images, 2, "mean", stride=2)
        return images
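The anti-aliasing here is plain super-sampling: the mesh is rasterized at twice the target resolution and a 2x2 mean pool brings it back down. A minimal sketch of that step on a stand-in tensor:

import jittor as jt
from jittor import nn

hi_res = jt.randn(1, 3, 512, 512)              # stand-in for a 2x render
lo_res = nn.pool(hi_res, 2, "mean", stride=2)  # back down to (1, 3, 256, 256)
print(lo_res.shape)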
Example #12
 def _forward(self, x):
     branch1x1 = self.branch1x1(x)
     branch3x3 = self.branch3x3_1(x)
     branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
     branch3x3 = jt.contrib.concat(branch3x3, dim=1)
     branch3x3dbl = self.branch3x3dbl_1(x)
     branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
     branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)]
     branch3x3dbl = jt.contrib.concat(branch3x3dbl, dim=1)
     branch_pool = nn.pool(x, kernel_size=3, op="mean", stride=1, padding=1)
     branch_pool = self.branch_pool(branch_pool)
     outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
     return outputs
Example #13
 def test_unpool(self):
     from jittor import nn
     pool = nn.MaxPool2d(2, stride=2, return_indices=True)
     unpool = nn.MaxUnpool2d(2, stride=2)
     input = jt.array([[[[1., 2, 3, 4, 0], [5, 6, 7, 8, 0],
                         [9, 10, 11, 12, 0], [13, 14, 15, 16, 0],
                         [0, 0, 0, 0, 0]]]])
     output, indices = pool(input)
     out = unpool(output, indices, output_size=input.shape)
     assert (out == jt.array([[[[0., 0., 0., 0., 0.],
                                [0., 6., 0., 8., 0.],
                                [0., 0., 0., 0., 0.],
                                [0., 14., 0., 16., 0.],
                                [0., 0., 0., 0., 0.]]]])).all()
 def _forward(self, x):
     branch1x1 = self.branch1x1(x)
     branch7x7 = self.branch7x7_1(x)
     branch7x7 = self.branch7x7_2(branch7x7)
     branch7x7 = self.branch7x7_3(branch7x7)
     branch7x7dbl = self.branch7x7dbl_1(x)
     branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
     branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
     branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
     branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
     branch_pool = nn.pool(x, kernel_size=3, op="mean", stride=1, padding=1)
     branch_pool = self.branch_pool(branch_pool)
     outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
     return outputs
Example #15
    def execute(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Squeeze
        w = nn.pool(out, out.shape[2], 'maximum', 0)
        w = nn.relu(self.fc1(w))
        w = nn.Sigmoid()(self.fc2(w))
        # Excitation
        out = out * w  # New broadcasting feature from v0.2!

        out += self.shortcut(x)
        out = nn.relu(out)
        return out
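Note that this block squeezes with a global max pool (the window spans the whole out.shape[2] spatial extent); the more common squeeze-and-excitation formulation averages instead. A sketch of the average-pool squeeze on a random tensor (not this repository's code):

import jittor as jt
from jittor import nn

out = jt.randn(2, 64, 8, 8)
w = nn.AdaptiveAvgPool2d(1)(out)   # (2, 64, 1, 1) per-channel descriptor
print(w.shape)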
Example #16
    def execute(self, x):
        out = nn.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(nn.relu(self.bn2(out)))

        # Squeeze
        w = nn.pool(out, out.shape[2], 'maximum', 0)
        w = nn.relu(self.fc1(w))
        w = self.act(self.fc2(w))
        # Excitation
        out = out * w

        out += shortcut
        return out
Example #17
def resnet(x, is_train, block, layers, num_classes=1000):
    layer_in_planes = 64
    x = nn.conv(x, 3, layer_in_planes, 7, 3, 2)
    x = nn.batch_norm(x, is_train)
    x = nn.relu(x)
    x = nn.pool(x, 3, "maximum", 1, 2)
    x, layer_in_planes = block(x, is_train, 64, layers[0], layer_in_planes)
    x, layer_in_planes = block(x, is_train, 128, layers[1], layer_in_planes, 2)
    x, layer_in_planes = block(x, is_train, 256, layers[2], layer_in_planes, 2)
    x, layer_in_planes = block(x, is_train, 512, layers[3], layer_in_planes, 2)

    # global average pooling over the spatial dims, written with reindex_reduce
    x = x.reindex_reduce("add", [x.shape[0], x.shape[1]],
                         ["i0", "i1"]) / x.shape[2] / x.shape[3]
    x = nn.linear(x, num_classes)

    return x
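The reindex_reduce line is a hand-rolled global average pool. A quick sketch checking it against the AdaptiveAvgPool2d used elsewhere on this page:

import jittor as jt
from jittor import nn

x = jt.randn(2, 512, 7, 7)
gap = nn.AdaptiveAvgPool2d(1)(x).reshape((x.shape[0], -1))
manual = x.reindex_reduce("add", [x.shape[0], x.shape[1]],
                          ["i0", "i1"]) / x.shape[2] / x.shape[3]
print((gap - manual).abs().max())  # ~0 up to floating-point rounding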
    def execute(self, x):
        assert x.ndim == 4
        # 2x2, float32 => downscale using _blur2d().
        if self.blur is not None and x.dtype == jt.float32:
            return self.blur(x)

        # Apply gain.
        if self.gain != 1:
            x = x * self.gain

        # No-op => early exit.
        if self.factor == 1:
            return x

        # Large factor => downscale with average pooling.
        return nn.pool(x,
                       kernel_size=self.factor,
                       op='mean',
                       stride=self.factor)
Example #19
    def execute(self, input, step=0, alpha=-1):
        for i in range(step, -1, -1):
            index = self.n_layer - i - 1
            if i == step:
                out = self.from_rgb[index](input)
            if i == 0:
                # minibatch standard deviation: var is a method, so it must be
                # called, with the statistic taken over the batch dimension
                out_std = jt.sqrt(out.var(dim=0) + 1e-8)
                mean_std = out_std.mean()
                mean_std = mean_std.expand(out.shape[0], 1, 4, 4)
                out = jt.contrib.concat([out, mean_std], 1)

            out = self.progression[index](out)

            if i > 0:
                if i == step and 0 <= alpha < 1:
                    # nn.pool requires an op; average pooling is the usual
                    # choice for downsampling the skip-RGB branch
                    skip_rgb = nn.pool(input, kernel_size=2, op="mean", stride=2)
                    skip_rgb = self.from_rgb[index + 1](skip_rgb)
                    out = (1 - alpha) * skip_rgb + alpha * out

        out = out.squeeze(2).squeeze(2)
        out = self.linear(out)
        return out
Example #20
 def test_index_pool(self):
     pool = jt.nn.Pool(2, return_indices=True)
     a = jt.randn([10, 3, 100, 100])
     b, idx = pool(a)
     idx.sync()
Example #21
def rasterize_rgbad(
    faces,
    textures=None,
    image_size=DEFAULT_IMAGE_SIZE,
    anti_aliasing=DEFAULT_ANTI_ALIASING,
    near=DEFAULT_NEAR,
    far=DEFAULT_FAR,
    eps=DEFAULT_EPS,
    background_color=DEFAULT_BACKGROUND_COLOR,
    return_rgb=True,
    return_alpha=True,
    return_depth=True,
):
    """
    Generate RGB, alpha channel, and depth images from faces and textures (for RGB).

    Args:
        faces (jittor.Var): Faces. The shape is [batch size, number of faces, 3 (vertices), 3 (XYZ)].
        textures (jittor.Var): Textures.
            The shape is [batch size, number of faces, texture size, texture size, texture size, 3 (RGB)].
        image_size (int): Width and height of rendered images.
        anti_aliasing (bool): do anti-aliasing by super-sampling.
        near (float): nearest z-coordinate to draw.
        far (float): farthest z-coordinate to draw.
        eps (float): small epsilon for approximated differentiation.
        background_color (tuple): background color of RGB images.
        return_rgb (bool): generate RGB images or not.
        return_alpha (bool): generate alpha channels or not.
        return_depth (bool): generate depth images or not.

    Returns:
        dict:
            {
                'rgb': RGB images. The shape is [batch size, 3, image_size, image_size].
                'alpha': Alpha channels. The shape is [batch size, image_size, image_size].
                'depth': Depth images. The shape is [batch size, image_size, image_size].
            }

    """
    if textures is None:
        inputs = [faces, None]
    else:
        inputs = [faces, textures]

    if anti_aliasing:
        # 2x super-sampling
        rgb, alpha, depth = Rasterize(image_size * 2, near, far, eps,
                                      background_color, return_rgb,
                                      return_alpha, return_depth)(*inputs)
    else:
        rgb, alpha, depth = Rasterize(image_size, near, far, eps,
                                      background_color, return_rgb,
                                      return_alpha, return_depth)(*inputs)

    # transpose & vertical flip
    if return_rgb:
        rgb = rgb.permute((0, 3, 1, 2))
        # may need to look at this again because it seems to be very slow
        rgb = rgb[:, :, list(reversed(range(rgb.shape[2]))), :]
    if return_alpha:
        alpha = alpha[:, list(reversed(range(alpha.shape[1]))), :]
    if return_depth:
        depth = depth[:, list(reversed(range(depth.shape[1]))), :]

    if anti_aliasing:
        # 0.5x down-sampling
        if return_rgb:
            rgb = nn.pool(rgb, 2, "mean", stride=2)
        if return_alpha:
            # squeeze the channel dim added for pooling, restoring [B, H, W]
            alpha = nn.pool(alpha.unsqueeze(1), 2, "mean", stride=2).squeeze(1)
        if return_depth:
            depth = nn.pool(depth.unsqueeze(1), 2, "mean", stride=2).squeeze(1)

    ret = {
        'rgb': rgb if return_rgb else None,
        'alpha': alpha if return_alpha else None,
        'depth': depth if return_depth else None,
    }

    return ret
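A hedged usage sketch for rasterize_rgbad, following its docstring; the faces and textures are random stand-ins, and Rasterize plus the DEFAULT_* constants are assumed to come from the surrounding module:

import jittor as jt

faces = jt.randn(1, 100, 3, 3)            # [B, n_faces, 3 vertices, XYZ]
textures = jt.randn(1, 100, 2, 2, 2, 3)   # [B, n_faces, ts, ts, ts, RGB]
ret = rasterize_rgbad(faces, textures, image_size=64)
print(ret['rgb'].shape, ret['alpha'].shape, ret['depth'].shape)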
Example #22
    def execute(self, x):
        x = self.maskiou_net(x)
        maskiou_p = nn.pool(x, kernel_size=x.shape[2],
                            op='maximum').squeeze(-1).squeeze(-1)

        return maskiou_p
 def execute(self, x):
     x = self.conv1(x)
     x = self.bn1(x)
     x = nn.relu(x)
     x = nn.pool(x, op='maximum', kernel_size=3, stride=2, padding=1)
     return x
Example #24
 def execute(self, x):
     return [nn.pool(x, kernel_size=1, op="maximum", stride=2, padding=0)]