def execute(self, x):
    batch_size = x.shape[0]
    x = nn.relu(self.fc1(x))
    x = nn.relu(self.fc2(x))

    # decoder follows NMR (Neural Mesh Renderer)
    centroid = self.fc_centroid(x) * self.centroid_scale

    bias = self.fc_bias(x) * self.bias_scale
    bias = bias.view(-1, self.nv, 3)

    base = self.vertices_base * self.obj_scale

    sign = nn.sign(base)
    base = base.abs()
    base = jt.log(base / (1 - base))  # map vertex magnitudes to logit space

    centroid = jt.tanh(centroid[:, None, :])
    scale_pos = 1 - centroid
    scale_neg = centroid + 1

    # deform in logit space, restore the sign, then rescale around the centroid
    vertices = (base + bias).sigmoid() * sign
    vertices = nn.relu(vertices) * scale_pos - nn.relu(-vertices) * scale_neg
    vertices = vertices + centroid
    vertices = vertices * 0.5
    faces = self.faces[None, :, :].repeat(batch_size, 1, 1)

    return vertices, faces
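The logit-space mapping above keeps deformed vertices inside the unit cube: fc_bias shifts coordinates in logit space, and the sigmoid maps them back into (0, 1) before the sign is restored. A minimal sketch of the round trip (plain Jittor, illustrative values) shows that a zero bias reproduces the base vertices exactly, so the network only has to learn a residual deformation:

import jittor as jt
from jittor import nn

v = jt.array([0.25, -0.5, 0.75])    # base vertex coordinates in (-1, 1)
sign, mag = nn.sign(v), v.abs()
logit = jt.log(mag / (1 - mag))     # inverse of the sigmoid
recovered = logit.sigmoid() * sign  # equals v when the predicted bias is zero
print(recovered.numpy())            # [ 0.25 -0.5   0.75]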
def execute(self, x):
    x = nn.relu(nn.max_pool2d(self.conv1(x), 2))
    x = nn.relu(nn.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
    x = x.view((-1, 320))
    x = nn.relu(self.fc1(x))
    x = self.fc2(x)
    return nn.log_softmax(x, dim=1), x
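The hard-coded 320 only works for MNIST-sized input: two conv+pool stages reduce a 1x28x28 image to 20 channels of 4x4, and 20 * 4 * 4 = 320. A hedged sketch of a module definition consistent with the layer names above (the channel counts and the plain-dropout choice are assumptions reconstructed from that arithmetic, not the original source):

import jittor as jt
from jittor import nn

class MnistNet(nn.Module):
    def __init__(self):
        self.conv1 = nn.Conv2d(1, 10, 5)   # 28x28 -> 24x24, pool -> 12x12
        self.conv2 = nn.Conv2d(10, 20, 5)  # 12x12 -> 8x8, pool -> 4x4
        self.conv2_drop = nn.Dropout()     # assumed; the snippet only names it
        self.fc1 = nn.Linear(320, 50)      # 20 * 4 * 4 = 320
        self.fc2 = nn.Linear(50, 10)       # 10 MNIST classes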
Example #3
def execute(self, observations, actions):
    x = self.fc1(jt.contrib.concat([observations, actions], dim=-1))
    x = nn.relu(x)
    x = self.fc2(x)
    x = nn.relu(x)
    x = self.fc3(x)
    return x.squeeze(1)
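This is the usual critic shape for continuous control: state and action are concatenated on the last axis, so fc1's input width must equal obs_dim + act_dim, and fc3 emits one Q-value per sample. A self-contained sketch with illustrative sizes (the dimensions and hidden width are assumptions):

import jittor as jt
from jittor import nn

class Critic(nn.Module):
    def __init__(self, obs_dim=17, act_dim=6, hidden=256):
        self.fc1 = nn.Linear(obs_dim + act_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, 1)  # scalar Q-value per sample

    def execute(self, observations, actions):
        x = nn.relu(self.fc1(jt.contrib.concat([observations, actions], dim=-1)))
        x = nn.relu(self.fc2(x))
        return self.fc3(x).squeeze(1)

q = Critic()(jt.random((32, 17)), jt.random((32, 6)))  # Q-values, shape [32]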
Example #4
def execute(self, x):
    x = self.fc1(x)
    x = nn.relu(x)
    x = self.fc2(x)
    x = nn.relu(x)
    x = self.fc3(x)
    return jt.tanh(x)  # squash outputs into (-1, 1)
def execute(self, x):
    out = nn.relu(self.bn1(self.conv1(x)))
    out = nn.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))
    out += self.shortcut(x)
    out = nn.relu(out)
    return out
def execute(self, x1, x2):
    x1 = nn.relu(self.fc1a(x1))
    x1 = self.fc1b(x1)
    x2 = nn.relu(self.fc2a(x2))
    x2 = self.fc2b(x2)
    # stack the two heads along the batch axis, then normalize per row
    x = jt.contrib.concat((x1, x2), dim=0)
    return nn.log_softmax(x, dim=1)
def execute(self, x):
    out = nn.relu(self.bn1(x))
    shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
    out = self.conv1(out)
    out = self.conv2(nn.relu(self.bn2(out)))
    out += shortcut
    return out
Example #8
def execute(self, x, proposals):
    x = self.pooler(x, proposals)
    x = x.reshape(x.shape[0], -1)
    x = nn.relu(self.fc6(x))
    x = nn.relu(self.fc7(x))

    return x
Example #9
def execute(self, x):
    residual = x
    out = self.conv1(x)
    out = self.bn1(out)
    out = nn.relu(out)
    # split channels into `scale` groups of `width` and process them hierarchically
    spx = jt.split(out, self.width, 1)
    for i in range(self.nums):
        if i == 0 or self.stype == 'stage':
            sp = spx[i]
        else:
            sp = sp + spx[i]
        sp = self.convs[i](sp)
        sp = nn.relu(self.bns[i](sp))
        if i == 0:
            out = sp
        else:
            out = jt.contrib.concat((out, sp), dim=1)
    if self.scale != 1 and self.stype == 'normal':
        out = jt.contrib.concat((out, spx[self.nums]), dim=1)
    elif self.scale != 1 and self.stype == 'stage':
        out = jt.contrib.concat((out, self.pool(spx[self.nums])), dim=1)
    out = self.conv3(out)
    out = self.bn3(out)
    if self.downsample is not None:
        residual = self.downsample(x)
    out += residual
    out = nn.relu(out)
    return out
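This is the Res2Net bottleneck: group i is convolved after adding group i-1's output, so later groups see progressively larger receptive fields within a single block. A small shape check of the split step with illustrative numbers (width=26, scale=4):

import jittor as jt

out = jt.random((2, 104, 14, 14))  # 4 groups x 26 channels
spx = jt.split(out, 26, 1)         # four [2, 26, 14, 14] tensors
print(len(spx), spx[0].shape)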
Example #10
def GeometrySmith(N, V, L, roughness):
    NdotV = nn.relu(jt.sum(N * V, dim=2))
    NdotL = nn.relu(jt.sum(N * L, dim=2))
    ggx2 = SchlickGGX(NdotV, roughness)
    ggx1 = SchlickGGX(NdotL, roughness)

    return ggx1 * ggx2
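GeometrySmith multiplies the Schlick-GGX shadowing terms for the view and light directions; nn.relu clamps the N·V and N·L dot products at zero. The referenced SchlickGGX is not shown in this snippet; a hedged sketch using the standard direct-lighting remap k = (roughness + 1)^2 / 8 (the exact remap used by the original is an assumption):

def SchlickGGX(NdotX, roughness):
    k = ((roughness + 1.0) ** 2) / 8.0
    return NdotX / (NdotX * (1.0 - k) + k)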
Example #11
def execute(self, x, mask):
    mask_pool = nn.pool(mask, kernel_size=2, stride=2, op='maximum')
    x = jt.contrib.concat((x, mask_pool), 1)
    for layer_name in self.blocks:
        x = nn.relu(getattr(self, layer_name)(x))
    x = x.reshape(x.shape[0], -1)
    x = nn.relu(self.maskiou_fc1(x))
    x = nn.relu(self.maskiou_fc2(x))
    return x
Example #12
def execute(self, x):
    out = nn.relu(self.bn1(self.conv1(x)))
    out = self.layers(out)
    out = nn.relu(self.bn2(self.conv2(out)))
    # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
    out = self.pool(out)
    out = out.reshape([out.shape[0], -1])
    out = self.linear(out)
    return out
def execute(self, x):
    out = self.conv1(nn.relu(self.bn1(x)))
    out = self.conv2(nn.relu(self.bn2(out)))
    # transpose so dim 0 is the channel axis, append the input's channels
    # (dense connectivity), then transpose back
    out = jt.transpose(out, (1, 0, 2, 3))
    x = jt.transpose(x, (1, 0, 2, 3))
    out = jt.concat([out, x], 0)
    out = jt.transpose(out, (1, 0, 2, 3))
    return out
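The transpose sandwich is just a channel-wise concatenation written the long way around; on current Jittor it is equivalent to a single call:

out = jt.concat([out, x], dim=1)  # dense connectivity along channels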
Example #14
def execute(self, convouts: List[jt.Var]):
    """
    Args:
        - convouts (list): A list of convouts for the corresponding layers in in_channels.
    Returns:
        - A list of FPN convouts in the same order as convouts, with extra downsample layers if requested.
    """

    out = []
    x = jt.zeros((1,))
    for i in range(len(convouts)):
        out.append(x)

    # For backward compatibility, the conv layers are stored in reverse but the input and output
    # are given in the correct order. Thus, use j=-i-1 for the input and output and i for the conv layers.
    j = len(convouts)
    for lat_layer in self.lat_layers.layers.values():
        j -= 1

        if j < len(convouts) - 1:
            _, _, h, w = convouts[j].shape
            x = nn.interpolate(x,
                               size=(h, w),
                               mode=self.interpolation_mode,
                               align_corners=False)

        x = x + lat_layer(convouts[j])
        out[j] = x

    # This janky second loop is here because TorchScript.
    j = len(convouts)
    for pred_layer in self.pred_layers.layers.values():
        j -= 1
        out[j] = pred_layer(out[j])

        if self.relu_pred_layers:
            out[j] = nn.relu(out[j])

    cur_idx = len(out)

    # In the original paper, this takes care of P6
    if self.use_conv_downsample:
        for downsample_layer in self.downsample_layers.layers.values():
            out.append(downsample_layer(out[-1]))
    else:
        for idx in range(self.num_downsample):
            # Note: this is an untested alternative to out.append(out[-1][:, :, ::2, ::2]). Thanks TorchScript.
            out.append(nn.pool(out[-1], 1, stride=2, op='maximum'))

    if self.relu_downsample_layers:
        for idx in range(len(out) - cur_idx):
            # relu the extra downsample outputs in place
            out[idx + cur_idx] = nn.relu(out[idx + cur_idx])

    return out
Example #15
def execute(self, batch_size):
    # same logit-space deformation as above: `displace` shifts vertices in
    # logit space, the sigmoid maps them back, then sign and centroid are re-applied
    base = jt.log(self.vertices.abs() / (1 - self.vertices.abs()))
    centroid = jt.tanh(self.center)
    vertices = (base + self.displace).sigmoid() * nn.sign(self.vertices)
    vertices = nn.relu(vertices) * (1 - centroid) - nn.relu(-vertices) * (centroid + 1)
    vertices = vertices + centroid

    # apply Laplacian and flatten geometry constraints
    laplacian_loss = self.laplacian_loss(vertices).mean()
    flatten_loss = self.flatten_loss(vertices).mean()
    return jr.Mesh(vertices.repeat(batch_size, 1, 1),
                   self.faces.repeat(batch_size, 1, 1), dr_type='n3mr'), laplacian_loss, flatten_loss
Example #16
def basic_block(x, is_train, in_planes, out_planes, stride=1):
    # functional-style residual block: two 3x3 conv+BN stages with a
    # 1x1 projection shortcut when the channel count changes
    identity = x
    x = nn.conv(x, in_planes, out_planes, 3, 1, stride)
    x = nn.batch_norm(x, is_train)
    x = nn.relu(x)
    x = nn.conv(x, out_planes, out_planes, 3, 1)
    x = nn.batch_norm(x, is_train)
    if in_planes != out_planes:
        identity = nn.conv(identity, in_planes, out_planes, 1, 0, stride)
        identity = nn.batch_norm(identity, is_train)
    x = x + identity
    x = nn.relu(x)
    return x
Example #17
def execute(self, x):
    out = nn.relu(self.bn1(self.conv1(x)))
    out = self.bn2(self.conv2(out))

    # Squeeze: global pooling down to one value per channel
    w = nn.pool(out, out.shape[2], 'maximum', 0)
    w = nn.relu(self.fc1(w))
    w = nn.Sigmoid()(self.fc2(w))
    # Excitation: rescale each channel by its gate
    out = out * w  # New broadcasting feature from v0.2!

    out += self.shortcut(x)
    out = nn.relu(out)
    return out
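The squeeze step collapses the [B, C, H, W] activation to [B, C, 1, 1], and the excitation multiplies it back over the full map by broadcasting. A small shape check with illustrative sizes and a random stand-in for the fc2 gate:

import jittor as jt
from jittor import nn

out = jt.random((2, 64, 8, 8))
w = nn.pool(out, out.shape[2], 'maximum', 0)  # squeeze -> [2, 64, 1, 1]
gate = jt.random((2, 64, 1, 1)).sigmoid()     # stand-in for the fc2 output
print((out * gate).shape)                     # broadcasts back to [2, 64, 8, 8]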
Example #18
def execute(self):
    # `data` (node features, edge index, edge weights) is assumed to be
    # a graph object available in the enclosing scope
    x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
    x = nn.dropout(x, self.dropout)
    x = x_0 = nn.relu(self.lins[0](x))

    # GCNII-style propagation: every conv layer also receives the initial features x_0
    for conv in self.convs:
        x = nn.dropout(x, self.dropout)
        x = conv(x, x_0, edge_index, edge_weight)
        x = nn.relu(x)

    x = nn.dropout(x, self.dropout)
    x = self.lins[1](x)

    return nn.log_softmax(x, dim=-1)
Example #19
def execute(self, x):
    out = nn.relu(self.bn1(x))
    shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
    out = self.conv1(out)
    out = self.conv2(nn.relu(self.bn2(out)))

    # Squeeze
    w = nn.pool(out, out.shape[2], 'maximum', 0)
    w = nn.relu(self.fc1(w))
    w = self.act(self.fc2(w))
    # Excitation
    out = out * w

    out += shortcut
    return out
Example #20
def execute(self, x):
    features = self.features(x)
    out = nn.relu(features)
    out = jt.pool.pool(out, kernel_size=7, op="mean",
                       stride=1).reshape([features.shape[0], -1])
    out = self.classifier(out)
    return out
Example #21
def execute(self, x, proposals):
    x = self.pooler(x, proposals)
    roi_feature = x
    for layer_name in self.blocks:
        x = nn.relu(getattr(self, layer_name)(x))
    if self.maskiou:
        return x, roi_feature
    return x
Example #22
def execute(self, x):
    x0 = self.branch0(x)
    x1 = self.branch1(x)
    x2 = self.branch2(x)
    x3 = self.branch3(x)
    x_cat = self.conv_cat(jt.contrib.concat((x0, x1, x2, x3), dim=1))
    x = nn.relu(x_cat + self.conv_res(x))
    return x
Example #23
def execute(self, x):
    x = nn.AdaptiveAvgPool2d(4)(x)
    x = self.conv(x)
    x = jt.reshape(x, (x.shape[0], -1))
    x = nn.relu(self.fc1(x))
    x = nn.Dropout(0.7)(x)
    x = self.fc2(x)
    return x
Example #24
def execute(self, x):
    logits = []
    bbox_reg = []

    for feature in x:
        t = nn.relu(self.conv(feature))
        logits.append(self.cls_logits(t))
        bbox_reg.append(self.bbox_pred(t))
    return logits, bbox_reg
Example #25
def execute(self, x):
    out = nn.relu(self.bn1(self.conv1(x)))
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = self.layer4(out)
    out = self.pool(out)
    out = out.reshape([out.shape[0], -1])
    out = self.linear(out)
    return out
def execute(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.trans3(self.dense3(out))
    out = self.dense4(out)
    out = nn.relu(self.bn(out))
    out = self.pool(out)
    out = out.reshape([out.shape[0], -1])
    out = self.linear(out)
    return out
def execute(self, x, proposals):
    x = self.pooler(x, proposals)
    roi_feature = x
    for layer_name in self.blocks:
        x = nn.relu(getattr(self, layer_name)(x))
    if self.maskiou:
        return x, roi_feature
    return x
def execute(self, x):
    out = nn.relu(self.bn1(self.conv1(x)))
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = self.layer4(out)
    out = nn.pool(out, size=4, op="mean", padding=0)
    out = jt.reshape(out, [out.shape[0], -1])
    out = self.linear1(out)
    out = self.linear2(out)
    return out
Example #29
def GGX(N, H, roughness):
    # Trowbridge-Reitz GGX normal distribution term
    a = roughness * roughness
    a2 = a * a
    NdotH = nn.relu(jt.sum(N * H, dim=2))
    NdotH2 = (NdotH * NdotH).unsqueeze(2)

    num = a2
    denom = NdotH2 * (a2 - 1.0) + 1.0
    denom = 3.1415 * denom * denom  # 3.1415 approximates pi

    return num / denom
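Both GGX and GeometrySmith above expect their direction arguments as unit vectors of shape [B, P, 3], since the dot products reduce over dim=2. A hedged usage sketch with random directions (the normalization helper is illustrative, not part of the original):

import jittor as jt

def _normalize(v):
    return v / jt.sqrt((v * v).sum(dim=2, keepdims=True))

N = _normalize(jt.random((2, 4, 3)))
H = _normalize(jt.random((2, 4, 3)))
D = GGX(N, H, roughness=0.5)  # per-point distribution term, shape [2, 4, 1]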
Example #30
def execute(self, x):
    x = self.conv5_mask(x)
    x = nn.relu(x)
    x = self.mask_fcn_logits(x)
    return x