Example #1
 def predictor(self, E, S):
     temp = torch.cat([E, S], 1)
     temp = self.pred_f1(temp)
     temp = F.relu(temp)
     out = self.pred_f2(temp)
     out = F.relu(out)
     return out
Example #2
    def forward(self, state, en1, en2):
        """
            Input:
                state -> tensor[B, 128, H, W]
                en1 -> tensor[B, 64, H * 2, W * 2]
                en2 -> tensor[B, 32, H * 4, W * 4]
            Output:
                observation -> tensor[B, C, H * 8, W * 8]
                en1 -> tensor[B, 64, H * 2, W * 2]
                en2 -> tensor[B, 32, H * 4, W * 4]
        """
        upsample1 = F.relu(self.deconv1(state))

        cat1 = torch.cat([upsample1, en2], 1)
        en2 = self.conv1_skip_norm(F.relu(self.conv1_skip(cat1)))
        upsample1 = (self.conv1_norm(F.relu(self.conv1_1(upsample1))) +
                     en2) / 2.0
        upsample1 = F.relu(self.conv1_2(upsample1))
        # upsample1 = F.relu(self.conv1(upsample1)) + en2
        upsample2 = F.relu(self.deconv2(upsample1))

        cat2 = torch.cat([upsample2, en1], 1)
        en1 = self.conv2_skip_norm(F.relu(self.conv2_skip(cat2)))
        upsample2 = (self.conv2_norm(F.relu(self.conv2_1(upsample2))) +
                     en1) / 2.0
        upsample2 = F.relu(self.conv2_2(upsample2))
        # upsample2 = F.relu(self.conv2(upsample2)) + en1
        upsample3 = F.relu(self.deconv3(upsample2))

        upsample3 = F.relu(self.conv3(upsample3))

        observation = self.conv_top(upsample3)

        return observation, en1, en2
Example #3
    def forward(self, x):
        x = F.relu(self._fc1(x))
        x = F.relu(self._fc2(x))
        x = F.relu(self._fc3(x))
        x = self._fc4(x)

        return x
Example #4
 def forward(self, X):
     X = F.relu(self.conv1(X))
     X = F.relu(self.conv2(X))
     X = F.relu(self.conv3(X))
     X = F.relu(self.conv4(X))
     X = X.view(-1, 213 * 213 * 20)
     return F.softmax(self.fc1(X), dim=1)
Example #5
    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        x = x.view(x.size(0), -1)

        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))

        return x
Example #6
     def forward(self, x):
         w1 = self.weather_encoder(x[:, 2:-7], encode=True)
         w2 = self.pollution_encoder(x[:, -7:], encode=True)
         feature = torch.cat([w1, w2, x[:, 0:2]], dim=1)
         h1 = F.relu(self.layer1(feature))
         h2 = F.relu(self.layer2(h1))

         return self.layer3(h2)
Example #7
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     x = self.fc2(x)
     return F.log_softmax(x, dim=1)
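This forward is the classic PyTorch MNIST tutorial network. As a sketch, the layer definitions it assumes can be reconstructed from the x.view(-1, 320) flatten on a 28x28 single-channel input; the sizes below are inferred, not taken from the snippet.

    import torch.nn as nn

    # Inferred geometry: 28 -> conv(5) -> 24 -> pool -> 12 -> conv(5) -> 8 -> pool -> 4
    conv1 = nn.Conv2d(1, 10, kernel_size=5)
    conv2 = nn.Conv2d(10, 20, kernel_size=5)
    conv2_drop = nn.Dropout2d()
    fc1 = nn.Linear(320, 50)  # 20 channels * 4 * 4 = 320
    fc2 = nn.Linear(50, 10)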
Example #8
    def forward_features(self, x):
        x = self.pool1(F.relu(self.conv1_bn(self.conv1(x))))
        x = self.pool2(F.relu(self.conv2_bn(self.conv2(x))))

        x = x.view(-1, 16 * window_size * window_size)
        x = F.relu(self.fc1(x))

        return x
Example #9
 def encoder(self, x):
     h = self.encoder_l(x)
     h = self.d_f1(h)
     h = F.relu(h)
     E = self.d_fE(h)
     E = F.relu(E)
     S = self.d_fS(h)
     S = F.relu(S)
     return E, S
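This encoder reads like the counterpart of the predictor in Example #1: it splits the input into two codes E and S, which the predictor later concatenates. Below is a hypothetical wiring of the layers both methods reference; every dimension and the body of encoder_l are assumptions.

    import torch.nn as nn

    in_dim, hidden, latent = 784, 128, 32          # assumed sizes
    encoder_l = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU())
    d_f1 = nn.Linear(hidden, hidden)
    d_fE = nn.Linear(hidden, latent)               # head producing E
    d_fS = nn.Linear(hidden, latent)               # head producing S
    pred_f1 = nn.Linear(2 * latent, hidden)        # matches torch.cat([E, S], 1)
    pred_f2 = nn.Linear(hidden, 1)                 # output width is a guess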
Example #10
    def forward(self, x):
        out = F.relu(self.down_sample(x))
        out1 = F.relu(self.spatial(out))
        out2 = F.relu(self.temporal(out1))
        out3 = self.up_sample(out2)
        out3 = self.temp_up_sample(out3)
        out4 = self.batch_norm(out3)

        return out4
Example #11
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = self.pool(x)
     x = F.relu(self.conv2(x))
     x = self.pool(x)
     x = F.relu(self.conv3(x))
     x = x.view(-1, 1*1*120)
     x = F.relu(self.fc1(x))
     return self.fc2(x)
Example #12
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.max_pool2d(x, 2, 2)
     x = F.relu(self.conv2(x))
     x = F.max_pool2d(x, 2, 2)
     x = x.view(-1, 4 * 4 * 50)
     x = F.relu(self.fc1(x))
     x = self.fc2(x)
     return F.log_softmax(x, dim=1)
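The x.view(-1, 4 * 4 * 50) flatten pins down the geometry here: on a 28x28 single-channel input, two 5x5 convolutions and two 2x2 max pools leave a 4x4 map with 50 channels. A sketch of the implied layers (the output width of 10 is an assumption):

    import torch.nn as nn

    conv1 = nn.Conv2d(1, 20, 5, 1)    # 28 -> 24, pool -> 12
    conv2 = nn.Conv2d(20, 50, 5, 1)   # 12 -> 8,  pool -> 4
    fc1 = nn.Linear(4 * 4 * 50, 500)
    fc2 = nn.Linear(500, 10)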
Example #13
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)

        return x
Example #14
    def forward(self, x):
        h1 = self.conv1(x)
        h2 = F.relu(h1)
        h3 = self.max_pool(h2)

        h4 = self.conv2(h3)
        h5 = F.relu(h4)

        return self.full(h5)
Example #15
 def forward(self, input):
     if flags.grayscale_model:
         out = input.view(input.shape[0], 2, 14 * 14).sum(dim=1)
     else:
         out = input.view(input.shape[0], 2 * 14 * 14)
     out1 = F.relu(self.lin1(out))
     out2 = F.relu(self.lin2(out1))
     out3 = self.lin3(out2)
     return out1, out2, out3
Example #16
    def _discriminative_loss(self, embedding, seg_gt):
        batch_size = embedding.shape[0]
        embed_dim = embedding.shape[1]

        var_loss = torch.tensor(0, dtype=embedding.dtype, device=embedding.device)
        dist_loss = torch.tensor(0, dtype=embedding.dtype, device=embedding.device)
        reg_loss = torch.tensor(0, dtype=embedding.dtype, device=embedding.device)

        for b in range(batch_size):
            embedding_b = embedding[b]  # (embed_dim, H, W)
            seg_gt_b = seg_gt[b]

            labels = torch.unique(seg_gt_b)
            labels = labels[labels != 0]
            num_lanes = len(labels)
            if num_lanes == 0:
                # please refer to issue here: https://github.com/harryhan618/LaneNet/issues/12
                _nonsense = embedding.sum()
                _zero = torch.zeros_like(_nonsense)
                var_loss = var_loss + _nonsense * _zero
                dist_loss = dist_loss + _nonsense * _zero
                reg_loss = reg_loss + _nonsense * _zero
                continue

            centroid_mean = []
            for lane_idx in labels:
                seg_mask_i = (seg_gt_b == lane_idx)
                if not seg_mask_i.any():
                    continue
                embedding_i = embedding_b[seg_mask_i]

                mean_i = torch.mean(embedding_i, dim=0)
                centroid_mean.append(mean_i)

                # ---------- var_loss -------------
                var_loss = var_loss + torch.mean(F.relu(
                    torch.norm(embedding_i - mean_i, dim=1) - self.delta_var) ** 2) / num_lanes
            centroid_mean = torch.stack(centroid_mean)  # (n_lane, embed_dim)

            if num_lanes > 1:
                centroid_mean1 = centroid_mean.reshape(-1, 1, embed_dim)
                centroid_mean2 = centroid_mean.reshape(1, -1, embed_dim)
                dist = torch.norm(centroid_mean1 - centroid_mean2, dim=2)  # shape (num_lanes, num_lanes)
                dist = dist + torch.eye(num_lanes, dtype=dist.dtype,
                                        device=dist.device) * self.delta_dist  # the diagonal (self-distance) is 0; add delta_dist so the hinge below ignores it

                # divide by two because each unordered pair is counted twice above
                dist_loss = dist_loss + torch.sum(F.relu(-dist + self.delta_dist) ** 2) / (
                        num_lanes * (num_lanes - 1)) / 2

            # reg_loss is not used in original paper
            # reg_loss = reg_loss + torch.mean(torch.norm(centroid_mean, dim=1))

        var_loss = var_loss / batch_size
        dist_loss = dist_loss / batch_size
        reg_loss = reg_loss / batch_size
        return var_loss, dist_loss, reg_loss
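The pairwise half of dist_loss rests on a broadcasting trick worth isolating: reshaping the centroids to (N, 1, D) and (1, N, D) produces all N x N distances at once, and adding delta_dist on the diagonal keeps the zero self-distances out of the hinge. A standalone sketch (the margin value 3.0 is an assumption):

    import torch
    import torch.nn.functional as F

    delta_dist = 3.0                           # assumed margin
    centroids = torch.randn(3, 4)              # (num_lanes, embed_dim)
    dist = torch.norm(centroids[:, None, :] - centroids[None, :, :], dim=2)
    dist = dist + torch.eye(3) * delta_dist    # mask the zero diagonal
    # halve the sum because each unordered pair appears twice
    dist_loss = torch.sum(F.relu(delta_dist - dist) ** 2) / (3 * 2) / 2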
Example #17
    def forward(self, x):
        out = F.relu(self.down_sample(x))
        out1 = self.temporal(out)
        out2 = self.temp_up_sample(out1)
        out3 = F.relu(self.spatial(out))
        out4 = F.relu(out3 + out2)
        out5 = self.up_sample(out4)
        out6 = self.batch_norm(out5)

        return out6
Example #18
        def forward(self, x):
            x = x.view(x.size(0), self._in_shape[0], self._in_shape[1],
                       self._in_shape[2])
            x = F.relu(self.conv1(x))
            x = F.relu(self.conv2(x))
            x = F.relu(self.conv3(x))
            x = F.relu(self.fc4(x.view(x.size(0), -1)))
            x = self.fc5(x)  # x is already flat after fc4

            return x
Example #19
File: MRGNN.py Project: lpasa/MRGNN
    def funnel_output(self, H):

        x = self.bn_out(H)
        x = F.relu(self.lin1(x))
        x = self.dropout(x)
        x = F.relu(self.lin2(x))
        x = self.dropout(x)
        x = self.out_fun(self.lin3(x))

        return x
Example #20
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.relu(self.conv2(x))
     x = self.pool(x)
     x = F.relu(self.conv3(x))
     #x = F.relu(self.conv4(x))
     x = self.pool(x)
     x = x.view(-1, 6 * 6 * 128)
     x = F.relu(self.fc1(x))
     return self.fc2(x)
Example #21
    def forward(self, x):
        x = self.h1(x)
        x = F.relu(x)
        x = self.h2(x)
        x = F.relu(x)

        x = self.out(x)
        x = torch.sigmoid(x)

        return x
Example #22
    def forward(self, param):
        # Flatten the input if needed so it fits the first layer:
        # param = param.view(param.shape[0], -1)
        param = F.relu(self.layer_input(param))
        param = self.layer_dropout(param)
        param = F.relu(self.layer_hidden_one(param))
        # The same hidden layer is applied six times in a row (weight sharing).
        for _ in range(6):
            param = F.relu(self.layer_hidden_two(param))
        param = F.relu(self.layer_dropout(param))
        param = F.relu(self.layer_output(param))
        return param
Example #23
    def forward(self, x):
        # Dropout on image input
        x = self.img_dp(x)

        # Relu after batch_norm
        x = F.relu(self.conv1_bn(self.conv1(x)))
        x = F.relu(self.conv2_bn(self.conv2(x)))
        x = F.relu(self.conv3_bn(self.conv3(x)))

        # Dropout on max pool layer
        x = self.mp1_dp(x)

        # Relu after batch_norm
        x = F.relu(self.conv4_bn(self.conv4(x)))
        x = F.relu(self.conv5_bn(self.conv5(x)))
        x = F.relu(self.conv6_bn(self.conv6(x)))

        # Dropout on max pool layer
        x = self.mp2_dp(x)

        # Relu after batch_norm
        x = F.relu(self.conv7_bn(self.conv7(x)))
        x = F.relu(self.conv8_bn(self.conv8(x)))
        x = F.relu(self.conv9_bn(self.conv9(x)))

        # X here has shape (batch_size, num_features, w, h) which is (64, 100, 1, 1)

        # Global average pooling for each feature map (mean over the flattened feature map dimension)
        x = torch.mean(x.view(x.size(0), x.size(1), -1), dim=2)

        x = self.out(x)
        return x
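The torch.mean(x.view(...), dim=2) line is global average pooling written by hand: flatten each feature map, then average it, turning (B, C, H, W) into (B, C). A minimal sketch, with the equivalent built-in for comparison:

    import torch
    import torch.nn.functional as F

    x = torch.randn(64, 100, 7, 7)                              # (B, C, H, W)
    gap = torch.mean(x.view(x.size(0), x.size(1), -1), dim=2)   # (64, 100)
    same = F.adaptive_avg_pool2d(x, 1).flatten(1)               # built-in form
    assert torch.allclose(gap, same, atol=1e-6)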
Example #24
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.max_pool2d(x, 2, 2)

        x = x.view(-1, 7 * 7 * 70)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
Example #25
 def forward(self, x):
     """Forward feature map of a single scale level."""
     rpn_out = self.rpn_conv_dw(x)
     rpn_out = F.relu(rpn_out, inplace=True)
     rpn_out = self.rpn_conv_linear(rpn_out)
     rpn_out = F.relu(rpn_out, inplace=True)
     sam = torch.sigmoid(self.bn(self.sam(rpn_out)))
     x = x * sam
     rpn_cls_score = self.rpn_cls(rpn_out)
     rpn_bbox_pred = self.rpn_reg(rpn_out)
     return rpn_cls_score, rpn_bbox_pred, x
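Below is a hypothetical set of layer definitions consistent with this forward; all channel counts and the anchor count are assumptions. The depthwise 3x3 conv plus 1x1 conv form a separable RPN conv, and sam yields a single-channel attention map that broadcasts over x in x * sam.

    import torch.nn as nn

    in_ch, num_anchors = 256, 3                        # assumed
    rpn_conv_dw = nn.Conv2d(in_ch, in_ch, 3, padding=1, groups=in_ch)
    rpn_conv_linear = nn.Conv2d(in_ch, in_ch, 1)
    sam = nn.Conv2d(in_ch, 1, 1)                       # 1-channel attention map
    bn = nn.BatchNorm2d(1)
    rpn_cls = nn.Conv2d(in_ch, num_anchors * 2, 1)
    rpn_reg = nn.Conv2d(in_ch, num_anchors * 4, 1)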
Example #26
    def forward(self, x):
        x = self.pool1(F.relu(self.conv1_bn(self.conv1(x))))
        x = self.pool2(F.relu(self.conv2_bn(self.conv2(x))))

        x = x.view(-1, self.num_conv // 2 * window_size * window_size)
        x = F.relu(self.fc1(x))
        x = self.fc1_dp(x)
        x = self.fc2(x)
        x = self.out(x)

        return x
Example #27
    def forward( self, state ) :
        r"""Forward pass for this deterministic policy, used for the max Q evaluation

        Args:
            state (torch.tensor): state used to decide the action

        """
        x = F.relu( self.fc1( state ) )
        x = F.relu( self.fc2( x ) )
        x = torch.tanh( self.fc3( x ) )

        return x
Example #28
    def forward( self, observation ) :
        r"""Forward pass for this deterministic policy, used for the max Q evaluation

        Args:
            observation (torch.tensor): observation used to decide the action

        """
        x = self.bn0( observation )
        x = F.relu( self.bn1( self.fc1( x ) ) )
        x = F.relu( self.bn2( self.fc2( x ) ) )
        x = torch.tanh( self.fc3( x ) )

        return x
Example #29
    def forward( self, state, action ) :
        r"""Forward pass for this critic at a given (s,a) pair

        Args:
            state (torch.tensor): state of the pair to be evaluated
            action (torch.tensor): action of the pair to be evaluated

        """
        x = F.relu( self.fc1( state ) )
        x = F.relu( self.fc2( torch.cat( [x, action], dim = 1 ) ) )
        x = self.fc3( x )

        return x
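The torch.cat([x, action], dim=1) explains the sizing constraint in this critic: the action enters after the first hidden layer, so fc2 must accept h1 + action_dim features, as in the original DDPG critic. A hypothetical sketch (all dimensions are assumptions):

    import torch.nn as nn

    state_dim, action_dim, h1, h2 = 33, 4, 400, 300   # assumed sizes
    fc1 = nn.Linear(state_dim, h1)
    fc2 = nn.Linear(h1 + action_dim, h2)              # action injected here
    fc3 = nn.Linear(h2, 1)                            # scalar Q-value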
Example #30
 def forward(self, x: Tensor) -> Tensor:
     x = self.conv1(x)
     x = F.relu(x)
     x = self.conv2(x)
     x = F.relu(x)
     x = F.max_pool2d(x, 2)
     x = self.dropout1(x)
     x = torch.flatten(x, 1)
     x = self.fc1(x)
     x = F.relu(x)
     x = self.dropout2(x)
     x = self.fc2(x)
     output = F.log_softmax(x, dim=1)
      return output
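This forward matches the current PyTorch MNIST example. A sketch of the layer definitions it implies, inferred by assuming a 28x28 single-channel input (so treat the sizes as assumptions):

    import torch.nn as nn

    conv1 = nn.Conv2d(1, 32, 3, 1)    # 28 -> 26
    conv2 = nn.Conv2d(32, 64, 3, 1)   # 26 -> 24, pool -> 12
    dropout1 = nn.Dropout(0.25)
    dropout2 = nn.Dropout(0.5)
    fc1 = nn.Linear(9216, 128)        # 64 * 12 * 12 = 9216
    fc2 = nn.Linear(128, 10)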