Example #1
 def forward(self, x):
     x1 = torch.max_pool2d(torch.relu(self.conv1(x)), 2)
     x2 = torch.max_pool2d(torch.relu(self.conv2(x1)), 2)
     x_ = x2.view(x2.size()[0], -1)
     x3 = torch.relu(self.fc1(x_))
     x4 = torch.relu(self.fc2(x3))
     return self.fc3(x4)
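This forward references layers defined elsewhere in the class. A minimal module definition that makes it runnable (a sketch assuming classic LeNet-5 sizes for a 1x32x32 input; the original layer definitions are not shown):

import torch
import torch.nn as nn

class LeNet(nn.Module):  # hypothetical name
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)        # 1x32x32 -> 6x28x28; pooled to 6x14x14
        self.conv2 = nn.Conv2d(6, 16, 5)       # 6x14x14 -> 16x10x10; pooled to 16x5x5
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # matches x2.view(x2.size()[0], -1)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)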
Example #2
    def forward(self, x):
        '''
        Forward prop
        '''
        x = self.layer1(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, 2)
        x = self.layer2(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, 2)
        x = self.layer3(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, 2)
        x = self.layer4(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, 2)
        # x = self.layer5(x)

        # import pdb; pdb.set_trace()
        x = torch.flatten(x, 1)

        x = self.fc1(x)

        output = torch.sigmoid(x)

        return output
Example #3
    def forward(self, x):
        # => (3, 128, 64)
        # print(f'x 0 size: {x.size()}')
        
        x = torch.max_pool2d(torch.relu(self.conv1(x)), (2, 2))
        # => (5, 62, 30)
        # print(f'x 1 size: {x.size()}')

        x = torch.max_pool2d(torch.relu(self.conv2(x)), (2, 2))
        # => (10, 29, 13)
        # print(f'x 2 size: {x.size()}')

        x = torch.max_pool2d(torch.relu(self.conv3(x)), (2, 2))
        # => (16, 12, 4)
        # print(f'x 3 size: {x.size()}')

        x = x.view(-1, 768) # flatten the data (16 * 12 * 4 = 768)
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        x = torch.relu(x)
        x = self.fc3(x)
        x = torch.relu(x)

        x1 = self.fc41(x)
        x2 = self.fc42(x)
        x3 = self.fc43(x)
        x4 = self.fc44(x)
        x5 = self.fc45(x)

        return x1, x2, x3, x4, x5
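The five parallel heads fc41..fc45 point to a fixed-length multi-character task (the (3, 128, 64) input suggests plate- or captcha-like images). A hypothetical loss over the five logit tensors, assuming targets of shape (batch, 5) and a model/images pair not shown in the snippet:

import torch.nn.functional as F

logits = model(images)  # tuple of 5 logit tensors; model and images are assumptions
loss = sum(F.cross_entropy(l, t) for l, t in zip(logits, targets.unbind(1)))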
Example #4
File: models.py Project: e259f381/RTG
    def forward(self, x):
        """
        input float32 tensor of dims [b, c, h, w]
        return output tensor of dims [b, d], where d is the number of units in final layer.
        """
        assert type(x) == torch.Tensor, f"Input must be torch tensor not {type(x)}"
        assert x.shape[1:] == self.input_dims, \
            f"Input dims {x.shape[1:]} must match {self.input_dims}"
        assert x.dtype == torch.float32, f"Datatype should be torch.float32 not {x.dtype}"

        b = x.shape[0]

        x = torch.relu(self.conv1(x))
        x = torch.max_pool2d(x, 2, 2)
        x = torch.relu(self.conv2(x))
        x = torch.max_pool2d(x, 2, 2)
        x = torch.relu(self.conv3(x))
        x = torch.max_pool2d(x, 2, 2)
        assert x.shape[1:] == self.final_dims, \
            f"Expected final shape to be {self.final_dims} but found {x.shape[1:]}"
        x = x.reshape((b, -1))
        x = torch.relu(self.fc(x))

        return x
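A call satisfying the asserted contract might look like the following sketch, where input_dims is assumed to be (3, 96, 96) purely for illustration:

x = torch.zeros(4, 3, 96, 96, dtype=torch.float32)
features = model(x)  # -> tensor of shape [4, d]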
Example #5
 def forward(self, x: torch.Tensor) -> torch.Tensor:
     x = torch.max_pool2d(torch.relu(self.conv1(x)), kernel_size=(2, 2))
     x = torch.max_pool2d(torch.relu(self.conv2(x)), kernel_size=(2, 2))
     x = x.view(-1, 16 * 5 * 5)
     x = torch.relu(self.fc1(x))
     x = torch.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #6
 def forward(self, x):
     x = torch.max_pool2d(torch.sigmoid(self.conv1(x)), (2, 2))
     x = torch.max_pool2d(torch.sigmoid(self.conv2(x)), (2, 2))
     x = x.view(x.shape[0], -1)
     x = torch.sigmoid(self.fc1(x))
     x = torch.sigmoid(self.fc2(x))
     x = torch.softmax(self.fc3(x), dim=1)
     return x
Example #7
 def forward(self: 'Model', X: torch.Tensor) -> torch.Tensor:
     X = torch.relu(self.conv1(X))
     X = torch.max_pool2d(X, 2, 2)
     X = torch.relu(self.conv2(X))
     X = torch.max_pool2d(X, 2, 2)
     X = X.view(-1, 4 * 4 * 50)
     X = torch.relu(self.fc1(X))
     X = self.fc2(X)
     return X
Example #8
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = torch.max_pool2d(torch.relu(self.conv1(x)), (2, 2))
        x = torch.max_pool2d(torch.relu(self.conv2(x)), 2)

        x = x.view(-1, self._num_flat_features(x))
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x
Example #9
 def policy(self, x):
     x = t.tanh(t.conv2d(x, self.params["cnn.1"]))
     x = t.max_pool2d(x, (2, 2))
     x = t.tanh(t.conv2d(x, self.params["cnn.2"], stride=2))
     x = t.max_pool2d(x, (2, 2))
     x = t.flatten(x, 0)
     x = self.heb1.forward(x)
     x = self.heb2.forward(x)
     x = self.heb3.forward(x)
     return last_act_fn(x)
Example #10
 def forward(self, x):
     # convolution --> activation --> pooling
     x1 = torch.max_pool2d(torch.relu(self.conv1(x)), 2)
     x2 = torch.max_pool2d(torch.relu(self.conv2(x1)), 2)
     # flatten: keep the batch dimension and collapse the rest (x2.view(1, -1) would give a single row)
     f = x2.view(x2.size()[0], -1)
     f1 = torch.relu(self.fc1(f))
     f2 = torch.relu(self.fc2(f1))
     f3 = torch.relu(self.fc3(f2))
     return f3
Example #11
    def forward(self, X: torch.Tensor) -> torch.Tensor:
        X = torch.max_pool2d(torch.relu(self.conv1(X)), 2)
        X = torch.max_pool2d(torch.relu(self.conv2(X)), 2)

        X = X.view(X.size(0), -1)

        X = torch.relu(self.fc1(X))
        X = torch.relu(self.fc2(X))
        X = torch.relu(self.fc3(X))

        return X
Example #12
    def policy(self, x):
        x = t.tanh(t.conv2d(x, self.params["cnn.1"]))
        x = t.max_pool2d(x, (2, 2))
        x = t.tanh(t.conv2d(x, self.params["cnn.2"], stride=2))
        x = t.max_pool2d(x, (2, 2))
        x = t.flatten(x, 0)

        x = t.tanh(f.linear(x, self.params["linear.1"].t()))
        x = t.tanh(f.linear(x, self.params["linear.2"].t()))
        x = last_act_fn(f.linear(x, self.params["linear.3"].t()))

        return x
Example #13
    def forward(self, x):
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        x = torch.max_pool2d(x, 2)

        x = torch.relu(self.conv3(x))
        x = torch.relu(self.conv4(x))
        x = torch.max_pool2d(x, 2)

        x = x.view(-1, 1024)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)
Example #14
    def forward(self, x):
        x = self.conv1(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, kernel_size=2, stride=2)

        x = self.conv2(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, kernel_size=2, stride=2)

        x = x.view(-1, 4 * 4 * 40)
        x = self.fc1(x)
        x = torch.relu(x)

        x = self.out(x)
        return x
Example #15
    def forward(self, x_in):
        # Passing through wireframe rendering
        x_wf = self.wireframe_rendering(x_in)

        # Passing through conv layers
        x = torch.max_pool2d(F.relu(self.conv1_bn(self.conv1(x_wf))), 2, 2)
        x = torch.max_pool2d(F.relu(self.conv2_bn(self.conv2(x))), 2, 2)
        x = F.relu(self.conv3_bn(self.conv3(x)))

        # Flattening and passing through FC Layers
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))

        return x
Example #16
def alive_masking(state_grid):
    # Take the alpha channel as the measure of “life”.
    # torch.tensor() on an existing tensor is deprecated; compare, then cast.
    alive = (torch.max_pool2d(state_grid[None, 3, :, :],
                              (3, 3), padding=1, stride=1) > 0.1).float()
    state_grid = state_grid * alive
    return state_grid
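This is the alive-masking step from the Growing Neural Cellular Automata setup: a cell survives if the alpha channel (index 3) of any neighbor exceeds 0.1. A hypothetical call, with grid sizes assumed:

state_grid = torch.rand(16, 32, 32)     # (channels, height, width); channel 3 is alpha
state_grid = alive_masking(state_grid)  # features of dead cells are zeroed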
Example #17
 def get_mask(self, x, gamma):
     mask = torch.bernoulli(torch.ones_like(x) * gamma)
     mask = 1 - torch.max_pool2d(mask,
                                 kernel_size=self.block_size,
                                 stride=1,
                                 padding=self.block_size // 2)
     return mask
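The max-pool turns each Bernoulli-sampled point into a block_size x block_size square of zeros, which is the DropBlock construction. A sketch of how such a mask is typically applied inside the same module (the rescaling step is an assumption, not part of the snippet):

x = torch.randn(8, 64, 32, 32)            # feature map; shape assumed
mask = self.get_mask(x, gamma=0.05)       # 1 = keep, 0 = inside a dropped block
x = x * mask * mask.numel() / mask.sum()  # rescale to preserve expected activation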
Example #18
    def policy(self, x):
        x = t.tanh(t.conv2d(x, self.params["cnn.1"]))
        x = t.max_pool2d(x, (2, 2))
        x = t.tanh(t.conv2d(x, self.params["cnn.2"], stride=2))
        x = t.max_pool2d(x, (2, 2))
        x = t.flatten(x, 0)

        (hx, cx) = self.hxcx = _VF.lstm_cell(x, self.hxcx,
                                             self.params['lstm.weight_ih'],
                                             self.params['lstm.weight_hh'],
                                             self.params['lstm.bias_ih'],
                                             self.params['lstm.bias_hh'])
        x = t.tanh(f.linear(hx.squeeze(), self.params["linear.2"].t()))
        x = last_act_fn(f.linear(x, self.params["linear.3"].t()))

        return x
Example #19
 def jojo_2(self, input_vars_6, input_vars_5, var_136):
     # max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
     var_150 = torch.max_pool2d(var_136, [2, 2], [2, 2], [0, 0], [1, 1], False)
     # _convolution(input, weight, bias, stride, padding, dilation, transposed,
     #              output_padding, groups, benchmark, deterministic, cudnn_enabled)
     var_168 = torch._convolution(var_150, input_vars_5, input_vars_6,
                                  [1, 1], [1, 1], [1, 1], False, [0, 0],
                                  1, False, False, True)
     return var_168
Example #20
 def forward(self, input_, kernel_size, stride, padding, dilation,
             ceil_mode):
     ceil_mode = bool(ceil_mode)
     self.params = kernel_size, stride, padding, dilation, ceil_mode
     with torch.no_grad():
         return torch.max_pool2d(input_, kernel_size, stride, padding,
                                 dilation, ceil_mode)
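Assuming the enclosing class is an nn.Module (its name here is hypothetical), calling it simply forwards the pooling parameters and returns a tensor computed outside the autograd graph:

pool = MaxPool2dOp()  # hypothetical class name
y = pool(torch.randn(1, 3, 8, 8), kernel_size=2, stride=2, padding=0,
         dilation=1, ceil_mode=False)  # -> shape (1, 3, 4, 4)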
Example #21
 def jojo_1(self, var_169, input_vars_8, input_vars_7):
     var_187 = torch._convolution(var_169, input_vars_7, input_vars_8,
                                  [1, 1], [1, 1], [1, 1], False, [0, 0],
                                  1, False, False, True)
     var_188 = torch.relu_(var_187)
     var_202 = torch.max_pool2d(var_188, [2, 2], [2, 2], [0, 0], [1, 1], False)
     return var_202
Example #22
 def forward(self, X: torch.Tensor) -> torch.Tensor:
     X = torch.max_pool2d(torch.relu(self.conv1(X)), 2, stride=2)
     X = torch.max_pool2d(torch.relu(self.conv2(X)), 2, stride=2)
     X = torch.relu(self.conv3(X))
     X = torch.max_pool2d(torch.relu(self.conv4(X)), 2, stride=2)
     X = torch.relu(self.conv5(X))
     X = torch.max_pool2d(torch.relu(self.conv6(X)), 2, stride=2)
     X = torch.relu(self.conv7(X))
     X = torch.max_pool2d(torch.relu(self.conv8(X)), 2, stride=2)
     
     X = X.view(X.size(0), -1)
     
     X = self.drop1(torch.relu(self.fc1(X)))
     X = self.drop2(torch.relu(self.fc2(X)))
     X = torch.relu(self.fc3(X))
     
     return X
Example #23
    def forward(self, inputs):
        batch_size = inputs.shape[0]
        nb_input_slices = inputs.shape[1]

        x = inputs
        x = x.view(batch_size * nb_input_slices, 1, inputs.shape[2],
                   inputs.shape[3])
        x = self.l1(x)
        x = self.bn1(x)
        x = torch.relu(x)
        x0 = self.maxpool(x)

        x1 = self.base_model.layer1(x0)
        x2 = self.base_model.layer2(x1)
        x3 = self.base_model.layer3(x2)
        x4 = self.base_model.layer4(x3)

        x2_combined = self.combine_slices(x2,
                                          self.combine_conv2,
                                          batch_size=batch_size,
                                          nb_input_slices=nb_input_slices)
        x3_combined = self.combine_slices(x3,
                                          self.combine_conv3,
                                          batch_size=batch_size,
                                          nb_input_slices=nb_input_slices)
        x4_combined = self.combine_slices(x4,
                                          self.combine_conv4,
                                          batch_size=batch_size,
                                          nb_input_slices=nb_input_slices)

        dec5 = self.dec5(x4_combined)
        dec4 = self.dec4(torch.cat([dec5, x3_combined], 1))
        dec3 = self.dec3(torch.cat([dec4, x2_combined], 1))

        segmentation_result = torch.sigmoid(self.fc_segmentation(dec3))

        # use segmentation as a weight for pooling
        segmentation_low_res = torch.max_pool2d(segmentation_result, 8)
        segmentation_low_res = torch.max(segmentation_low_res, dim=1, keepdim=True)[0] + \
                               torch.mean(segmentation_low_res, dim=1, keepdim=True)  # BxHxW
        m = torch.exp(2 * segmentation_low_res)
        a = m / torch.sum(m, dim=(2, 3), keepdim=True)

        x = torch.sum(a * x4_combined, dim=(2, 3))

        # x = avg_max_pool_2d(x4_combined)

        if self.dropout > 0:
            x = F.dropout(x, self.dropout, self.training)

        segmentation_sum = torch.sum(segmentation_result, dim=(2, 3))
        x = torch.cat([x, segmentation_sum], dim=1)

        x = self.fc1(x)
        x = torch.relu(x)
        cls = self.fc2(x)

        return cls, segmentation_result
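The pooling trick here is worth isolating: the sigmoid segmentation map is downsampled with max_pool2d, sharpened with exp, normalized into spatial weights, and used for attention-weighted pooling of the deepest features. A standalone sketch with assumed shapes:

feat = torch.randn(2, 256, 16, 16)       # stands in for x4_combined
seg = torch.rand(2, 1, 128, 128)         # sigmoid segmentation output
seg_low = torch.max_pool2d(seg, 8)       # -> (2, 1, 16, 16)
m = torch.exp(2 * seg_low)
a = m / m.sum(dim=(2, 3), keepdim=True)  # weights sum to 1 over each map
pooled = (a * feat).sum(dim=(2, 3))      # -> (2, 256)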
Example #24
def T_metrics_by_pixels(model, ds, class_of_interest=2, pool_k=1, pool_s=1):
    assert isinstance(ds, ConcisePixelLevelDs)

    tp_fn_tn_fp = np.array([0., .0, .0, .0])
    total_samples_ = np.array([0.0, .0])
    device = next(model.parameters()).device
    for (x1, x2), future_tensor in ds:
        inp = (x1.view(1, *x1.shape).to(device),
               x2.view(1, *x2.shape).to(device))
        future_tensor = future_tensor[0].to(device)
        future_tensor = future_tensor.view(1, *future_tensor.shape)
        future_tensor = max_pool2d(future_tensor.float(), pool_k, pool_s)[0]

        pred = model(inp)[0]
        pred = pred.argmax(dim=0)
        pred = max_pool2d(pred.view(1, *pred.shape).float(), pool_k, pool_s)[0]

        total_positives_cond = future_tensor == class_of_interest
        total_positives = total_positives_cond.sum().item()
        total_negatives = future_tensor.numel() - total_positives

        true_positives = (
            pred[total_positives_cond] == class_of_interest).sum().item()
        false_negatives = (pred[total_positives_cond] !=
                           class_of_interest).sum().item()

        negative_cond = ~total_positives_cond
        true_negatives = (pred[negative_cond] !=
                          class_of_interest).sum().item()
        false_positives = (
            pred[negative_cond] == class_of_interest).sum().item()

        tp_fn_tn_fp += np.array(
            [true_positives, false_negatives, true_negatives, false_positives])
        total_samples_ += np.array([total_positives, total_negatives])
    # computing the average on the number of real positive values
    positive_avg = tp_fn_tn_fp[:2] / total_samples_[0]
    # computing the average on the number of real negative values
    negative_avg = tp_fn_tn_fp[2:] / total_samples_[1]

    print(
        "True Positives: {0}, False Negatives: {1}, True Negatives: {2}, False Positives: {3}"
        .format(*tp_fn_tn_fp))

    return positive_avg, negative_avg
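Since positive_avg is (TP, FN) / P and negative_avg is (TN, FP) / N, recall and specificity fall out directly. A usage sketch (pooling parameters chosen arbitrarily):

positive_avg, negative_avg = T_metrics_by_pixels(model, ds, pool_k=4, pool_s=4)
recall = positive_avg[0]       # TP / (TP + FN)
specificity = negative_avg[0]  # TN / (TN + FP)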
Example #25
 def forward(self, x):
     x = T.relu(self.conv1(x))
     x = T.relu(self.conv2(x))
     x = T.max_pool2d(x, 2)
     x = T.flatten(x, 1)
     x = T.relu(self.fc1(x))
     x = T.relu(x)  # redundant: relu is idempotent, so this second relu is a no-op
     x = T.log_softmax(self.fc2(x), dim=1)
     return x
Example #26
    def forward(self, x):
        x = self.conv1(x)
        x = torch.max_pool2d(x, 2)
        x = torch.relu(x)

        x = self.conv2(x)
        x = self.dropout1(x)
        x = torch.max_pool2d(x, 2)
        x = torch.relu(x)

        x = x.view(-1, 320)
        x = self.fc1(x)
        x = torch.relu(x)

        x = self.dropout2(x)
        x = self.fc2(x)

        return torch.log_softmax(x, dim=-1)
Example #27
 def jojo_25(self, input_vars_15, input_vars_14, input_vars_8, input_vars_11,
             input_vars_10, input_vars_17, input_vars_9, var_681, input_vars_16,
             input_vars_7, input_vars_13):
     var_695 = torch.max_pool2d(var_681, [3, 3], [2, 2], [1, 1], [1, 1], False)
     var_714 = torch._convolution(var_695, input_vars_7, None, [1, 1], [1, 1],
                                  [1, 1], False, [0, 0], 1, False, False, True)
     var_719 = torch.batch_norm(var_714, input_vars_8, input_vars_9, input_vars_10,
                                input_vars_11, False, 0.1, 1e-05, True)
     var_720 = torch.relu_(var_719)
     var_739 = torch._convolution(var_720, input_vars_13, None, [1, 1], [1, 1],
                                  [1, 1], False, [0, 0], 1, False, False, True)
     var_744 = torch.batch_norm(var_739, input_vars_14, input_vars_15, input_vars_16,
                                input_vars_17, False, 0.1, 1e-05, True)
     var_746 = torch.add(var_744, var_695, alpha=1)  # residual add around the block
     return var_746
Example #28
    def forward(self, inputs):
        feats = [self.head(inputs, self.knn(inputs[:, 0:3]))]
        for i in range(self.n_blocks-1):
            feats.append(self.backbone[i](feats[-1]))
        feats = torch.cat(feats, dim=1)

        fusion = torch.max_pool2d(self.fusion_block(feats), kernel_size=[feats.shape[2], feats.shape[3]])
        fusion = torch.repeat_interleave(fusion, repeats=feats.shape[2], dim=2)
        return self.prediction(torch.cat((fusion, feats), dim=1)).squeeze(-1)
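The fusion step is a global max-pool over the two trailing (point/neighbor) dimensions, implemented by making the kernel span the whole feature map. A minimal shape check with assumed sizes:

feats = torch.randn(2, 64, 1024, 16)
fused = torch.max_pool2d(feats, kernel_size=[feats.shape[2], feats.shape[3]])  # -> (2, 64, 1, 1)
fused = torch.repeat_interleave(fused, repeats=feats.shape[2], dim=2)          # -> (2, 64, 1024, 1)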
Example #29
 def __init__(self):
     super(ConvNetSimple, self).__init__()
     self.conv_1_size = 12
     self.out_channels = 20
     self.conv_1 = nn.Conv2d(in_channels=1, out_channels=20, kernel_size=5)
     self.max_pool_2d = lambda x: torch.max_pool2d(
         x, kernel_size=2, stride=2)
     self.view = lambda x: x.view(-1, self.conv_1_size**2 * 2 * 20)
     self.fc_1 = nn.Linear(self.conv_1_size**2 * self.out_channels, 100)
     self.out = nn.Linear(100, OUTPUT_SIZE)
Example #30
    def forward(self, x):
        #x = self.cnn_layers(x)
        #x = x.view(x.size(0), -1)
        #x = self.linear_layers(x)
        #return x
        x = self.conv_layer_1(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, kernel_size=2, stride=2)

        x = self.conv_layer_2(x)
        x = torch.relu(x)
        x = torch.max_pool2d(x, kernel_size=2, stride=2)
        #print(x.shape)
        # NOTE: if 32 here is the batch size, this folds the batch dimension into
        # the feature count; the usual per-sample flatten is x.view(x.size(0), -1).
        x = x.view(-1, 32 * 40 * 53 * 53)
        x = self.fc1(x)
        x = torch.relu(x)

        x = self.out(x)
        return x