Code Example #1
File: unet3.py  Project: lclibardi/clab
    def output_shape_for(self, input1_shape, input2_shape):
        """

        Example:
            >>> self = DenseUNetUp(128, 256)
            >>> input1_shape = [4, 128, 24, 24]
            >>> input2_shape = [4, 256, 8, 8]
            >>> output_shape = self.output_shape_for(input1_shape, input2_shape)
            >>> output_shape
            (4, 64, 24, 24)
            >>> inputs1 = torch.autograd.Variable(torch.rand(input1_shape))
            >>> inputs2 = torch.autograd.Variable(torch.rand(input2_shape))
            >>> assert self.forward(inputs1, inputs2).shape == output_shape
        """
        output2_shape = OutputShapeFor(self.up)(input2_shape)
        output2_shape = OutputShapeFor(self.pad)(output2_shape, input1_shape)

        # Taking the easy way out and padding the upsampled layer instead of
        # cropping the down layer

        # output1_shape = OutputShapeFor(self.pad)(input1_shape, output2_shape)
        # cat_shape     = OutputShapeFor(torch.cat)([output1_shape, output2_shape], 1)

        cat_shape = OutputShapeFor(torch.cat)([input1_shape, output2_shape], 1)
        conv_shape = OutputShapeFor(self.conv)(cat_shape)
        output_shape = OutputShapeFor(self.bottleneck)(conv_shape)
        return output_shape
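OutputShapeFor itself does not appear in any of these snippets. As a rough orientation only, a minimal sketch of the dispatch pattern they assume might look like the following; the class name, the conv handler, and the hard-coded formula are illustrative, not clab's actual implementation, and handlers for functions such as torch.cat are omitted.

# Sketch (assumption, not clab's real OutputShapeFor): defer to a module's
# own output_shape_for when it defines one, otherwise fall back to a small
# table of handlers that do the shape arithmetic symbolically.
import torch.nn as nn

class OutputShapeForSketch(object):
    def __init__(self, module):
        self.module = module

    def __call__(self, *input_shapes):
        if hasattr(self.module, 'output_shape_for'):
            return self.module.output_shape_for(*input_shapes)
        if isinstance(self.module, nn.Conv2d):
            return self._conv2d_shape(self.module, input_shapes[0])
        raise NotImplementedError(type(self.module))

    @staticmethod
    def _conv2d_shape(conv, input_shape):
        # out = floor((in + 2*pad - dilation*(k - 1) - 1) / stride) + 1
        n, c, h, w = input_shape
        out = []
        for i, size in enumerate((h, w)):
            numer = (size + 2 * conv.padding[i]
                     - conv.dilation[i] * (conv.kernel_size[i] - 1) - 1)
            out.append(numer // conv.stride[i] + 1)
        return (n, conv.out_channels, out[0], out[1])

# e.g. OutputShapeForSketch(nn.Conv2d(3, 8, 3, padding=1))((1, 3, 32, 32))
# returns (1, 8, 32, 32)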
Code Example #2
File: unet3.py  Project: lclibardi/clab
    def activation_shapes(self, input_shape):
        # norm -> nonlinearity -> conv -> pool; record what each stage produces.
        # A non-inplace nonlinearity allocates its own activation buffer.
        norm_shape = OutputShapeFor(self._modules['norm'])(input_shape)
        noli_shape = OutputShapeFor(self._modules['noli'])(norm_shape)
        conv_shape = OutputShapeFor(self._modules['conv'])(noli_shape)
        pool_shape = OutputShapeFor(self._modules['pool'])(conv_shape)
        activations = [norm_shape]
        if not self._modules['noli'].inplace:
            activations.append(np.prod(noli_shape))
        activations += [conv_shape, pool_shape]
        return activations
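The downstream consumer of these activation lists is not shown in the listing; one plausible use is estimating activation memory. A sketch, treating scalar entries (produced by the inplace check above) as already-reduced element counts and tuple entries as shapes:

# Hedged sketch: sum element counts over a mixed list of shapes and scalars.
import numpy as np

def estimate_activation_elems(activations):
    total = 0
    for item in activations:
        total += int(item) if np.isscalar(item) else int(np.prod(item))
    return total

# e.g. estimate_activation_elems([(1, 64, 32, 32), 65536, (1, 64, 16, 16)])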
Code Example #3
File: _common.py  Project: lclibardi/clab
    def output_shape_for(self, input_shape):
        residual = input_shape
        out = OutputShapeFor(self.convbnrelu1)(input_shape)
        out = OutputShapeFor(self.convbn2)(out)
        if self.downsample is not None:
            residual = OutputShapeFor(self.downsample)(input_shape)

        if residual[:-3] != out[:-3]:
            print('disagree:')
            print('out      = {!r}'.format(out))
            print('residual = {!r}'.format(residual))
        out = OutputShapeFor(self.relu)(out)
        return out
Code Example #4
File: unet3.py  Project: lclibardi/clab
    def activation_shapes(self, input1_shape, input2_shape):
        up2_shape = OutputShapeFor(self.up)(input2_shape)
        pad2_shape = OutputShapeFor(self.pad)(up2_shape, input1_shape)

        cat_shape = OutputShapeFor(torch.cat)([input1_shape, pad2_shape], 1)
        conv_shape = OutputShapeFor(self.conv)(cat_shape)
        output_shape = OutputShapeFor(self.bottleneck)(conv_shape)

        activations = [up2_shape]
        activations += self.pad.activation_shapes(up2_shape, input1_shape)
        activations += [cat_shape]
        activations += self.conv.activation_shapes(cat_shape)
        activations += [output_shape]
        return activations
Code Example #5
    def __init__(self, input_shape):
        """
        """
        super(C3D, self).__init__()
        # nonlinearity = partial(nn.ReLU)
        # kernels are specified in D, H, W

        feats = [64, 128, 256, 512, 512]
        conv_blocks = nn.Sequential(OrderedDict([
            ('block1', Conv3DBlock(in_channels=3, out_channels=feats[0], n_conv=1,
                                   pool_kernel=(1, 2, 2), pool_stride=(1, 2, 2))),
            ('block2', Conv3DBlock(in_channels=feats[0], out_channels=feats[1], n_conv=1)),
            ('block3', Conv3DBlock(in_channels=feats[1], out_channels=feats[2], n_conv=2)),
            ('block4', Conv3DBlock(in_channels=feats[2], out_channels=feats[3], n_conv=2)),
            ('block5', Conv3DBlock(in_channels=feats[3], out_channels=feats[4], n_conv=2)),
        ]))
        output_shape = OutputShapeFor(conv_blocks)(input_shape)
        print('output_shape = {!r}'.format(output_shape))
        import numpy as np

        self.input_shape = input_shape
        self.conv_blocks = conv_blocks
        self.n_conv_output = int(np.prod(output_shape[1:]))
        self.block6 = FCBlock(self.n_conv_output, 4096)
        self.block7 = FCBlock(4096, 4096)

        self.softmax = nn.Softmax(dim=1)
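The same idea of sizing block6 from the statically computed convolutional output shape can be sketched without OutputShapeFor by pushing a dummy tensor through the conv stack once at construction time. The layers below are placeholders, not the Conv3DBlock/FCBlock used above.

# Sketch: determine the flattened conv output size with a dummy forward pass,
# then size the first fully connected layer from it.
import numpy as np
import torch
import torch.nn as nn

conv_blocks = nn.Sequential(
    nn.Conv3d(3, 64, kernel_size=3, padding=1),
    nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
    nn.Conv3d(64, 128, kernel_size=3, padding=1),
    nn.MaxPool3d(kernel_size=2, stride=2),
)

input_shape = (1, 3, 16, 112, 112)  # (N, C, D, H, W)
with torch.no_grad():
    output_shape = conv_blocks(torch.zeros(input_shape)).shape
n_conv_output = int(np.prod(output_shape[1:]))
fc1 = nn.Linear(n_conv_output, 4096)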
Code Example #6
File: unet3.py  Project: lclibardi/clab
    def activation_shapes(self, input_shape):
        shape = input_shape
        activations = []
        for i in range(self.num_layers):
            module = self._modules['denselayer%d' % (i + 1)]
            activations += module.activation_shapes(shape)
            shape = OutputShapeFor(module)(shape)
        return activations
Code Example #7
File: unet.py  Project: lclibardi/clab
    def output_shape_for(self, input1_shape, input2_shape):
        """
        Example:
            >>> self = UNetUp(256, 128)
            >>> input1_shape = [4, 128, 24, 24]
            >>> input2_shape = [4, 256, 8, 8]
            >>> output_shape = self.output_shape_for(input1_shape, input2_shape)
            >>> output_shape
            (4, 128, 12, 12)
            >>> inputs1 = torch.autograd.Variable(torch.rand(input1_shape))
            >>> inputs2 = torch.autograd.Variable(torch.rand(input2_shape))
            >>> assert self.forward(inputs1, inputs2).shape == output_shape
        """
        output2_shape = OutputShapeFor(self.up)(input2_shape)
        output1_shape = OutputShapeFor(self.pad)(input1_shape, output2_shape)
        cat_shape     = OutputShapeFor(torch.cat)([output1_shape, output2_shape], 1)
        output_shape  = OutputShapeFor(self.conv)(cat_shape)
        return output_shape
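For reference, this is roughly what the pad-then-concatenate step does on real tensors. This is a sketch only; the actual pad module in clab may pad the other operand, or crop, instead.

# Sketch: pad one feature map so the two agree spatially, then stack along
# the channel axis.
import torch
import torch.nn.functional as F

skip = torch.rand(4, 128, 24, 24)   # encoder feature map
up = torch.rand(4, 256, 23, 23)     # upsampled decoder feature map

dh = skip.shape[2] - up.shape[2]
dw = skip.shape[3] - up.shape[3]
# F.pad takes (left, right, top, bottom) for the last two dimensions.
up = F.pad(up, (dw // 2, dw - dw // 2, dh // 2, dh - dh // 2))

cat = torch.cat([skip, up], dim=1)
assert cat.shape == (4, 128 + 256, 24, 24)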
Code Example #8
File: mixin.py  Project: lclibardi/clab
    def io_shapes(conn, self, input_shape):
        output_shapes = ub.odict()
        input_shapes = ub.odict()
        # prev = None
        for node in conn.topsort:
            in_names = conn.input_nodes[node]
            if in_names is None:
                # source nodes receive the raw network input
                in_shapes = [input_shape]
            else:
                in_shapes = list(ub.take(output_shapes, in_names))
            input_shapes[node] = in_shapes
            out_shapes = OutputShapeFor(getattr(self, node))(*in_shapes)
            output_shapes[node] = out_shapes
        conn.output_shapes = output_shapes
        conn.input_shapes = input_shapes
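The `conn` object is not shown in the listing. A self-contained sketch of the same topological shape propagation over a tiny stand-in graph:

# Sketch: walk a small DAG in topological order, feeding each node the
# output shapes of its named inputs. The dict-based graph and shape_for
# function stand in for conn and OutputShapeFor.
from collections import OrderedDict

topsort = ['conv1', 'conv2', 'cat']
input_nodes = {'conv1': None, 'conv2': ['conv1'], 'cat': ['conv1', 'conv2']}

def shape_for(node, *in_shapes):
    # stand-in for OutputShapeFor(getattr(self, node))(*in_shapes)
    if node == 'cat':
        n, c1, h, w = in_shapes[0]
        return (n, c1 + in_shapes[1][1], h, w)
    n, c, h, w = in_shapes[0]
    return (n, c * 2, h, w)

output_shapes = OrderedDict()
input_shape = (1, 3, 32, 32)
for node in topsort:
    in_names = input_nodes[node]
    in_shapes = [input_shape] if in_names is None else [output_shapes[k] for k in in_names]
    output_shapes[node] = shape_for(node, *in_shapes)

# output_shapes: conv1 -> (1, 6, 32, 32), conv2 -> (1, 12, 32, 32), cat -> (1, 18, 32, 32)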
Code Example #9
File: siamese.py  Project: lclibardi/clab
    def resnet_prepool_output_shape(self, input_shape):
        """
        input_shape = (1, 3, 224, 224)
        input_shape = (1, 3, 416, 416)
        """
        # Figure out how big the output will be and redo the average pool layer
        # to account for it
        branch = self.branch
        shape = input_shape
        shape = OutputShapeFor(branch.conv1)(shape)
        shape = OutputShapeFor(branch.bn1)(shape)
        shape = OutputShapeFor(branch.relu)(shape)
        shape = OutputShapeFor(branch.maxpool)(shape)

        shape = OutputShapeFor(branch.layer1)(shape)
        shape = OutputShapeFor(branch.layer2)(shape)
        shape = OutputShapeFor(branch.layer3)(shape)
        shape = OutputShapeFor(branch.layer4)(shape)
        prepool_shape = shape
        return prepool_shape
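How such a prepool shape is typically consumed (a sketch; the surrounding clab code is not shown): rebuild the branch's average pool so the fully connected head still sees a fixed-size vector at a non-default input size. The (1, 2048, 13, 13) value below is illustrative for a ResNet-50 branch at 416x416 input.

# Sketch: size the average pool from the statically computed prepool shape.
import torch.nn as nn

# prepool_shape = self.resnet_prepool_output_shape((1, 3, 416, 416))
prepool_shape = (1, 2048, 13, 13)  # illustrative, assumes a ResNet-50 branch
branch_avgpool = nn.AvgPool2d(prepool_shape[-2:], stride=1)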
Code Example #10
File: unet3.py  Project: lclibardi/clab
    def activation_shapes(self, input_shape):
        norm1_shape = OutputShapeFor(self._modules['norm.1'])(input_shape)
        noli1_shape = OutputShapeFor(self._modules['noli.1'])(norm1_shape)
        conv1_shape = OutputShapeFor(self._modules['conv.1'])(noli1_shape)
        norm2_shape = OutputShapeFor(self._modules['norm.2'])(conv1_shape)
        noli2_shape = OutputShapeFor(self._modules['noli.2'])(norm2_shape)
        conv2_shape = OutputShapeFor(self._modules['conv.2'])(noli2_shape)

        activations = [
            norm1_shape,
        ]
        if not self._modules['noli.1'].inplace:
            activations.append(np.prod(noli1_shape))
        activations += [
            conv1_shape,
            norm2_shape,
        ]
        if not self._modules['noli.2'].inplace:
            activations.append(np.prod(noli2_shape))
        activations += [
            conv2_shape,
        ]
        return activations
Code Example #11
File: _common.py  Project: lclibardi/clab
    def output_shape_for(self, input_shape):
        return OutputShapeFor(self.cbr_unit)(input_shape)
Code Example #12
File: siamese.py  Project: lclibardi/clab
    def output_shape_for(self, input_shape1, input_shape2):
        # both inputs pass through the same shared branch
        shape1 = OutputShapeFor(self.branch)(input_shape1)
        shape2 = OutputShapeFor(self.branch)(input_shape2)
        assert shape1 == shape2
        output_shape = (shape1[0], 1)
        return output_shape
Code Example #13
File: unet.py  Project: lclibardi/clab
    def output_shape_for(self, input_shape, math=math):
        shape = OutputShapeFor(self.conv1[0])(input_shape)
        shape = OutputShapeFor(self.conv2[0])(shape)
        output_shape = shape
        return output_shape
Code Example #14
File: unet.py  Project: lclibardi/clab
    def raw_output_shape_for(self, input_shape):
        # output shape without fancy prepad mirrors and post crops
        shape = conv1 = OutputShapeFor(self.conv1)(input_shape)
        shape = OutputShapeFor(self.maxpool1)(shape)

        shape = conv2 = OutputShapeFor(self.conv2)(shape)
        shape = OutputShapeFor(self.maxpool2)(shape)

        shape = conv3 = OutputShapeFor(self.conv3)(shape)
        shape = OutputShapeFor(self.maxpool3)(shape)

        shape = conv4 = OutputShapeFor(self.conv4)(shape)
        shape = OutputShapeFor(self.maxpool4)(shape)

        shape = OutputShapeFor(self.center)(shape)

        shape = OutputShapeFor(self.up_concat4)(conv4, shape)
        shape = OutputShapeFor(self.up_concat3)(conv3, shape)
        shape = OutputShapeFor(self.up_concat2)(conv2, shape)
        shape = OutputShapeFor(self.up_concat1)(conv1, shape)

        shape = OutputShapeFor(self.final)(shape)
        output_shape = shape
        return output_shape
Code Example #15
File: unet.py  Project: lclibardi/clab
    def output_shape_for(self, input_shape):
        # N1, C1, W1, H1 = input_shape
        # output_shape = (N1, self.n_classes, W1, H1)
        shape = input_shape
        shape = conv1 = OutputShapeFor(self.conv1)(shape)
        shape = OutputShapeFor(self.maxpool1)(shape)

        shape = conv2 = OutputShapeFor(self.conv2)(shape)
        shape = OutputShapeFor(self.maxpool2)(shape)

        shape = conv3 = OutputShapeFor(self.conv3)(shape)
        shape = OutputShapeFor(self.maxpool3)(shape)

        shape = conv4 = OutputShapeFor(self.conv4)(shape)
        shape = OutputShapeFor(self.maxpool4)(shape)

        shape = OutputShapeFor(self.center)(shape)

        shape = OutputShapeFor(self.up_concat4)(conv4, shape)
        shape = OutputShapeFor(self.up_concat3)(conv3, shape)
        shape = OutputShapeFor(self.up_concat2)(conv2, shape)
        shape = OutputShapeFor(self.up_concat1)(conv1, shape)

        shape = OutputShapeFor(self.final)(shape)
        output_shape = shape
        return output_shape
Code Example #16
    def output_shape_for(self, input_shape):
        return OutputShapeFor(self.sequence)(input_shape)