def __init__(self, in_chan, params, kernel_size=3):
        super(BCNN, self).__init__()

        c1_targets, c2_targets, out_chan = params

        self.convblock1 = ConvBlock(in_channels=in_chan,
                                    hidden=32,
                                    out_channels=64)
        self.convblock2 = ConvBlock(in_channels=64,
                                    hidden=128,
                                    out_channels=128)
        self.coarse1 = CoarseBlock(in_features=128 * 12 * 12,
                                   hidden=128,
                                   out_features=c1_targets)
        self.convblock3 = ConvBlock(in_channels=128,
                                    hidden=256,
                                    out_channels=256)
        self.coarse2 = CoarseBlock(in_features=256 * 6 * 6,
                                   hidden=1024,
                                   out_features=c2_targets)
        self.convblock4 = ConvBlock(in_channels=256,
                                    hidden=512,
                                    out_channels=512)
        self.coarse3 = CoarseBlock(in_features=512 * 3 * 3,
                                   hidden=1024,
                                   out_features=out_chan)
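The constructor above only wires up the blocks; as a hedged sketch, a forward pass consistent with the in_features values (128 * 12 * 12, 256 * 6 * 6, 512 * 3 * 3) might look like the following. ConvBlock and CoarseBlock are defined elsewhere in the source project, and the explicit flatten() is an assumption about how CoarseBlock expects its input.

def forward(self, x):
    x = self.convblock1(x)
    x = self.convblock2(x)            # expected (N, 128, 12, 12) here
    c1 = self.coarse1(x.flatten(1))   # coarse level-1 logits
    x = self.convblock3(x)            # expected (N, 256, 6, 6)
    c2 = self.coarse2(x.flatten(1))   # coarse level-2 logits
    x = self.convblock4(x)            # expected (N, 512, 3, 3)
    c3 = self.coarse3(x.flatten(1))   # finest-level logits
    return c1, c2, c3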
Example #2
	def __init__(self):
		super(ConvNet, self).__init__()
		
		self.conv_layers = nn.ModuleList()
		self.flat = Flatten()

		self.conv_layers.append(ConvBlock(CHANNELS, 32, 8, 4, act=nn.ReLU()))
		self.conv_layers.append(ConvBlock(32, 64, 4, 2, act=nn.ReLU()))
		self.conv_layers.append(ConvBlock(64, 64, 3, 1, act=nn.ReLU()))
Example #3
 def __init__(self, z_dim=10, outsize=56):
     super().__init__()
     self.outsize = outsize
     self.linear_size = int((outsize / 8)**2)
     self.linear = nn.Linear(z_dim, self.linear_size)
     self.net = nn.Sequential(nn.ELU(), UpResBloc(1, 32), nn.ELU(),
                              nn.BatchNorm2d(32), ConvBlock(32, bias=False),
                              UpResBloc(32, 32), nn.ELU(),
                              ConvBlock(32, bias=False),
                              ConvBlock(32, bias=False), UpResBloc(32, 1),
                              nn.Sigmoid())
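The forward pass is not shown in this excerpt; a plausible sketch, assuming the flat linear output is reshaped into a single-channel (outsize/8) x (outsize/8) map before the upsampling stack (UpResBloc appears to upsample by a factor of 2, applied three times), is:

def forward(self, z):
    h = self.linear(z)
    side = self.outsize // 8
    h = h.view(-1, 1, side, side)   # assumed reshape to (N, 1, outsize/8, outsize/8)
    return self.net(h)              # three upsampling stages restore outsize x outsize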
Example #4
 def __init__(self, in_c, num_classes):
     super(AllCNNC, self).__init__()
     self.blc1 = nn.Sequential(ConvBlock(in_c, 96), ConvBlock(96, 96),
                               ConvBlock(96, 96, s=2))
     self.blc2 = nn.Sequential(ConvBlock(96, 192), ConvBlock(192, 192),
                               ConvBlock(192, 192, s=2))
     self.blc3 = ConvBlock(192, 192)
     self.blc4 = ConvBlock(192, 192, k=1, p=0)
     self.blc5 = ConvBlock(192, num_classes, k=1, p=0)
     self.gap = nn.AdaptiveAvgPool2d(1)
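A hedged forward-pass sketch for this All-CNN-C style constructor, assuming the layout implied by the 1x1 blocks and self.gap: the last ConvBlock maps the features to num_classes channels, global average pooling reduces them to one value per class, and the result is flattened into logits. ConvBlock itself is not defined in this excerpt.

def forward(self, x):
    x = self.blc1(x)
    x = self.blc2(x)
    x = self.blc3(x)
    x = self.blc4(x)
    x = self.blc5(x)      # (N, num_classes, H, W)
    x = self.gap(x)       # (N, num_classes, 1, 1)
    return x.flatten(1)   # (N, num_classes) logits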
Example #5
 def __init__(self, z_size=100, x_size=80, y_size=3):
     super().__init__()
     self.y_size = y_size
     self.z_size = z_size
     self.x_size = x_size
     self.linear_size = int((x_size / 8)**2)
     self.linear = nn.Linear(self.z_size + self.y_size, self.linear_size)
     self.net = nn.Sequential(nn.ELU(), UpResBloc(1, 32), nn.ELU(),
                              BatchNorm2dEnum(32), ConvBlock(32,
                                                             bias=False),
                              UpResBloc(32, 32), nn.ELU(),
                              ConvBlock(32, bias=False),
                              ConvBlock(32, bias=False), UpResBloc(32, 1),
                              nn.Sigmoid())
Example #6
 def __init__(self, x_size=80, y_size=3):
     super().__init__()
     self.y_size = y_size
     self.x_size = x_size
     self.linear_size = int((x_size / 8)**2)
     self.net = nn.Sequential(
         Conv2dEnum(1, 32, kernel_size=7, padding=3, bias=False), nn.Tanh(),
         nn.AvgPool2d(2), ConvBlock(32, 5, bias=False),
         Conv2dEnum(32, 16, kernel_size=5, padding=2, bias=False),
         nn.Tanh(), ConvBlock(16, bias=False), nn.AvgPool2d(2),
         Conv2dEnum(16, 1, kernel_size=5, padding=2, bias=False),
         nn.AvgPool2d(2), nn.Tanh())
     self.linear = nn.Linear(self.linear_size, self.y_size)
     self.sigmoid = nn.Sigmoid()
 def __init__(self, in_c, num_classes):
     super(MobileNetV1, self).__init__()
     self.blc1 = nn.Sequential(
         ConvBlock(in_c, 32),
         DepSepConvBlock(32, 64)
     )
     self.blc2 = nn.Sequential(
         DepSepConvBlock(64, 128),
         DepSepConvBlock(128, 128)
     )
     self.blc3 = nn.Sequential(
         DepSepConvBlock(128, 256, s=2),
         DepSepConvBlock(256, 256)
     )
     self.blc4 = nn.Sequential(
         DepSepConvBlock(256, 512, s=2),
         DepSepConvBlock(512, 512),
         DepSepConvBlock(512, 512),
         DepSepConvBlock(512, 512),
         DepSepConvBlock(512, 512),
         DepSepConvBlock(512, 512)
     )
     self.blc5 = nn.Sequential(
         DepSepConvBlock(512, 1024, s=2),
         DepSepConvBlock(1024, 1024)
     )
     self.gap = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Linear(1024, num_classes)
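As with the other classifiers here, the forward pass is not part of the excerpt; a minimal sketch consistent with self.gap and self.fc would be:

def forward(self, x):
    x = self.blc1(x)
    x = self.blc2(x)
    x = self.blc3(x)
    x = self.blc4(x)
    x = self.blc5(x)
    x = self.gap(x)                # (N, 1024, 1, 1)
    return self.fc(x.flatten(1))   # (N, num_classes) logits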
Example #8
 def __init__(self, x_size=80, z_size=100, y_size=3):
     super().__init__()
     self.y_size = y_size
     self.x_size = x_size
     self.z_size = z_size
     self.linear_size = int((self.x_size / 8)**2)
     self.net = nn.Sequential(
         Conv2dEnum(1, 32, kernel_size=7, padding=3, bias=False), nn.ELU(),
         nn.AvgPool2d(2), ConvBlock(32, bias=False),
         Conv2dEnum(32, 16, kernel_size=5, padding=2, bias=False), nn.ELU(),
         ConvBlock(16, bias=False), nn.AvgPool2d(2),
         Conv2dEnum(16, 1, kernel_size=5, padding=2, bias=False),
         nn.AvgPool2d(2), nn.ELU())
     self.linear = nn.Linear(self.linear_size + self.y_size,
                             self.linear_size)
     self.loc = nn.Linear(self.linear_size, self.z_size)
     self.scale = nn.Linear(self.linear_size, self.z_size)
 def __init__(self,
              transformer: transformers.Transformer,
              insize=56,
              z_dim=10):
     super().__init__()
     self.transformer = transformer
     self.insize = insize
     self.linear_size = int((insize / 8)**2)
     self.net = nn.Sequential(
         nn.Conv2d(1, 32, kernel_size=7, padding=3, bias=False), nn.ELU(),
         nn.AvgPool2d(2), ConvBlock(32, bias=False),
         nn.Conv2d(32, 16, kernel_size=5, padding=2, bias=False), nn.ELU(),
         ConvBlock(16, bias=False), nn.AvgPool2d(2),
         nn.Conv2d(16, 1, kernel_size=5, padding=2, bias=False),
         nn.AvgPool2d(2), nn.ELU())
     self.loc = nn.Linear(self.linear_size, z_dim)
     self.scale = nn.Linear(self.linear_size, z_dim)
Example #10
    def __init__(self, args):
        super(CNN3D, self).__init__()
        self.args = args
        self.size = 20

        # self.conv = ConvBlock(54, 64, args.dropout_rate)
        self.conv = ConvBlock(54, 128, args.dropout_rate)

        # self.predict = PredictBlock(64*40*40*40, 1, args.dropout_rate, True)
        self.predict = PredictBlock(128 * 40 * 40 * 40, 1, args.dropout_rate,
                                    True)
 def __init__(self, in_c, num_classes, l=22, widen=8, se=False, r=16):
     super(WideResNet, self).__init__()
     num_block = int((l - 4) / 6)
     self.blc1 = ConvBlock(in_c, 16)
     self.blc2 = nn.Sequential(
         *self._make_layer(16, 16 * widen, 1, num_block, se, r))
     self.blc3 = nn.Sequential(
         *self._make_layer(16 * widen, 32 * widen, 2, num_block, se, r))
     self.blc4 = nn.Sequential(
         *self._make_layer(32 * widen, 64 * widen, 2, num_block, se, r))
     self.gap = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Linear(64 * widen, num_classes)
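For context, the depth argument l must satisfy l = 6n + 4, since each of the three stages receives (l - 4) / 6 residual blocks. A hedged usage sketch follows; the block classes and _make_layer live elsewhere in the source project.

# WRN-22-8 for 10-class, 3-channel input: (22 - 4) / 6 = 3 blocks per stage,
# with stage widths 16/32/64 scaled by widen=8 to 128/256/512.
model = WideResNet(in_c=3, num_classes=10, l=22, widen=8, se=False)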
Example #12
    def build_discriminator(self):
        input_lyr = tf.keras.Input(self.mul_hr_shape)
        x = ConvBlock(64)(input_lyr)
        x = ConvBlock(64, 2)(x)
        x = ConvBlock(128)(x)
        x = ConvBlock(128, 2)(x)
        x = ConvBlock(256)(x)
        x = ConvBlock(256, 2)(x)
        x = ConvBlock(512)(x)
        x = ConvBlock(512, 2)(x)
        x = tf.keras.layers.Dense(1024)(x)
        x = tf.keras.layers.LeakyReLU(0.2)(x)
        out_lyr = tf.keras.layers.Dense(1)(x)

        model = tf.keras.Model(inputs=input_lyr, outputs=out_lyr)
        return model
Example #13
 def __init__(self, in_c, num_classes):
     super(PreAct34, self).__init__()
     self.blc1 = ConvBlock(in_c, 64, 3, 1, 1)
     self.blc2 = nn.Sequential(PreActBlock(64, 64), PreActBlock(64, 64),
                               PreActBlock(64, 64))
     self.blc3 = nn.Sequential(PreActBlock(64, 128, s=2),
                               PreActBlock(128, 128), PreActBlock(128, 128),
                               PreActBlock(128, 128))
     self.blc4 = nn.Sequential(PreActBlock(128, 256, s=2),
                               PreActBlock(256, 256), PreActBlock(256, 256),
                               PreActBlock(256, 256), PreActBlock(256, 256),
                               PreActBlock(256, 256))
     self.blc5 = nn.Sequential(PreActBlock(256, 512, s=2),
                               PreActBlock(512, 512), PreActBlock(512, 512))
     self.gap = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Linear(512, num_classes)
 def __init__(self, in_c, num_classes, r=16):
     super(SEPreAct50, self).__init__()
     self.blc1 = ConvBlock(in_c, 64, 3, 1, 1)
     self.blc2 = nn.Sequential(SEPreActBottleneck(64, 256, r=r),
                               SEPreActBottleneck(256, 256, r=r),
                               SEPreActBottleneck(256, 256, r=r))
     self.blc3 = nn.Sequential(
         SEPreActBottleneck(256, 512, r=r, s=2),
         SEPreActBottleneck(512, 512, r=r),
         SEPreActBottleneck(512, 512, r=r),
         SEPreActBottleneck(512, 512, r=r),
     )
     self.blc4 = nn.Sequential(SEPreActBottleneck(512, 1024, r=r, s=2),
                               SEPreActBottleneck(1024, 1024, r=r),
                               SEPreActBottleneck(1024, 1024, r=r),
                               SEPreActBottleneck(1024, 1024, r=r),
                               SEPreActBottleneck(1024, 1024, r=r),
                               SEPreActBottleneck(1024, 1024, r=r))
     self.blc5 = nn.Sequential(SEPreActBottleneck(1024, 2048, r=r, s=2),
                               SEPreActBottleneck(2048, 2048, r=r),
                               SEPreActBottleneck(2048, 2048, r=r))
     self.gap = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Linear(2048, num_classes)
Example #15
    def __init__(self,
        layers,
        radix=2,
        groups=1,
        bottleneck_width=64,
        n_classes=1000,
        stem_width=64
    ):
        super(ResNeSt, self).__init__()
        self.radix = radix
        self.groups = groups
        self.bottleneck_width = bottleneck_width

        self.deep_stem = nn.Sequential(
            ConvBlock(
                in_channels=3,
                out_channels=stem_width,
                kernel_size=3,
                stride=2,
                padding=1
            ),
            ConvBlock(
                in_channels=stem_width,
                out_channels=stem_width,
                kernel_size=3,
                stride=1,
                padding=1
            ),
            ConvBlock(
                in_channels=stem_width,
                out_channels=stem_width*2,
                kernel_size=3,
                stride=1,
                padding=1
            ),
            nn.MaxPool2d(
                kernel_size=3,
                stride=2,
                padding=1
            )
        )

        self.in_channels = stem_width*2

        self.layer1 = self._make_layers(
            channels=64,
            blocks=layers[0],
            stride=1,
            is_first=False
        )
        self.layer2 = self._make_layers(
            channels=128,
            blocks=layers[1],
            stride=2
        )
        self.layer3 = self._make_layers(
            channels=256,
            blocks=layers[2],
            stride=2
        )
        self.layer4 = self._make_layers(
            channels=512,
            blocks=layers[3],
            stride=2
        )

        self.classifier = nn.Sequential(
            GlobalAvgPool2d(),
            nn.Linear(
                in_features=512*BottleneckBlock.expansion,
                out_features=n_classes
            )
        )
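A hedged usage example for this constructor: layers gives the block count per stage, so [3, 4, 6, 3] corresponds to a ResNet-50-style depth. ResNeSt, BottleneckBlock, and _make_layers are defined elsewhere in the source project and are not shown in this excerpt.

# ResNeSt with a 3-4-6-3 stage layout and the default radix/groups settings.
model = ResNeSt(layers=[3, 4, 6, 3], radix=2, groups=1, n_classes=1000)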
Example #16
        return x

    def compute_output_shape(self, input_shape):
        return tf.TensorShape(input_shape)


if __name__ == "__main__":
    import numpy as np
    f = 3
    k = 3
    s = (1, 64, 64, 3)
    nx = np.random.rand(*s).astype(np.float32)

    custom_layers = [
        FlatConv(f, k),
        ConvBlock(f, k),
        ResBlock(f, k),
        UpSampleConv(f, k)
    ]

    for layer in custom_layers:
        tf.keras.backend.clear_session()
        out = layer(nx)
        layer.summary()
        print(f"Input  Shape: {nx.shape}")
        print(f"Output Shape: {out.shape}")
        print("\n" * 2)

    tf.keras.backend.clear_session()
    g = Generator()
    shape = (1, 256, 256, 3)