def __init__(self, in_chans, out_chans, drop_prob):
    """
    Args:
        in_chans (int): Number of channels in the input.
        out_chans (int): Number of channels in the output.
        drop_prob (float): Dropout probability.
    """
    super().__init__()

    self.in_chans = int(in_chans)
    self.out_chans = int(out_chans)
    self.drop_prob = drop_prob

    self.layers = ComplexSequential(
        ComplexConv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False, setW=False),
        ComplexBatchNorm2d(out_chans),
        ComplexReLU(),
        ComplexDropout2d(p=drop_prob, inplace=True),
        ComplexConv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False, setW=False),
        ComplexBatchNorm2d(out_chans),
        ComplexReLU(),
        ComplexDropout2d(p=drop_prob, inplace=True),
    )
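# Shape note (sketch, not stated in the source): with kernel_size=3 and padding=1 both
# convolutions preserve spatial size, so the block maps complex inputs of shape
# (N, in_chans, H, W) to (N, out_chans, H, W). Assuming this is the ComplexConvBlock
# referenced by the complex U-Net further below, a typical instantiation would be:
#   block = ComplexConvBlock(in_chans=1, out_chans=32, drop_prob=0.0)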
def __init__(self, in_chans=1, out_chans=1, drop_prob=0.05, resolution=128):
    """
    Args:
        in_chans (int): Number of channels in the input.
        out_chans (int): Number of channels in the output.
        drop_prob (float): Dropout probability.
        resolution (int): Image resolution; used as both the output channel count
            and the kernel width of the row-wise complex convolutions.
    """
    super().__init__()

    self.in_chans = int(in_chans)
    self.out_chans = int(out_chans)
    self.drop_prob = drop_prob
    self.resolution = resolution

    self.c_layer1 = ComplexConv2d(in_channels=1, out_channels=resolution,
                                  kernel_size=(1, resolution), padding=(0, 0),
                                  stride=1, bias=False)
    self.c_layer2 = ComplexConv2d(in_channels=1, out_channels=resolution,
                                  kernel_size=(1, resolution), padding=(0, 0),
                                  stride=1, bias=False)
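# Shape note (sketch, assuming square (N, 1, resolution, resolution) complex inputs,
# which the source does not confirm): a kernel_size=(1, resolution) convolution with
# no padding collapses the width axis, so each layer maps
# (N, 1, resolution, resolution) -> (N, resolution, resolution, 1), i.e. it acts like
# a learned fully-connected transform applied independently to every row.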
def __init__(self, in_chans, out_chans, drop_prob, chans, num_pool_layers, resolution):
    super().__init__()

    self.in_chans = in_chans
    self.out_chans = out_chans
    self.drop_prob = drop_prob
    self.resolution = resolution

    # Row-wise complex convolutions with a (1, resolution) kernel.
    self.layer1 = ComplexConv2d(in_channels=1, out_channels=resolution,
                                kernel_size=(1, resolution), padding=(0, 0),
                                stride=1, bias=False)
    self.layer2 = ComplexConv2d(in_channels=1, out_channels=resolution,
                                kernel_size=(1, resolution), padding=(0, 0),
                                stride=1, bias=False)

    self.chans = chans
    self.num_pool_layers = num_pool_layers

    # Down-sampling path: channel count doubles at every pooling level.
    self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
    ch = chans
    for i in range(num_pool_layers - 1):
        self.down_sample_layers += [ConvBlock(ch, ch * 2, drop_prob)]
        ch *= 2
    self.conv = ConvBlock(ch, ch * 2, drop_prob)

    # Up-sampling path: transpose-conv blocks followed by conv blocks, halving the
    # channel count per level; a final 1x1 convolution maps to out_chans.
    self.up_conv = nn.ModuleList()
    self.up_transpose_conv = nn.ModuleList()
    for i in range(num_pool_layers - 1):
        self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
        self.up_conv += [ConvBlock(ch * 2, ch, drop_prob)]
        ch //= 2
    self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
    self.up_conv += [
        nn.Sequential(
            ConvBlock(ch * 2, ch, drop_prob),
            nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
        )
    ]
def __init__(self, k1=2, c1=40, k2=2, c2=100, d1=96, d2=10):
    super(Encoder_f, self).__init__()
    self.conv1 = ComplexConv2d(1, c1, k1, 1, padding=0)
    self.bn = ComplexBatchNorm2d(c1)
    self.conv2 = ComplexConv2d(c1, c2, k2, 1, padding=0)
    self.c2 = c2
    self.fc1 = ComplexLinear(2 * 2 * c2, d1)
    self.fc2 = ComplexLinear(d1, d2)
def __init__(self, in_chans, out_chans, drop_prob, resolution):
    super().__init__()
    self.in_chans = in_chans
    self.out_chans = out_chans
    self.drop_prob = drop_prob
    self.resolution = resolution
    self.layer1 = ComplexConv2d(in_channels=1, out_channels=resolution,
                                kernel_size=(1, resolution), padding=(0, 0),
                                stride=1, bias=False)
    self.layer2 = ComplexConv2d(in_channels=1, out_channels=resolution,
                                kernel_size=(1, resolution), padding=(0, 0),
                                stride=1, bias=False)
def __init__(self):
    super(ComplexNet, self).__init__()
    self.pool = ComplexMaxPool2d(kernel_size=2, stride=2)
    self.conv1 = ComplexConv2d(1, 20, 5, 1)
    self.bn = NaiveComplexBatchNorm2d(20)
    self.conv2 = ComplexConv2d(20, 50, 5, 1)
    self.fc1 = ComplexLinear(4 * 4 * 50, 500)
    self.fc2 = ComplexLinear(500, 10)
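# Shape check (sketch, assuming 28x28 single-channel inputs such as MNIST, which the
# source does not state): each 5x5 convolution with stride 1 and no padding removes
# 4 pixels from each spatial dimension and each 2x2 max-pool halves it, so
#   28 -> conv1 -> 24 -> pool -> 12 -> conv2 -> 8 -> pool -> 4,
# which matches the 4 * 4 * 50 input features of fc1.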
def __init__(self, k1=2, c1=40, k2=2, c2=100, k3=3, c3=3, d1=96, d2=10):
    super(Encoder_r, self).__init__()
    self.conv1 = ComplexConv2d(1, c1, k1, 1, padding=0)
    self.bn1 = ComplexBatchNorm2d(c1)
    self.conv2 = ComplexConv2d(c1, c2, k2, 1, padding=0)
    self.bn2 = ComplexBatchNorm2d(c2)
    self.conv3 = ComplexConv2d(c2, c3, k3, 1, padding=0)
    self.fc1 = ComplexLinear(2 * 2 * c3, d1)
    self.fc2 = ComplexLinear(d1, d2)
    self.c3 = c3
def __init__(self):
    super(ComplexDiscriminator, self).__init__()
    # torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1,
    #                 padding=0, dilation=1, groups=1, bias=True)
    # input (N, C_in, H_in, W_in), output (N, C_out, H_out, W_out)
    # H_out = [H_in + 2*padding[0] - dilation[0]*(kernel_size[0] - 1) - 1] / stride[0] + 1
    self.conv1 = ComplexConv2d(1, 64, 4, 2, 1)
    self.conv2 = ComplexConv2d(64, 128, 4, 2, 1)
    self.conv3 = ComplexConv2d(128, 256, 4, 2, 1)
    self.conv4 = ComplexConv2d(256, 512, 4, 2, 1)
    self.conv5 = ComplexConv2d(512, 1024, 4, 2, 1)
    self.dense = ComplexLinear(1024 * 4 * 4, 1)
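# Worked size check (sketch, assuming 128x128 single-channel complex inputs, which the
# source does not state): with kernel_size=4, stride=2, padding=1 the formula above gives
#   H_out = (H_in + 2 - 3 - 1) / 2 + 1 = H_in / 2,
# so the spatial size goes 128 -> 64 -> 32 -> 16 -> 8 -> 4 across the five convolutions,
# matching the 1024 * 4 * 4 input features of the final ComplexLinear layer.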
def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
    """
    Args:
        in_chans (int): Number of channels in the input to the U-Net model.
        out_chans (int): Number of channels in the output to the U-Net model.
        chans (int): Number of output channels of the first convolution layer.
        num_pool_layers (int): Number of down-sampling and up-sampling layers.
        drop_prob (float): Dropout probability.
    """
    super().__init__()

    self.in_chans = int(in_chans)
    self.out_chans = int(out_chans)
    self.chans = chans
    self.num_pool_layers = num_pool_layers
    self.drop_prob = drop_prob

    # Down-sampling path: one complex conv block per level, doubling the channel count.
    self.down_sample_layers = nn.ModuleList([ComplexConvBlock(in_chans, chans, drop_prob)])
    ch = chans
    for i in range(num_pool_layers - 1):
        self.down_sample_layers += [ComplexConvBlock(ch, ch * 2, drop_prob)]
        ch *= 2
    self.conv = ComplexConvBlock(ch, ch * 2, drop_prob)

    # Up-sampling path: complex transpose-conv blocks followed by conv blocks, halving
    # the channel count per level; a final 1x1 complex convolution maps to out_chans.
    self.up_conv = nn.ModuleList()
    self.up_transpose_conv = nn.ModuleList()
    for i in range(num_pool_layers - 1):
        self.up_transpose_conv += [ComplexTransposeConvBlock(ch * 2, ch)]
        self.up_conv += [ComplexConvBlock(ch * 2, ch, drop_prob)]
        ch //= 2
    self.up_transpose_conv += [ComplexTransposeConvBlock(ch * 2, ch)]
    self.up_conv += [
        ComplexSequential(
            ComplexConvBlock(ch * 2, ch, drop_prob),
            ComplexConv2d(ch, self.out_chans, kernel_size=1, stride=1),
        )
    ]
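# Channel progression example (sketch, hypothetical values): with chans=32 and
# num_pool_layers=4 the encoder blocks output 32, 64, 128, 256 channels, the bottleneck
# conv outputs 512, and the decoder mirrors back down to 32 before the 1x1 complex
# convolution projects to out_chans. Assuming the enclosing class is named ComplexUnet
# (not confirmed by the source):
#   model = ComplexUnet(in_chans=1, out_chans=1, chans=32, num_pool_layers=4, drop_prob=0.0)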