def __init__(self, in_chans, out_chans, drop_prob):
    """Two-layer complex conv block: (conv3x3 -> BN -> ReLU -> dropout) x 2.

    Args:
        in_chans (int): Number of channels in the input.
        out_chans (int): Number of channels in the output.
        drop_prob (float): Dropout probability.
    """
    super().__init__()
    self.in_chans = int(in_chans)
    self.out_chans = int(out_chans)
    self.drop_prob = drop_prob
    # Build the layers from the normalized int values so the stored
    # attributes and the actual layer widths can never disagree
    # (the original passed the raw, unconverted arguments here).
    self.layers = ComplexSequential(
        ComplexConv2d(self.in_chans, self.out_chans, kernel_size=3,
                      padding=1, bias=False, setW=False),
        ComplexBatchNorm2d(self.out_chans),
        ComplexReLU(),
        ComplexDropout2d(p=self.drop_prob, inplace=True),
        ComplexConv2d(self.out_chans, self.out_chans, kernel_size=3,
                      padding=1, bias=False, setW=False),
        ComplexBatchNorm2d(self.out_chans),
        ComplexReLU(),
        ComplexDropout2d(p=self.drop_prob, inplace=True),
    )
def __init__(self, k1=2, c1=40, k2=2, c2=100, k3=3, c3=3, d1=96, d2=10):
    """Complex-valued convolutional encoder.

    Three conv stages (the first two followed by batch norm) feed two
    fully connected layers.

    Args:
        k1, k2, k3: Kernel sizes of the three conv layers.
        c1, c2, c3: Output channel counts of the three conv layers.
        d1: Width of the first fully connected layer.
        d2: Width of the final (output) fully connected layer.
    """
    super(Encoder_f, self).__init__()

    # Conv stack: 1 -> c1 -> c2 -> c3 channels, stride 1, no padding.
    self.conv1 = ComplexConv2d(1, c1, k1, 1, padding=0)
    self.bn1 = ComplexBatchNorm2d(c1)
    self.conv2 = ComplexConv2d(c1, c2, k2, 1, padding=0)
    self.bn2 = ComplexBatchNorm2d(c2)
    self.conv3 = ComplexConv2d(c2, c3, k3, 1, padding=0)

    # Classifier head. The fc1 input size assumes the conv output is a
    # 2x2 spatial map with c3 channels — NOTE(review): confirm against
    # the expected input resolution in forward().
    flat_features = 2 * 2 * c3
    self.fc1 = ComplexLinear(flat_features, d1)
    self.fc2 = ComplexLinear(d1, d2)

    # Kept for reshaping the conv output in forward().
    self.c3 = c3
def __init__(self, k1=2, c1=40, k2=2, c2=100, k3=3, c3=3, d1=96, d2=10):
    """Complex-valued generator mirroring the encoder.

    Layers are declared in encoder order but sized for reverse-order
    use: fc2 -> fc1 -> convt3 -> convt2 -> convt1 reconstructs a
    1-channel map from a d2-dimensional code.

    Args:
        k1, k2, k3: Kernel sizes of the three transposed conv layers.
        c1, c2, c3: Channel counts matching the encoder's conv stages.
        d1: Width of the intermediate fully connected layer.
        d2: Dimension of the input code.
    """
    super(Generator, self).__init__()

    # Transposed convs invert the encoder's channel progression.
    self.convt1 = ComplexConvTranspose2d(c1, 1, k1, 1, padding=0)
    self.bn1 = ComplexBatchNorm2d(c1)
    self.convt2 = ComplexConvTranspose2d(c2, c1, k2, 1, padding=0)
    self.bn2 = ComplexBatchNorm2d(c2)
    # For a transposed conv, effective padding p' = k - 1 (here k = 2).
    self.convt3 = ComplexConvTranspose2d(c3, c2, k3, 1, padding=0)

    # Fully connected layers, inverses of the encoder head; 2 * 2 * c3
    # presumably matches a 2x2 spatial map — verify in forward().
    self.fc1 = ComplexLinear(d1, 2 * 2 * c3)
    self.fc2 = ComplexLinear(d2, d1)

    # Kept for reshaping the fc output back to (c3, 2, 2) in forward().
    self.c3 = c3
def __init__(self, k1=2, c1=40, k2=2, c2=100, d1=96, d2=10):
    """Complex-valued encoder with two conv stages and two FC layers.

    Args:
        k1, k2: Kernel sizes of the two conv layers.
        c1, c2: Output channel counts of the two conv layers.
        d1: Width of the first fully connected layer.
        d2: Width of the final (output) fully connected layer.
    """
    super(Encoder_r, self).__init__()

    # Conv stack: 1 -> c1 -> c2 channels, stride 1, no padding;
    # batch norm only after the first conv.
    self.conv1 = ComplexConv2d(1, c1, k1, 1, padding=0)
    self.bn = ComplexBatchNorm2d(c1)
    self.conv2 = ComplexConv2d(c1, c2, k2, 1, padding=0)

    # Kept for reshaping the conv output in forward().
    self.c2 = c2

    # fc1 input size assumes a 2x2 spatial map with c2 channels —
    # NOTE(review): confirm against the expected input resolution.
    self.fc1 = ComplexLinear(2 * 2 * c2, d1)
    self.fc2 = ComplexLinear(d1, d2)
def __init__(self):
    """DCGAN-style complex generator: 100-dim code -> (1, 128, 128) map.

    A linear layer projects the code to a (1024, 4, 4) volume, then five
    transposed convs (kernel 4, stride 2, padding 1) each double the
    spatial size:
        (1024, 4, 4) -> (512, 8, 8) -> (256, 16, 16)
        -> (128, 32, 32) -> (64, 64, 64) -> (1, 128, 128)
    since H_out = (H_in - 1) * stride - 2 * padding
                  + kernel_size + output_padding.
    """
    super(ComplexGenerator, self).__init__()

    # Project the latent code up to the initial 4x4 feature volume.
    self.dense = ComplexLinear(100, 1024 * 4 * 4)

    # First four upsampling stages, each a (deconv, batchnorm) pair.
    stages = ((1024, 512), (512, 256), (256, 128), (128, 64))
    for idx, (c_in, c_out) in enumerate(stages, start=1):
        setattr(self, "deconv%d" % idx,
                ComplexConvTranspose2d(c_in, c_out, 4, 2, 1))
        setattr(self, "batchnorm%d" % idx, ComplexBatchNorm2d(c_out))

    # Final stage maps down to a single output channel (no batch norm).
    self.deconv5 = ComplexConvTranspose2d(64, 1, 4, 2, 1)
def __init__(self, in_chans, out_chans):
    """Upsampling block: transposed conv (kernel 2, stride 2) -> BN -> ReLU.

    Args:
        in_chans (int): Number of channels in the input.
        out_chans (int): Number of channels in the output.
    """
    super().__init__()
    self.in_chans = int(in_chans)
    self.out_chans = int(out_chans)
    # Build the layers from the normalized int values so the stored
    # attributes and the actual layer widths can never disagree
    # (the original passed the raw, unconverted arguments here).
    self.layers = ComplexSequential(
        ComplexConvTranspose2d(self.in_chans, self.out_chans,
                               kernel_size=2, stride=2,
                               bias=False, setW=False),
        ComplexBatchNorm2d(self.out_chans),
        ComplexReLU(),
    )