def __init__(self, v_compress, shrink, bits, fuse_level, level):
    """Decoder cell: a 1x1 conv expands the bottleneck codes into 512
    channels, four ConvLSTM stages refine/upsample, and a final 1x1 conv
    maps 32 channels to a 3-channel image residual.

    Args:
        v_compress: if True, rnn2-rnn4 expect extra context features
            concatenated to their inputs (widths scaled by ``shrink``).
        shrink: divisor applied to the fused context channel widths.
        bits: bottleneck channels per level; conv1 input is bits*(level+1).
        fuse_level: gates which rnn stages receive fused context
            (rnn2 needs >= 3, rnn3 needs >= 2; rnn4 always fuses).
        level: zero-based level index used to size conv1's input.
    """
    super(DecoderCell, self).__init__()

    # Flags consulted elsewhere (presumably in forward()) to drive fusion.
    self.v_compress = v_compress
    self.fuse_level = fuse_level
    print('\tDecoder fuse level: {}'.format(self.fuse_level))

    # Hoist the input-channel arithmetic out of the layer constructors.
    # Fusion widens an rnn's input only when v_compress is on AND the
    # fuse level reaches that stage.
    in_rnn2 = 128 + 256 // shrink * 2 if v_compress and fuse_level >= 3 else 128
    in_rnn3 = 128 + 128 // shrink * 2 if v_compress and fuse_level >= 2 else 128
    in_rnn4 = 64 + 64 // shrink * 2 if v_compress else 64

    # Layers (construction order kept identical for reproducible init).
    self.conv1 = nn.Conv2d(
        bits * (level + 1), 512,
        kernel_size=1, stride=1, padding=0, bias=False)
    self.rnn1 = ConvLSTMCell(
        512, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        in_rnn2, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn3 = ConvLSTMCell(
        in_rnn3, 256,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.rnn4 = ConvLSTMCell(
        in_rnn4, 128,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.conv2 = nn.Conv2d(
        32, 3, kernel_size=1, stride=1, padding=0, bias=False)
def __init__(self):
    """Encoder cell: a strided conv followed by three ConvLSTM stages,
    each halving spatial resolution (stride 2) while widening channels
    64 -> 256 -> 512 -> 512."""
    super(EncoderCell, self).__init__()

    # Front-end conv: 3-channel input, downsampled by 2.
    self.conv = nn.Conv2d(
        3, 64,
        kernel_size=3, stride=2, padding=1, bias=False)

    # Recurrent downsampling stack; every stage uses a 1x1 hidden kernel.
    self.rnn1 = ConvLSTMCell(
        64, 256,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        256, 512,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn3 = ConvLSTMCell(
        512, 512,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
def __init__(self, v_compress, stack, fuse_encoder, fuse_level):
    """Slim encoder variant: a strided conv then three ConvLSTM stages,
    all 128 channels wide. Fusion-related widening is disabled here —
    the flags are stored but the rnn input widths are fixed.

    Args:
        v_compress: stored for use by forward(); does not alter widths here.
        stack: if True the front conv takes a 9-channel stacked input,
            else a plain 3-channel image.
        fuse_encoder: stored; only controls the diagnostic print here.
        fuse_level: stored; printed when fuse_encoder is set.
    """
    super(EncoderCell, self).__init__()

    # Keep the flags around; this variant does not widen rnn inputs.
    self.v_compress = v_compress
    self.fuse_encoder = fuse_encoder
    self.fuse_level = fuse_level
    if fuse_encoder:
        print('\tEncoder fuse level: {}'.format(self.fuse_level))

    # Front-end conv: stacked (9-ch) or single (3-ch) input, stride 2.
    self.conv = nn.Conv2d(
        9 if stack else 3, 64,
        kernel_size=3, stride=2, padding=1, bias=False)

    self.rnn1 = ConvLSTMCell(
        64, 128,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        128, 128,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
    # NOTE(review): rnn3 uses kernel_size=4 while sibling stages use 3 —
    # with stride=2/padding=1 this gives exact halving on even sizes;
    # confirm it is intentional and not a typo.
    self.rnn3 = ConvLSTMCell(
        128, 128,
        kernel_size=4, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
def __init__(self, v_compress, stack, fuse_encoder, fuse_level):
    """Encoder cell with optional encoder-side fusion: a (frozen) strided
    conv then three ConvLSTM stages (-> 256 -> 512 -> 512). When both
    fuse_encoder and v_compress are set, later stages take concatenated
    context features, widening their inputs.

    Args:
        v_compress: enables the widened fused inputs (with fuse_encoder).
        stack: if True the front conv takes a 9-channel stacked input.
        fuse_encoder: master switch for encoder-side fusion widths.
        fuse_level: stage gate — rnn2 fuses when >= 2, rnn3 when >= 3.
    """
    super(EncoderCell, self).__init__()

    self.v_compress = v_compress
    self.fuse_encoder = fuse_encoder
    self.fuse_level = fuse_level
    if fuse_encoder:
        print('\tEncoder fuse level: {}'.format(self.fuse_level))

    # Front-end conv, downsampling by 2; its weights are frozen below.
    self.conv = nn.Conv2d(
        9 if stack else 3, 64,
        kernel_size=3, stride=2, padding=1, bias=False)
    for p in self.conv.parameters():
        p.requires_grad = False  # keep the front conv fixed during training

    # Hoisted input widths: fusion doubles/widens inputs per stage.
    fuse = fuse_encoder and v_compress
    in_rnn1 = 128 if fuse else 64
    in_rnn2 = 384 if fuse and self.fuse_level >= 2 else 256
    in_rnn3 = 768 if fuse and self.fuse_level >= 3 else 512

    self.rnn1 = ConvLSTMCell(
        in_rnn1, 256,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        in_rnn2, 512,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn3 = ConvLSTMCell(
        in_rnn3, 512,
        kernel_size=3, stride=2, padding=1,
        hidden_kernel_size=1, bias=False)
def __init__(self, bottleneck, output_channels):
    """Parameterized decoder cell: a 1x1 conv lifts ``bottleneck``
    channels to 512, four ConvLSTM stages refine, and a final 1x1 conv
    maps 32 channels to ``output_channels``.

    Args:
        bottleneck: channel count of the incoming code tensor.
        output_channels: channel count of the reconstructed output.
    """
    super(DecoderCell, self).__init__()

    self.conv1 = nn.Conv2d(
        bottleneck, 512,
        kernel_size=1, stride=1, padding=0, bias=False)
    self.rnn1 = ConvLSTMCell(
        512, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        128, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn3 = ConvLSTMCell(
        128, 256,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.rnn4 = ConvLSTMCell(
        64, 128,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.conv2 = nn.Conv2d(
        32, output_channels,
        kernel_size=1, stride=1, padding=0, bias=False)
def __init__(self):
    """Fixed-size decoder cell: 32-channel codes -> 1x1 conv to 512 ->
    four ConvLSTM stages -> 1x1 conv down to a 3-channel image."""
    super(DecoderCell, self).__init__()

    self.conv1 = nn.Conv2d(
        32, 512,
        kernel_size=1, stride=1, padding=0, bias=False)
    self.rnn1 = ConvLSTMCell(
        512, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        128, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn3 = ConvLSTMCell(
        128, 256,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.rnn4 = ConvLSTMCell(
        64, 128,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.conv2 = nn.Conv2d(
        32, 3,
        kernel_size=1, stride=1, padding=0, bias=False)
def __init__(self):
    """Fixed-size decoder cell (duplicate of the 32-channel variant):
    1x1 conv expands 32-channel codes to 512, four ConvLSTM stages
    refine, and a final 1x1 conv emits a 3-channel image."""
    super(DecoderCell, self).__init__()

    # Bottleneck expansion: 32 code channels -> 512 features.
    self.conv1 = nn.Conv2d(
        32, 512,
        kernel_size=1, stride=1, padding=0, bias=False)

    # Recurrent refinement stack.
    self.rnn1 = ConvLSTMCell(
        512, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        128, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn3 = ConvLSTMCell(
        128, 256,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.rnn4 = ConvLSTMCell(
        64, 128,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)

    # Output projection: 32 features -> RGB.
    self.conv2 = nn.Conv2d(
        32, 3,
        kernel_size=1, stride=1, padding=0, bias=False)
def __init__(self, v_compress, shrink, bits, fuse_level):
    """Decoder cell variant that expands 10 parallel code groups with a
    grouped 1x1 conv stack (bits -> 64 -> 128 -> 512 per group), then
    runs the usual four ConvLSTM stages and a final 1x1 conv to RGB.

    Args:
        v_compress: if True, rnn2-rnn4 expect fused context features
            concatenated to their inputs (widths scaled by ``shrink``).
        shrink: divisor applied to the fused context channel widths.
        bits: bottleneck channels per group (10 groups total).
        fuse_level: gates which rnn stages receive fused context
            (rnn2 needs >= 3, rnn3 needs >= 2; rnn4 always fuses).
    """
    super(DecoderCell2, self).__init__()

    self.v_compress = v_compress
    self.fuse_level = fuse_level
    print('\tDecoder fuse level: {}'.format(self.fuse_level))

    # Grouped expansion: each of the 10 groups is widened independently
    # (groups=10 keeps the per-group channels separate).
    self.conv1 = nn.Conv2d(
        bits * 10, 64 * 10, groups=10,
        kernel_size=1, stride=1, padding=0, bias=False)
    self.conv2 = nn.Conv2d(
        64 * 10, 128 * 10, groups=10,
        kernel_size=1, stride=1, padding=0, bias=False)
    self.conv3 = nn.Conv2d(
        128 * 10, 512 * 10, groups=10,
        kernel_size=1, stride=1, padding=0, bias=False)

    # Fused input widths, mirroring the non-grouped DecoderCell.
    in_rnn2 = 128 + 256 // shrink * 2 if v_compress and fuse_level >= 3 else 128
    in_rnn3 = 128 + 128 // shrink * 2 if v_compress and fuse_level >= 2 else 128
    in_rnn4 = 64 + 64 // shrink * 2 if v_compress else 64

    self.rnn1 = ConvLSTMCell(
        512, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn2 = ConvLSTMCell(
        in_rnn2, 512,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=1, bias=False)
    self.rnn3 = ConvLSTMCell(
        in_rnn3, 256,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.rnn4 = ConvLSTMCell(
        in_rnn4, 128,
        kernel_size=3, stride=1, padding=1,
        hidden_kernel_size=3, bias=False)
    self.conv_end = nn.Conv2d(
        32, 3,
        kernel_size=1, stride=1, padding=0, bias=False)