Example #1
0
 def __init__(self):
     """Build the CNN: four 3x3 ConvBlocks (1->32->32->32->32) feeding two FC layers (3200->1000->10)."""
     super(Model, self).__init__()
     # conv stack: attribute names c1..c4 are preserved exactly
     channel_pairs = [(1, 32), (32, 32), (32, 32), (32, 32)]
     for idx, (cin, cout) in enumerate(channel_pairs, start=1):
         setattr(self, "c%d" % idx, ConvBlock(cin, cout, 3))
     # classifier head
     self.fc = nn.Linear(3200, 1000)
     self.fc2 = nn.Linear(1000, 10)
 def __init__(self, spec_norm=True, LR=0.2):
     """PatchGAN-style discriminator body.

     Four stride-2 ConvBlocks (4->16->32->64->128 channels, spatial
     256 -> 128 -> 64 -> 32 -> 16) followed by a 3x3 conv producing a
     single-channel score map; all wrapped in one nn.Sequential.
     """
     super(Discriminator, self).__init__()
     layers = []
     in_ch = 4
     for out_ch in (16, 32, 64, 128):  # each stride-2 block halves the spatial size
         layers.append(ConvBlock(in_ch, out_ch, spec_norm, stride=2, LR=LR))
         in_ch = out_ch
     layers.append(nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1))
     self.main = nn.Sequential(*layers)
Example #3
0
 def __init__(self, in_channels=3, spec_norm=False, LR=0.2):
     """Ten-ConvBlock encoder: stride-2 blocks at layers 3/5/7/9 take 256 -> 16, then adaptive-pool to 16x16."""
     super(Encoder, self).__init__()
     # (in_channels, out_channels, downsample?) per layer; comments give output size
     specs = [
         (in_channels, 16, False),  # 256
         (16, 16, False),           # 256
         (16, 32, True),            # 128
         (32, 32, False),           # 128
         (32, 64, True),            # 64
         (64, 64, False),           # 64
         (64, 128, True),           # 32
         (128, 128, False),         # 32
         (128, 256, True),          # 16
         (256, 256, False),         # 16
     ]
     for idx, (cin, cout, down) in enumerate(specs, start=1):
         if down:
             block = ConvBlock(cin, cout, spec_norm, stride=2, LR=LR)
         else:
             block = ConvBlock(cin, cout, spec_norm, LR=LR)
         # setattr registers each block on the Module under its original name layer1..layer10
         setattr(self, "layer%d" % idx, block)
     self.down_sampling = nn.AdaptiveAvgPool2d((16, 16))
 def __init__(self, spec_norm=False, LR=0.2):
     """U-Net-style decoder: ten ConvBlocks (layer10 down to layer1, skip-concat
     input widths) upsampling 16 -> 256, then a 3x3 conv to 3 channels and tanh.
     """
     super(Decoder, self).__init__()
     # (layer index, in_channels, out_channels, upsample?); in_channels include
     # the concatenated skip features. 992 * 2 on layer10 presumably matches the
     # encoder's concatenated feature width — confirm against the forward pass.
     specs = [
         (10, 992 * 2, 256, False),  # 16 -> 16
         (9, 256 + 256, 256, False),  # 16 -> 16
         (8, 256 + 128, 128, True),   # 16 -> 32
         (7, 128 + 128, 128, False),  # 32 -> 32
         (6, 128 + 64, 64, True),     # 32 -> 64
         (5, 64 + 64, 64, False),     # 64 -> 64
         (4, 64 + 32, 32, True),      # 64 -> 128
         (3, 32 + 32, 32, False),     # 128 -> 128
         (2, 32 + 16, 16, True),      # 128 -> 256
         (1, 16 + 16, 16, False),     # 256 -> 256
     ]
     for idx, cin, cout, up in specs:
         if up:
             block = ConvBlock(cin, cout, spec_norm, LR=LR, up=True)
         else:
             block = ConvBlock(cin, cout, spec_norm, LR=LR)
         setattr(self, "layer%d" % idx, block)
     self.last_conv = nn.Conv2d(16, 3, kernel_size=3, stride=1, padding=1)
     self.tanh = nn.Tanh()
Example #5
0
    def addNode(self, **args):
        '''
            Create a node of the requested block type, register it in the
            graph's lookup tables, and return its id.

            for Conv:
            block = 'conv'
            in_channels, out_channels, kernel_size, padding

            for MaxPool:
            block = 'maxpool'
            kernel_size

            for Concat -- not implemented
            block = 'concat'

            for Merge:
            block = 'merge'

            for Add:
            block = 'add'

            @returns : id of the created node
            @raises  : ValueError on an unknown block type,
                       RuntimeError if the node cannot be stored
        '''
        block = args['block']

        if block == 'conv':
            node = ConvBlock(args['in_channels'], args['out_channels'],
                             args['kernel_size'], padding=args['padding'])
            # only conv nodes are indexed by type (matches existing behavior)
            self.nodesByType['conv'].append(node.id)
            self.name2type[node.name] = 'conv'
        elif block == 'maxpool':
            node = MaxPool(args['kernel_size'])
            self.name2type[node.name] = 'maxpool'
        elif block == 'add':
            node = Add()
            self.name2type[node.name] = 'add'
        elif block == 'merge':
            node = Merge()
            self.name2type[node.name] = 'merge'
        # elif block == 'concat':
        #     node = Concat()
        else:
            # previously an unknown type fell through every branch and crashed
            # later with an unrelated NameError on `node`; fail loudly instead
            raise ValueError("unknown block type: %r" % (block,))

        # the original `raise "Hashmap ERR"` raised a string, which is itself a
        # TypeError in Python 3; raise a real exception and keep the cause
        try:
            self.nodes[node.id] = node
        except TypeError as err:
            raise RuntimeError("Hashmap ERR") from err

        # the first node ever added becomes the graph's entry point
        if len(self.nodes) == 1:
            self.begin = node.id

        return node.id