Ejemplo n.º 1
0
	def __init__(self):
		"""SRGAN-style generator: conv head, 8 residual blocks, a post-residual
		conv, two PixelShuffle upsampling stages (4x total), and a Tanh output.

		Fixes misspelled PyTorch names from the original (Sequuential, Prelu,
		PRelu, BatchNorm2D, Conv2D, Tahn).
		"""
		super(Generator, self).__init__()

		# First convolution without the residual block; 9x9 kernel, padding 4
		# keeps the spatial size unchanged.
		self.conv1 = nn.Sequential(nn.Conv2d(3, 64, 9, 1, 4),
								   nn.PReLU())

		# A series of 8 residual blocks over 64 feature maps.
		self.resBlocks = nn.Sequential(*[ResBlock(64) for _ in range(8)])

		# Second convolution after the residual blocks.
		# NOTE(review): 0.8 is BatchNorm2d's positional eps, which is unusually
		# large — presumably copied from a GAN reimplementation; confirm intent.
		self.conv2 = nn.Sequential(nn.Conv2d(64, 64, 3, 1, 1), nn.BatchNorm2d(64, 0.8))

		# Upsampling with PixelShuffle as stated in the paper: each stage expands
		# 64 -> 256 channels, then PixelShuffle(2) trades channels for a 2x
		# spatial upscale (256 / 2**2 = 64 channels out), so two stages give 4x.
		self.upsampling = nn.Sequential(nn.Conv2d(64, 256, 3, 1, 1),
										nn.BatchNorm2d(256),
										nn.PixelShuffle(2),
										nn.PReLU(),
										nn.Conv2d(64, 256, 3, 1, 1),
										nn.BatchNorm2d(256),
										nn.PixelShuffle(2),
										nn.PReLU())
		# Output convolution back to 3 (RGB) channels with Tanh activation.
		self.conv3 = nn.Sequential(nn.Conv2d(64, 3, 9, 1, 4), nn.Tanh())
Ejemplo n.º 2
0
 def __init__(self):
     """LeNet-style CNN for 10-class classification of 3-channel images.

     Fixes the non-existent nn.BatchNorm2D (correct name: nn.BatchNorm2d).
     """
     super(Net, self).__init__()
     # Two conv stages (5x5 kernels), each followed elsewhere by the shared
     # 2x2 max pool; batch norm after each conv.
     self.conv1 = nn.Conv2d(3, 6, 5)
     self.pool = nn.MaxPool2d(2, 2)
     self.BN1 = nn.BatchNorm2d(6)
     self.conv2 = nn.Conv2d(6, 16, 5)
     self.BN2 = nn.BatchNorm2d(16)
     # 16 maps of 5x5 feed the classifier head
     # (assumes 32x32 inputs after two conv+pool stages — TODO confirm).
     self.fc1 = nn.Linear(16 * 5 * 5, 120)
     self.fc2 = nn.Linear(120, 84)
     self.fc3 = nn.Linear(84, 10)
Ejemplo n.º 3
0
	def __init__(self, embed_size):
		"""Image encoder: pretrained ResNet-152 trunk with the top fc layer
		removed, followed by a linear projection to ``embed_size`` plus batch
		norm.

		Args:
			embed_size: dimensionality of the output image embedding.

		Fixes from the original: ``models.resnet`` is not a torchvision
		function (the comment says ResNet-152, so ``models.resnet152``);
		``list[resnet.children()[:,-1]]`` subscripted the ``list`` builtin and
		sliced a generator — the intent is to drop the final (fc) child;
		``nn.BatchNorm2D`` does not exist, and the linear output is
		``(batch, embed_size)``, so 1-d batch norm is the correct layer.
		"""
		super(CNN_Encoder, self).__init__()
		# Load the pretrained ResNet-152 model and drop its classification head.
		resnet = models.resnet152(pretrained=True)
		modules = list(resnet.children())[:-1]
		self.resnet = nn.Sequential(*modules)
		self.linear = nn.Linear(resnet.fc.in_features, embed_size)
		self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
Ejemplo n.º 4
0
 def __init__(self,
              sp1,
              sp2,
              func,
              num_channels,
              hidden_size=20000,
              bias=True):
     """Sparse MNIST CNN with batch normalization.

     Args:
         sp1: sparsity ratio (or k value) for the conv-layer activations.
         sp2: sparsity ratio for the fully-connected hidden layer.
         func: key into ``activation_function.sparse_func_dict``
             selecting the activation (k-WTA / ReLU).
         num_channels: per-layer channel counts (at least 4 entries).
         hidden_size: number of hidden units, default 20000.
         bias: whether the conv layers use a bias term, default True.

     Fixes from the original: the batch-norm sizes referenced an undefined
     ``channels`` (must be ``num_channels``), and ``nn.BatchNorm2D`` is not a
     PyTorch name (``nn.BatchNorm2d``).
     """
     super(SparseMNIST_CNN_BN, self).__init__()
     self.cnn = nn.Sequential(
         nn.Conv2d(1, num_channels[0], 3, padding=1, bias=bias),
         nn.BatchNorm2d(num_channels[0]),
         activation_function.sparse_func_dict[func](sp1),
         nn.Conv2d(num_channels[0],
                   num_channels[1],
                   3,
                   padding=1,
                   stride=2,
                   bias=bias),
         nn.BatchNorm2d(num_channels[1]),
         activation_function.sparse_func_dict[func](sp1),
         nn.Conv2d(num_channels[1],
                   num_channels[2],
                   3,
                   padding=1,
                   bias=bias),
         nn.BatchNorm2d(num_channels[2]),
         activation_function.sparse_func_dict[func](sp1),
         nn.Conv2d(num_channels[2],
                   num_channels[3],
                   3,
                   padding=1,
                   stride=2,
                   bias=bias),
         nn.BatchNorm2d(num_channels[3]),
         activation_function.sparse_func_dict[func](sp1),
         # Two stride-2 convs shrink 28x28 MNIST inputs to 7x7 before flattening.
         Flatten(),
         nn.Linear(7 * 7 * num_channels[3], hidden_size),
         activation_function.Sparsify1D(sp2),
         nn.Linear(hidden_size, 10))
Ejemplo n.º 5
0
 def __init__(self,
              sp1,
              sp2,
              func,
              num_channels,
              hidden_size=20000,
              bias=True):
     """Sparse MNIST CNN with batch normalization (models.* activation variant).

     Args:
         sp1: sparsity ratio (or k value) for the conv-layer activations.
         sp2: sparsity ratio for the fully-connected hidden layer.
         func: key into ``models.sparse_func_dict`` selecting the activation.
         num_channels: per-layer channel counts (at least 4 entries).
         hidden_size: number of hidden units, default 20000.
         bias: whether the conv layers use a bias term, default True.

     Fixes from the original: the batch-norm sizes referenced an undefined
     ``channels`` (must be ``num_channels``), and ``nn.BatchNorm2D`` is not a
     PyTorch name (``nn.BatchNorm2d``).
     """
     super(SparseMNIST_CNN_BN, self).__init__()
     self.cnn = nn.Sequential(
         nn.Conv2d(1, num_channels[0], 3, padding=1, bias=bias),
         nn.BatchNorm2d(num_channels[0]),
         models.sparse_func_dict[func](sp1),
         nn.Conv2d(num_channels[0],
                   num_channels[1],
                   3,
                   padding=1,
                   stride=2,
                   bias=bias),
         nn.BatchNorm2d(num_channels[1]),
         models.sparse_func_dict[func](sp1),
         nn.Conv2d(num_channels[1],
                   num_channels[2],
                   3,
                   padding=1,
                   bias=bias),
         nn.BatchNorm2d(num_channels[2]),
         models.sparse_func_dict[func](sp1),
         nn.Conv2d(num_channels[2],
                   num_channels[3],
                   3,
                   padding=1,
                   stride=2,
                   bias=bias),
         nn.BatchNorm2d(num_channels[3]),
         models.sparse_func_dict[func](sp1),
         # Two stride-2 convs shrink 28x28 MNIST inputs to 7x7 before flattening.
         Flatten(),
         nn.Linear(7 * 7 * num_channels[3], hidden_size),
         models.Sparsify1D(sp2),
         nn.Linear(hidden_size, 10))
Ejemplo n.º 6
0
    def __init__(self):
        """Tiny CNN: conv -> batch norm -> ReLU -> 8x8 average pool -> linear head.

        There is no softmax layer because torch's CrossEntropyLoss applies
        log-softmax internally (translated from the original Korean comment).

        Fixes from the original, which mixed MXNet-Gluon names into PyTorch
        code: ``nn.Conv2D``/``strides=`` -> ``nn.Conv2d``/``stride=``,
        ``nn.BatchNorm2D`` -> ``nn.BatchNorm2d``, ``nn.Activation('relu')`` ->
        ``nn.ReLU()``, and bare ``AvgPool2D``/``Flatten`` -> the ``torch.nn``
        equivalents.
        """
        super(simpleModel, self).__init__()

        self.conv = nn.Conv2d(in_channels=3,
                              out_channels=16,
                              kernel_size=3,
                              padding=1,
                              stride=1,
                              bias=False)

        self.batchnorm = nn.BatchNorm2d(16, affine=True)
        self.relu = nn.ReLU()
        self.avg_pool = nn.AvgPool2d(8)
        self.flatten = nn.Flatten()
        # 256 = 16 channels * 4 * 4
        # (assumes 32x32 inputs pooled down to 4x4 — TODO confirm).
        self.fc = nn.Linear(256, 10)
Ejemplo n.º 7
0
 def __init__(self, in_channels, out_channels, **kwargs):
     """Conv -> batch norm -> ReLU building block.

     Args:
         in_channels: number of input channels.
         out_channels: number of output channels.
         **kwargs: forwarded to nn.Conv2d (kernel_size, stride, padding, ...).

     Fixes from the original: ``super().__init__(self)`` passed self as an
     argument (a TypeError for nn.Module.__init__), and ``nn.BatchNorm2D`` /
     ``nn.Relu`` are not PyTorch names (``nn.BatchNorm2d`` / ``nn.ReLU``).
     """
     super().__init__()
     self.conv1 = nn.Conv2d(in_channels, out_channels, **kwargs)
     self.bn1 = nn.BatchNorm2d(out_channels)
     self.relu = nn.ReLU()