def __init__(self, nout, insize, nch, gpu_ids, opt=None):
    """2D convolutional discriminator/decoder head.

    Args:
        nout: number of output units; 1 selects a Sigmoid head
            (binary real/fake), >1 selects LogSoftmax (class scores).
        insize: spatial size of the (square) input image. Assumed to be
            a multiple of 64 so the six stride-`dstep` convs reduce it
            to an integer grid — TODO confirm with callers.
        nch: number of input channels.
        gpu_ids: list of GPU device ids used elsewhere by the model.
        opt: optional options object; missing fields are filled in
            from the defaults below via init_opts.
    """
    super(DecD, self).__init__()

    # Function-as-namespace hack: a throwaway function object whose
    # attributes serve as the default option values for init_opts.
    def opt_default(): 1
    opt_default.noise_std = 0
    opt_default.nclasses = 1

    self.opt = init_opts(opt, opt_default)
    self.gpu_ids = gpu_ids
    # Floor division: six stride-`dstep` convs shrink the input 64x, so
    # the final feature map is fcsize x fcsize. (`/` would yield a float
    # under Python 3 and the wrong value when insize % 64 != 0.)
    self.fcsize = insize // 64
    self.noise = torch.zeros(0)

    self.main = nn.Sequential(
        # No BatchNorm on the first layer (standard DCGAN convention).
        nn.Conv2d(nch, 64, ksize, dstep, 1),
        nn.LeakyReLU(0.2, inplace=True),

        nn.Conv2d(64, 128, ksize, dstep, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2, inplace=True),

        nn.Conv2d(128, 256, ksize, dstep, 1),
        nn.BatchNorm2d(256),
        nn.LeakyReLU(0.2, inplace=True),

        nn.Conv2d(256, 512, ksize, dstep, 1),
        nn.BatchNorm2d(512),
        nn.LeakyReLU(0.2, inplace=True),

        nn.Conv2d(512, 512, ksize, dstep, 1),
        nn.BatchNorm2d(512),
        nn.LeakyReLU(0.2, inplace=True),

        nn.Conv2d(512, 512, ksize, dstep, 1),
        nn.BatchNorm2d(512),
        nn.LeakyReLU(0.2, inplace=True),
    )

    self.fc = nn.Linear(512 * int(self.fcsize ** 2), nout)

    if nout == 1:
        self.nlEnd = nn.Sigmoid()
    else:
        # Explicit dim=1 matches the legacy implicit behavior for the
        # 2D (N, C) output of self.fc and avoids the deprecation warning.
        self.nlEnd = nn.LogSoftmax(dim=1)
def __init__(self, nout, nch, gpu_ids, opt=None):
    """3D convolutional discriminator/decoder head.

    Args:
        nout: number of output units of the final linear layer.
        nch: number of input channels of the 3D volume.
        gpu_ids: list of GPU device ids used elsewhere by the model.
        opt: optional options object; missing fields are filled in
            from the defaults below via init_opts.
    """
    super(DecD, self).__init__()

    # Function-as-namespace hack: a throwaway function object whose
    # attributes serve as the default option values for init_opts.
    def opt_default(): 1
    opt_default.noise_std = 0
    opt_default.nclasses = 1

    self.opt = init_opts(opt, opt_default)
    self.gpu_ids = gpu_ids
    self.fcsize = 2
    self.noise = torch.zeros(0)

    # First stage has no BatchNorm (standard DCGAN convention); every
    # later stage is Conv3d -> BatchNorm3d -> LeakyReLU.
    stages = [
        nn.Conv3d(nch, 64, ksize, dstep, 1),
        nn.LeakyReLU(0.2, inplace=True),
    ]
    channel_pairs = [(64, 128), (128, 256), (256, 512),
                     (512, 1024), (1024, 1024)]
    for c_in, c_out in channel_pairs:
        stages.append(nn.Conv3d(c_in, c_out, ksize, dstep, 1))
        stages.append(nn.BatchNorm3d(c_out))
        stages.append(nn.LeakyReLU(0.2, inplace=True))

    self.main = nn.Sequential(*stages)
    self.fc = nn.Linear(1024 * int(self.fcsize), nout)