def __init__(self):
    """Graph network (6 message-passing layers) plus a small 3D conv tower."""
    super().__init__()
    self.num_layer = 6
    node_dim = 64
    edge_dim = 64
    self.edge_dim = 64
    self.loss = WeightedNormalizedAbsoluteError()
    with self.init_scope():
        self.gn1 = ElementLinear(node_dim)
        # one EdgeUpdate + InteractionNetwork pair per message-passing layer
        for idx in range(self.num_layer):
            self.add_link('eup{}'.format(idx), EdgeUpdate(edge_dim))
            self.add_link('int{}'.format(idx), InteractionNetwork(node_dim))
        self.gn2 = ElementLinear(8)
        self.n1 = L.Linear(256)
        self.n2 = L.Linear(5)
        # conv tower 1 -> 4 -> 8 -> 16 -> 32 channels; only the first stage strides by 2
        tower = (1, 4, 8, 16, 32)
        for idx, (cin, cout) in enumerate(zip(tower[:-1], tower[1:]), start=1):
            stride = 2 if idx == 1 else 1
            setattr(self, 'conv_3d_{}'.format(idx),
                    L.Convolution3D(cin, cout, ksize=3, pad=1, stride=stride))
            setattr(self, 'bn{}'.format(idx), L.BatchNormalization(cout))
def __init__(self, n_point, with_count):
    """Voxel feature encoder: five 3D conv stages shrinking 32^3 down to 1^3."""
    self._n_point = n_point
    self._with_count = with_count
    super().__init__()
    # channel widths per stage; first entry is None (infer input channels)
    widths = [None, 32, 64, 128, 256, 512]
    with self.init_scope():
        self.conv1_1 = L.Convolution3D(widths[0], widths[1], 3, 1, pad=1)  # 32
        self.conv2_1 = L.Convolution3D(widths[1], widths[2], 4, 2, pad=1)  # 32 -> 16
        self.conv3_1 = L.Convolution3D(widths[2], widths[3], 4, 2, pad=1)  # 16 -> 8
        self.conv4_1 = L.Convolution3D(widths[3], widths[4], 4, 2, pad=1)  # 8 -> 4
        self.conv5_1 = L.Convolution3D(widths[4], widths[5], 4, 1, pad=0)  # 4 -> 1
def __init__(
        self,
        base_channels=256,
        norm=GroupNormalization,
        bn_first=True,
        grid_size=32,
        input_shape=(160, 160, 128),
        upper=True,  # whether to predict the upper half in the CPC task
        cpc_pattern='updown',
        comm=None):
    """CPC predictor head: eight residual blocks followed by a 1x1x1 conv."""
    ssize = int(grid_size * 0.5)  # stride between grid cells (half a cell)
    # number of grid positions along each spatial axis
    self.gl0, self.gl1, self.gl2 = (int(extent / ssize - 1) for extent in input_shape)
    self.cut_l = int(self.gl2 / 2)  # split index along the last axis
    self.base_channels = base_channels
    self.upper = upper
    self.cpc_pattern = cpc_pattern
    super(CPCPredictor, self).__init__()
    with self.init_scope():
        for idx in range(8):
            setattr(self, 'pred_block{}'.format(idx),
                    ResBlock(base_channels, norm, bn_first, comm))
        self.pred1 = L.Convolution3D(None, base_channels, ksize=(1, 1, 1),
                                     stride=1, pad=0)
def init_layers(self):
    """Build the CBR stack: one entry layer `d`, n_hidden_layers hidden CBRs
    `c00..`, and a final plain conv `c<n_hidden_layers>` emitting out_ch.

    Channel widths halve each hidden layer, floored at out_ch.
    Fix: removed the unused local `n_step` (dead `math.log2` computation).
    NOTE(review): the entry layer uses sample='up' while hidden layers use
    sample='down' — looks intentional but confirm against CBR's semantics.
    """
    i0 = self.in_ch
    i1 = self.in_ch // 2
    with self.init_scope():
        # entry layer has a fixed name, so plain attribute assignment suffices
        self.d = CBR(
            3, i0, i1, ksize=3, stride=1,
            activation=F.relu,
            sample='up',
            bn=self.use_batch_norm,
            dropout=self.dropout,
        )
        i0 = i1
        for i in range(self.n_hidden_layers):
            setattr(self, 'c%02d' % i, CBR(
                3, i0, i1, ksize=3, stride=1,
                activation=F.relu,
                sample='down',
                bn=self.use_batch_norm,
                dropout=self.dropout,
            ))
            i0 = i1
            i1 = max(i1 // 2, self.out_ch)
        # final projection to out_ch without CBR wrapping
        setattr(
            self, 'c%02d' % self.n_hidden_layers,
            L.Convolution3D(i0, self.out_ch, 3, 1, pad=1)
        )
def __init__(self, comm=None):
    """Attention gate: a single 1x1x1 conv producing a one-channel mask.

    `comm` is accepted for interface parity with sibling blocks; it is not
    used in this constructor.
    """
    super(Attention, self).__init__()
    with self.init_scope():
        self.c = L.Convolution3D(None, out_channels=1, ksize=1, stride=1, pad=0)
def __init__(self, input_shape):
    """Latent-to-volume entry block: dense expansion + 1x1x1 conv.

    bottom_shape is input_shape / 16 and the dense layer emits 16 channels'
    worth of that volume; output_shape (input_shape / 8) is presumably
    consumed by the caller after upsampling — confirm in forward().
    """
    self.bottom_shape = tuple(extent // 16 for extent in input_shape)
    bottom_size = 16 * np.prod(self.bottom_shape)
    self.output_shape = tuple(extent // 8 for extent in input_shape)
    super(VU, self).__init__()
    with self.init_scope():
        self.dense = L.Linear(None, bottom_size)
        self.conv1 = L.Convolution3D(None, 256, 1, 1, 0)
def __init__(self, ch=128):
    """3D discriminator: four stride-2 convs, a 1-channel conv, and a linear head."""
    super(Discriminator, self).__init__()
    self.ch = ch
    self.up_sample_dim = (4, 4, 2)
    self.out_size = self.xp.prod(self.up_sample_dim)
    with self.init_scope():
        w = chainer.initializers.HeNormal(0.02)
        widths = (ch // 8, ch // 4, ch // 2, ch)
        self.c1 = L.Convolution3D(3, widths[0], ksize=4, stride=2, pad=1, initialW=w)
        self.c2 = L.Convolution3D(widths[0], widths[1], ksize=4, stride=2, pad=1, initialW=w)
        self.c3 = L.Convolution3D(widths[1], widths[2], ksize=4, stride=2, pad=1, initialW=w)
        self.c4 = L.Convolution3D(widths[2], widths[3], ksize=4, stride=2, pad=1, initialW=w)
        self.c5 = L.Convolution3D(widths[3], 1, ksize=3, stride=1, pad=1, initialW=w)
        self.l1 = L.Linear(self.out_size, 1, initialW=w)
        # every conv except the first and last is followed by layer norm
        self.ln2 = L.LayerNormalization()
        self.ln3 = L.LayerNormalization()
        self.ln4 = L.LayerNormalization()
def __init__(self, channels, norm=GroupNormalization, down_sample=True,
             n_blocks=1, bn_first=True, comm=None):
    """Encoder stage: one 3x3x3 conv (stride 2 when down_sample) + n_blocks ResBlocks."""
    self.down_sample = down_sample
    self.n_blocks = n_blocks
    super(DownBlock, self).__init__()
    with self.init_scope():
        stride = 2 if down_sample else 1
        self.d = L.Convolution3D(None, channels, 3, stride, 1)
        for idx in range(n_blocks):
            setattr(self, 'block{}'.format(idx),
                    ResBlock(channels, norm, bn_first, comm))
def __init__(self, channels, norm=GroupNormalization, down_sampling=False, comm=None):
    """Conv + normalization block; the conv strides by 2 when down_sampling."""
    super(CNR, self).__init__()
    with self.init_scope():
        stride = 2 if down_sampling else 1
        self.c = L.Convolution3D(None, channels, 3, stride, 1)
        # the normalization classes differ in constructor signature,
        # so dispatch on the class name
        norm_name = norm.__name__
        if norm_name == 'MultiNodeBatchNormalization':
            self.n = norm(channels, comm, eps=1e-5)
        elif norm_name == 'BatchNormalization':
            self.n = norm(channels, eps=1e-5)
        elif norm_name == 'GroupNormalization':
            self.n = norm(groups=8, size=channels)
        else:
            self.n = norm(channels)
def __init__(self, in_channels, out_channels):
    """Self-attention over 3D feature maps; query/key projected to in_channels // 8."""
    super(SelfAttention3D, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    qk_channels = in_channels // 8

    def projection(out_ch):
        # 1x1x1 projection conv from in_channels to out_ch
        return L.Convolution3D(in_channels=in_channels, out_channels=out_ch,
                               ksize=1, pad=0, initialW=initializer)

    with self.init_scope():
        self.query_conv = projection(qk_channels)
        self.key_conv = projection(qk_channels)
        self.value_conv = projection(out_channels)
        # learnable mixing weight, initialized to 0 (starts as pure pass-through)
        self.gamma = chainer.Parameter(
            initializer=np.zeros(1, dtype=np.float32))
def __init__(self, channels, norm=GroupNormalization, bn_first=True, mode='sum', comm=None):
    """Decoder up block: 1x1x1 channel projection followed by a ResBlock.

    Args:
        mode: skip-merge mode; 'concat' switches the ResBlock into concat
            mode, any other value means summation.

    Fix: collapsed the redundant `True if ... else False` ternary to the
    boolean expression itself.
    """
    self.mode = mode
    concat_mode = mode == 'concat'
    super(UpBlock, self).__init__()
    with self.init_scope():
        self.c1 = L.Convolution3D(None, channels, 1, 1, 0)
        self.rb = ResBlock(channels, norm, bn_first, comm, concat_mode)
def __init__(self, groups, dropout_rate):
    """Grouped channel-mixing convolution with optional dropout.

    NOTE(review): despite the class name ending in `2d`, this builds a 3-D
    convolution that slides along the first (channel) axis only — confirm
    the naming is intentional.
    """
    super(ChannelwiseConv2d, self).__init__()
    self.use_dropout = dropout_rate > 0.0
    with self.init_scope():
        # kernel/stride/pad are 1 (or 0) in the two trailing spatial dims,
        # so the conv mixes only along the leading axis
        self.conv = L.Convolution3D(
            in_channels=1,
            out_channels=groups,
            ksize=(4 * groups, 1, 1),
            stride=(groups, 1, 1),
            pad=(2 * groups - 1, 0, 0),
            nobias=True,
        )
        if self.use_dropout:
            self.dropout = partial(F.dropout, ratio=dropout_rate)
def __init__(self, use_bn, k):
    """conv5 tower + 3-layer FC head; conv dimensionality follows CONFIG.conv_type.

    Raises:
        ValueError: if CONFIG.conv_type is not "2D", "3D" or "k". (The
            original silently skipped creating the conv layers in that
            case, deferring the failure to the first forward pass.)
    """
    self.use_bn = use_bn
    self.k = k
    super().__init__()
    # resolve the conv class once instead of duplicating the branch per layer
    if CONFIG.conv_type == "2D":
        conv_cls = L.Convolution2D
    elif CONFIG.conv_type in ("3D", "k"):
        conv_cls = L.Convolution3D
    else:
        raise ValueError(
            "unsupported CONFIG.conv_type: {!r}".format(CONFIG.conv_type))
    with self.init_scope():
        self.conv5_1 = conv_cls(None, 512, ksize=3, stride=1, pad=1)
        self.bn5_1 = L.BatchNormalization(512)
        self.conv5_2 = conv_cls(None, 512, ksize=3, stride=1, pad=1)
        self.bn5_2 = L.BatchNormalization(512)
        self.fc1 = L.Linear(512, 512)
        self.fc2 = L.Linear(512, 512)
        self.fc3 = L.Linear(512, 128)
def __init__(self, base_channels, out_channels, norm=GroupNormalization,
             bn_first=True, mode='sum', comm=None):
    """Boundary stream: bottleneck conv + ResBlock, three attention-gated
    UpBlocks, and a 1x1x1 output conv."""
    super(BoundaryStream, self).__init__()
    with self.init_scope():
        bottleneck = 2 ** 2 * base_channels
        self.c = L.Convolution3D(None, bottleneck, 1, 1, 0)
        self.rb = ResBlock(bottleneck, norm, bn_first, comm)
        self.att = Attention(comm)
        # decoder levels with per-level attention gates
        for level in range(3):
            setattr(self, 'dec_block{}'.format(level),
                    UpBlock(2 ** level * base_channels, norm, bn_first, mode, comm))
            setattr(self, 'att_block{}'.format(level), Attention(comm))
        self.dec_end = L.Convolution3D(None, out_channels, ksize=1, stride=1, pad=0)
def __init__(self, base_channels, out_channels, norm=GroupNormalization,
             bn_first=True, mode='sum', comm=None):
    """Decoder: three UpBlocks (widths 1x/2x/4x base) + a 1x1x1 output conv."""
    super(Decoder, self).__init__()
    with self.init_scope():
        for level in range(3):
            block = UpBlock(2 ** level * base_channels, norm, bn_first, mode, comm)
            setattr(self, 'dec_block{}'.format(level), block)
        self.dec_end = L.Convolution3D(None, out_channels, 1, 1, 0)
def __init__(self):
    """Seven-stage 3D conv tower followed by a three-layer FC head (4 outputs)."""
    super().__init__()
    # (in_ch, out_ch, ksize, stride) for each conv stage
    conv_specs = (
        (4, 16, 3, 1),
        (16, 32, 4, 1),
        (32, 64, 4, 2),
        (64, 128, 3, 1),
        (128, 256, 2, 1),
        (256, 256, 2, 1),
        (256, 128, 2, 1),
    )
    with self.init_scope():
        for idx, (cin, cout, k, s) in enumerate(conv_specs, start=1):
            setattr(self, 'Conv{}'.format(idx),
                    L.Convolution3D(in_channels=cin, out_channels=cout,
                                    ksize=k, stride=s))
        self.FC1 = L.Linear(None, 2048)
        self.FC2 = L.Linear(None, 256)
        self.Output = L.Linear(None, 4)
def __init__(self, in_channels, base_channels, norm=GroupNormalization,
             bn_first=True, input_shape=(160, 192, 128), comm=None):
    """VAE decoder branch: VU entry, three UpBlocks, 1x1x1 conv back to in_channels."""
    super(VAE, self).__init__()
    with self.init_scope():
        self.vu = VU(input_shape)
        for level in range(3):
            block = UpBlock(2 ** level * base_channels, norm, bn_first, None, comm)
            setattr(self, 'vae_block{}'.format(level), block)
        self.vae_end = L.Convolution3D(None, in_channels, 1, 1, 0)
def __init__(self, base=64):
    """Generator with a 3D decoding branch and a 2D encode/decode branch."""
    w = initializers.Normal(0.01)
    super(Generator, self).__init__()
    with self.init_scope():
        # latent projection
        self.l0 = L.Linear(128, 512 * 4 * 4, initialW=w)
        # 3D decoder: channels halve while upsampling each stage
        self.cbr3d_0 = CBR3D(base * 8, base * 4, up=True)
        self.cbr3d_1 = CBR3D(base * 4, base * 2, up=True)
        self.cbr3d_2 = CBR3D(base * 2, base, up=True)
        self.cbr3d_3 = CBR3D(base, base, up=True)
        self.cbr3d_4 = L.Convolution3D(base, 3, 3, 1, 1, initialW=w)  # 3-channel output
        self.c_m = L.Convolution3D(base, 1, 3, 1, 1, initialW=w)      # 1-channel mask
        # 2D encoder: channels double while downsampling
        self.cbr2d_0 = CBR2D(3, base, down=True)
        self.cbr2d_1 = CBR2D(base, base * 2, down=True)
        self.cbr2d_2 = CBR2D(base * 2, base * 4, down=True)
        self.cbr2d_3 = CBR2D(base * 4, base * 8, down=True)
        # bottleneck residual blocks
        self.res0 = ResBlock_2D(base * 8, base * 8)
        self.res1 = ResBlock_2D(base * 8, base * 8)
        # 2D decoder back to a 3-channel image
        self.cbr2d_4 = CBR2D(base * 8, base * 4, up=True)
        self.cbr2d_5 = CBR2D(base * 4, base * 2, up=True)
        self.cbr2d_6 = CBR2D(base * 2, base, up=True)
        self.cbr2d_7 = CBR2D(base, base, up=True)
        self.cbr2d_8 = L.Convolution2D(base, 3, 3, 1, 1, initialW=w)
def __init__(self, base_channels=32, norm=GroupNormalization, bn_first=True,
             ndim_latent=128, comm=None):
    """Encoder: stem conv + four DownBlocks (widths 1x/2x/4x/8x base).

    `ndim_latent` is accepted but not consumed in this constructor —
    presumably used elsewhere; confirm before removing.
    """
    super(Encoder, self).__init__()
    # (width multiplier, down-sample?, number of residual blocks) per stage
    stages = ((1, False, 1), (2, True, 2), (4, True, 2), (8, True, 4))
    with self.init_scope():
        self.enc_initconv = L.Convolution3D(None, base_channels, 3, 1, 1)
        for idx, (mult, down, n_res) in enumerate(stages):
            block = DownBlock(mult * base_channels, norm, down, n_res, bn_first, comm)
            setattr(self, 'enc_block{}'.format(idx), block)
def __init__(self, in_ch, out_ch, ksize=1, stride=1, pad=0, nobias=False,
             gain=np.sqrt(2), lrmul=1):
    """3D conv with equalized learning rate (ProGAN/StyleGAN style).

    Weights are drawn from N(0, 1/lrmul) and rescaled at runtime by
    inv_c = gain / sqrt(fan_in) * lrmul.

    Fix: the fan-in of a cubic 3D convolution is in_ch * ksize**3; the
    original used ksize**2 (the 2D formula), which mis-scales 3D kernels.
    Assumes `ksize` is a scalar — TODO confirm no caller passes a tuple
    (the original expression would also have failed for tuples).
    """
    w = chainer.initializers.Normal(1.0 / lrmul)  # equalized learning rate
    fan_in = in_ch * ksize ** 3
    self.inv_c = gain * np.sqrt(1.0 / fan_in)
    self.inv_c = self.inv_c * lrmul
    super(EqualizedConv3d, self).__init__()
    with self.init_scope():
        self.c = L.Convolution3D(in_ch, out_ch, ksize, stride, pad,
                                 initialW=w, nobias=nobias)
k = k.reshape((1, 1, 3, 3, 3)) k = k.transpose((0, 1, 3, 4, 2)) default_config = { 'mn': True, 'gpu_start_id': 0, } config = copy.copy(default_config) comm, is_master, device = _setup_communicator(config, gpu_start_id=0) get_device_from_id(device).use() conv3d = L.Convolution3D(in_channels=1, out_channels=1, ksize=3, stride=1, pad=1, initialW=k) conv3d.to_gpu() with chainer.no_backprop_mode(), chainer.using_config('train', False): for num in range(1, num_range + 1): if dataset == 'nested_brats': num = "{0:0=3d}".format(num) path = label_path + 'BRATS_' + str(num) + '.nii.gz' try: nii_img = nib.load(path) affine = nii_img.affine img = nii_img.get_data() shape = img.shape
def __init__(self, in_channels=1, hidden_channels=16, out_channels=1, n_latent=3):
    """VAE decoder: dense latent expansion, then four deconv+conv stages (S4..S1).

    NOTE(review): `in_channels` is unused here — dc6 hard-codes 1 input
    channel, presumably matching a (1, 4, 4, 4) reshape of `hidden`'s
    output; confirm in forward().
    """
    self.n_latent = n_latent
    initializer = chainer.initializers.HeNormal()
    super(Decoder, self).__init__()
    h = hidden_channels
    with self.init_scope():
        self.hidden = L.Linear(in_size=None, out_size=(4 * 4 * 4),
                               initialW=initializer)
        # S4: upsample then refine
        self.dc6 = L.Deconvolution3D(in_channels=1, out_channels=h * 4,
                                     ksize=4, stride=2, initialW=initializer)
        self.dc5 = L.Convolution3D(in_channels=h * 4, out_channels=h * 4,
                                   ksize=3, stride=1, pad=0, initialW=initializer)
        self.bnd5 = L.BatchNormalization(h * 4)
        # S3
        self.dc4 = L.Deconvolution3D(in_channels=h * 4, out_channels=h * 2,
                                     ksize=4, stride=2, initialW=initializer)
        self.dc3 = L.Convolution3D(in_channels=h * 2, out_channels=h * 2,
                                   ksize=3, stride=1, pad=0, initialW=initializer)
        self.bnd3 = L.BatchNormalization(h * 2)
        # S2
        self.dc2 = L.Deconvolution3D(in_channels=h * 2, out_channels=h,
                                     ksize=4, stride=2, initialW=initializer)
        self.dc1 = L.Convolution3D(in_channels=h, out_channels=h,
                                   ksize=3, stride=1, pad=0, initialW=initializer)
        self.bnd1 = L.BatchNormalization(h)
        # S1: final upsample and projection to out_channels
        self.dc0 = L.Deconvolution3D(in_channels=h, out_channels=h,
                                     ksize=4, stride=2, initialW=initializer)
        self.dc_1 = L.Convolution3D(in_channels=h, out_channels=out_channels,
                                    ksize=3, stride=1, pad=0, initialW=initializer)
def __init__(self, n_actions):
    """Policy/value network initialized from Network_trained's weights, plus a
    fixed 33^3 identity conv (conv_R) for the reward map."""
    w = chainer.initializers.HeNormal()
    # 33x33x33 kernel that is 1 only at the center voxel
    wI = np.zeros((1, 1, 33, 33, 33))
    wI[:, :, 16, 16, 16] = 1
    net = Network_trained(n_actions)

    def dil_conv(src, rate):
        # 3x3x3 dilated conv initialized from the trained layer `src`
        return L.Convolution3D(16, 16, 3, dilate=rate, stride=1, pad=rate,
                               initialW=src.W.data, initial_bias=src.b.data)

    def gate_conv(src):
        # bias-free 3x3x3 conv for the GRU-style gates
        return L.Convolution3D(16, 16, 3, stride=1, pad=1, nobias=True,
                               initialW=src.W.data)

    super(Network, self).__init__(
        conv1=L.Convolution3D(1, 16, 3, stride=1, pad=1, nobias=False,
                              initialW=net.conv1.W.data,
                              initial_bias=net.conv1.b.data),
        diconv2=dil_conv(net.diconv2, 2),
        diconv3=dil_conv(net.diconv3, 3),
        diconv4=dil_conv(net.diconv4, 4),
        diconv5_pi=dil_conv(net.diconv5_pi, 3),
        diconv6_pi=dil_conv(net.diconv6_pi, 2),
        conv7_Wz=gate_conv(net.conv7_Wz),
        conv7_Uz=gate_conv(net.conv7_Uz),
        conv7_Wr=gate_conv(net.conv7_Wr),
        conv7_Ur=gate_conv(net.conv7_Ur),
        conv7_W=gate_conv(net.conv7_W),
        conv7_U=gate_conv(net.conv7_U),
        conv8_pi=chainerrl.policies.SoftmaxPolicy(
            L.Convolution3D(16, n_actions, 3, stride=1, pad=1, nobias=False,
                            initialW=net.conv8_pi.model.W.data,
                            initial_bias=net.conv8_pi.model.b.data)),
        diconv5_V=dil_conv(net.diconv5_V, 3),
        diconv6_V=dil_conv(net.diconv6_V, 2),
        conv7_V=L.Convolution3D(16, 1, 3, stride=1, pad=1, nobias=False,
                                initialW=net.conv7_V.W.data,
                                initial_bias=net.conv7_V.b.data),
        conv_R=L.Convolution3D(1, 1, 33, stride=1, pad=16, nobias=True,
                               initialW=wI),
    )
    self.train = True
def __init__(self, in_channels=1, hidden_channels=16, out_channels=1, n_latent=3):
    """VAE encoder: alternating stride-2 / stride-1 convs, each followed by BN,
    then mu / ln_var linear heads.

    NOTE(review): `out_channels` is unused here — the last conv emits 1
    channel before the latent heads; confirm that is intended.
    """
    self.n_latent = n_latent
    initializer = chainer.initializers.HeNormal()
    super(Encoder, self).__init__()
    h = hidden_channels
    # (in_ch, out_ch, ksize, stride) per stage; every conv uses pad=1
    specs = (
        (in_channels, h, 4, 2),
        (h, h, 3, 1),
        (h, 2 * h, 4, 2),
        (2 * h, 2 * h, 3, 1),
        (2 * h, 4 * h, 4, 2),
        (4 * h, 4 * h, 3, 1),
        (4 * h, 1, 3, 1),
    )
    with self.init_scope():
        for idx, (cin, cout, k, s) in enumerate(specs):
            setattr(self, 'ec{}'.format(idx),
                    L.Convolution3D(in_channels=cin, out_channels=cout,
                                    ksize=k, stride=s, pad=1,
                                    initialW=initializer))
            setattr(self, 'bne{}'.format(idx), L.BatchNormalization(cout))
        self.mu = L.Linear(in_size=None, out_size=n_latent, initialW=initializer)
        self.ln_var = L.Linear(in_size=None, out_size=n_latent, initialW=initializer)
def __init__(self, n_actions):
    """Architecture skeleton whose weights are loaded elsewhere; default
    (untrained) initialization for every layer."""

    def dil_conv(rate):
        # 3x3x3 dilated conv, 16 -> 16 channels
        return L.Convolution3D(16, 16, 3, dilate=rate, stride=1, pad=rate)

    def gate_conv():
        # bias-free 3x3x3 conv for the GRU-style gates
        return L.Convolution3D(16, 16, 3, stride=1, pad=1, nobias=True,
                               initialW=None)

    super(Network_trained, self).__init__(
        conv1=L.Convolution3D(1, 16, 3, stride=1, pad=1, nobias=False,
                              initialW=None, initial_bias=None),
        diconv2=dil_conv(2),
        diconv3=dil_conv(3),
        diconv4=dil_conv(4),
        diconv5_pi=dil_conv(3),
        diconv6_pi=dil_conv(2),
        conv7_Wz=gate_conv(),
        conv7_Uz=gate_conv(),
        conv7_Wr=gate_conv(),
        conv7_Ur=gate_conv(),
        conv7_W=gate_conv(),
        conv7_U=gate_conv(),
        conv8_pi=chainerrl.policies.SoftmaxPolicy(
            L.Convolution3D(16, n_actions, 3, stride=1, pad=1, nobias=False,
                            initialW=None)),
        diconv5_V=dil_conv(3),
        diconv6_V=dil_conv(2),
        conv7_V=L.Convolution3D(16, 1, 3, stride=1, pad=1, nobias=False,
                                initialW=None, initial_bias=None),
    )
def __init__(
    self,
    *,
    n_fg_class,
    pretrained_resnet18=False,
    with_occupancy=False,
    loss=None,
    loss_scale=None,
):
    """Pose network: RGB/point(/occupancy) encoders, voxel fusion convs, and
    per-class rotation / translation / confidence heads."""
    super().__init__()
    self._n_fg_class = n_fg_class
    self._with_occupancy = with_occupancy

    if loss is None:
        loss = "add/add_s"
    assert loss in [
        "add",
        "add/add_s",
        "add+occupancy",
        "add/add_s+occupancy",
    ]
    self._loss = loss

    if loss_scale is None:
        loss_scale = {"occupancy": 1.0}
    self._loss_scale = loss_scale

    with self.init_scope():
        # extractor
        if pretrained_resnet18:
            self.resnet_extractor = morefusion.models.ResNet18Extractor()
        else:
            self.resnet_extractor = morefusion.models.dense_fusion.ResNet18()
        self.pspnet_extractor = (
            morefusion.models.dense_fusion.PSPNetExtractor()
        )

        # per-point encoders (two conv stages per modality)
        self.conv1_rgb = L.Convolution1D(32, 64, 1)
        self.conv1_pcd = L.Convolution1D(3, 8, 1)
        self.conv2_rgb = L.Convolution1D(64, 128, 1)
        self.conv2_pcd = L.Convolution1D(8, 16, 1)
        if self._with_occupancy:
            self.conv1_occ = L.Convolution3D(1, 8, 3, 1, pad=1)
            self.conv2_occ = L.Convolution3D(8, 16, 3, 1, pad=2, dilate=2)

        # voxel fusion convs
        self.conv3 = L.Convolution3D(None, 256, 4, 2, pad=1)
        self.conv4 = L.Convolution3D(256, 512, 4, 2, pad=1)

        # prediction heads; links registered in the original order
        # (stage by stage, rot/trans/conf within each stage)
        for head in ("rot", "trans", "conf"):
            setattr(self, "conv1_" + head, L.Convolution1D(None, 640, 1))
        for head in ("rot", "trans", "conf"):
            setattr(self, "conv2_" + head, L.Convolution1D(640, 256, 1))
        for head in ("rot", "trans", "conf"):
            setattr(self, "conv3_" + head, L.Convolution1D(256, 128, 1))
        self.conv4_rot = L.Convolution1D(128, n_fg_class * 4, 1)    # quaternion per class
        self.conv4_trans = L.Convolution1D(128, n_fg_class * 3, 1)  # translation per class
        self.conv4_conf = L.Convolution1D(128, n_fg_class, 1)
def __init__(self, input_chn=4, n_actions=6, C=16, pretrained=True):
    """Dilated-conv policy/value network.

    Each stage is a cubic dilated conv followed by one or two (3, 3, 1)
    dilated convs (ReLU after each), plus a 1x1x1 C -> C//4 compression.
    The two heads fuse 5 * C // 4 channels and emit the policy / value.

    NOTE(review): `pretrained` is currently unused — the trained-weight
    loading that presumably consumed it is disabled; confirm before relying
    on it.
    """
    dil = [1, 2, 3, 4, 5]  # dilation rate per stage
    init_w = chainer.initializers.HeNormal()

    def full_conv(cin, rate):
        # cubic 3x3x3 dilated conv
        return L.Convolution3D(cin, C, ksize=3, stride=1, pad=rate,
                               dilate=rate, nobias=True, initialW=init_w)

    def flat_conv(rate):
        # (3, 3, 1) dilated conv: no mixing along the last axis
        return L.Convolution3D(C, C, ksize=(3, 3, 1), stride=1,
                               pad=(rate, rate, 0), dilate=rate,
                               nobias=True, initialW=init_w)

    def stage(cin, rate, n_flat):
        layers = [full_conv(cin, rate), F.relu]
        for _ in range(n_flat):
            layers.extend([flat_conv(rate), F.relu])
        return Sequential(*layers)

    def compress():
        # 1x1x1 channel compression C -> C // 4
        return L.Convolution3D(C, C // 4, ksize=1, stride=1, pad=0,
                               dilate=1, nobias=True, initialW=init_w)

    def fuse():
        # fuses the concatenated compressed features (5 * C // 4 channels)
        return L.Convolution3D(5 * C // 4, C, ksize=1, stride=1, pad=0,
                               dilate=1, nobias=True, initialW=init_w)

    super(RNet, self).__init__(
        block11=stage(input_chn, dil[0], 1),
        comp1=compress(),
        block2=stage(C, dil[1], 1),
        comp2=compress(),
        block3=stage(C, dil[2], 2),
        comp3=compress(),
        block4_pi=stage(C, dil[3], 2),
        comp4_pi=compress(),
        block4_v=stage(C, dil[3], 2),
        comp4_v=compress(),
        block5_pi=stage(C, dil[4], 2),
        comp5_pi=compress(),
        block5_v=stage(C, dil[4], 2),
        comp5_v=compress(),
        block6_pi=Sequential(
            fuse(), F.relu,
            chainerrl.policies.SoftmaxPolicy(
                L.Convolution3D(C, n_actions, ksize=3, stride=1, pad=1,
                                dilate=1, nobias=True, initialW=init_w))),
        block6_v=Sequential(
            fuse(), F.relu,
            L.Convolution3D(C, 1, ksize=3, stride=1, pad=1,
                            dilate=1, nobias=True, initialW=init_w),
            F.relu),
    )
    self.train = True
def __init__(self, input_chn=2, n_actions=2, C=16):
    """Dilated-conv policy/value architecture (weights loaded elsewhere).

    Mirrors RNet's topology: per-stage cubic dilated conv plus one or two
    (3, 3, 1) dilated convs, 1x1x1 C -> C//4 compressions, and two fusion
    heads over 5 * C // 4 channels.
    """
    dil = [1, 2, 3, 4, 5]  # dilation rate per stage
    init_w = chainer.initializers.HeNormal()

    def full_conv(cin, rate):
        # cubic 3x3x3 dilated conv
        return L.Convolution3D(cin, C, ksize=3, stride=1, pad=rate,
                               dilate=rate, nobias=True, initialW=init_w)

    def flat_conv(rate):
        # (3, 3, 1) dilated conv: no mixing along the last axis
        return L.Convolution3D(C, C, ksize=(3, 3, 1), stride=1,
                               pad=(rate, rate, 0), dilate=rate,
                               nobias=True, initialW=init_w)

    def stage(cin, rate, n_flat):
        layers = [full_conv(cin, rate), F.relu]
        for _ in range(n_flat):
            layers.extend([flat_conv(rate), F.relu])
        return Sequential(*layers)

    def compress():
        # 1x1x1 channel compression C -> C // 4
        return L.Convolution3D(C, C // 4, ksize=1, stride=1, pad=0,
                               dilate=1, nobias=True, initialW=init_w)

    def fuse():
        # fuses the concatenated compressed features (5 * C // 4 channels)
        return L.Convolution3D(5 * C // 4, C, ksize=1, stride=1, pad=0,
                               dilate=1, nobias=True, initialW=init_w)

    super(RNet_trained, self).__init__(
        block1=stage(input_chn, dil[0], 1),
        comp1=compress(),
        block2=stage(C, dil[1], 1),
        comp2=compress(),
        block3=stage(C, dil[2], 2),
        comp3=compress(),
        block4_pi=stage(C, dil[3], 2),
        comp4_pi=compress(),
        block4_v=stage(C, dil[3], 2),
        comp4_v=compress(),
        block5_pi=stage(C, dil[4], 2),
        comp5_pi=compress(),
        block5_v=stage(C, dil[4], 2),
        comp5_v=compress(),
        block6_pi=Sequential(
            fuse(), F.relu,
            chainerrl.policies.SoftmaxPolicy(
                L.Convolution3D(C, n_actions, ksize=3, stride=1, pad=1,
                                dilate=1, nobias=True, initialW=init_w))),
        block6_v=Sequential(
            fuse(), F.relu,
            L.Convolution3D(C, 1, ksize=3, stride=1, pad=1,
                            dilate=1, nobias=True, initialW=init_w),
            F.relu),
    )
    self.train = True