def __init__(self, num_class=1):
    # Contracting path: five stages of (Conv2d + BatchNormalize) x 2,
    # doubling the channel count from 64 up to 1024.
    self.conv1_1 = rm.Conv2d(64, padding=1, filter=3)
    self.bn1_1 = rm.BatchNormalize(mode='feature')
    self.conv1_2 = rm.Conv2d(64, padding=1, filter=3)
    self.bn1_2 = rm.BatchNormalize(mode='feature')
    self.conv2_1 = rm.Conv2d(128, padding=1, filter=3)
    self.bn2_1 = rm.BatchNormalize(mode='feature')
    self.conv2_2 = rm.Conv2d(128, padding=1, filter=3)
    self.bn2_2 = rm.BatchNormalize(mode='feature')
    self.conv3_1 = rm.Conv2d(256, padding=1, filter=3)
    self.bn3_1 = rm.BatchNormalize(mode='feature')
    self.conv3_2 = rm.Conv2d(256, padding=1, filter=3)
    self.bn3_2 = rm.BatchNormalize(mode='feature')
    self.conv4_1 = rm.Conv2d(512, padding=1, filter=3)
    self.bn4_1 = rm.BatchNormalize(mode='feature')
    self.conv4_2 = rm.Conv2d(512, padding=1, filter=3)
    self.bn4_2 = rm.BatchNormalize(mode='feature')
    self.conv5_1 = rm.Conv2d(1024, padding=1, filter=3)
    self.bn5_1 = rm.BatchNormalize(mode='feature')
    self.conv5_2 = rm.Conv2d(1024, padding=1, filter=3)
    self.bn5_2 = rm.BatchNormalize(mode='feature')
    # Expanding path: upsample with Deconv2d, then refine with two convolutions.
    self.deconv1 = rm.Deconv2d(512, stride=2)
    self.conv6_1 = rm.Conv2d(256, padding=1)
    self.conv6_2 = rm.Conv2d(256, padding=1)
    self.deconv2 = rm.Deconv2d(256, stride=2)
    self.conv7_1 = rm.Conv2d(128, padding=1)
    self.conv7_2 = rm.Conv2d(128, padding=1)
    self.deconv3 = rm.Deconv2d(128, stride=2)
    self.conv8_1 = rm.Conv2d(64, padding=1)
    self.conv8_2 = rm.Conv2d(64, padding=1)
    self.deconv4 = rm.Deconv2d(64, stride=2)
    # A 1x1 convolution maps the final feature map to per-pixel class scores.
    self.conv9 = rm.Conv2d(num_class, filter=1)
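# A hedged sketch of how one contracting step would use the layers above.
# The model's real forward method is not shown in this snippet; rm.relu and
# rm.MaxPool2d are standard ReNom ops, but the helper name is hypothetical.
def _down_block(self, x, conv_a, bn_a, conv_b, bn_b):
    h = rm.relu(bn_a(conv_a(x)))
    h = rm.relu(bn_b(conv_b(h)))
    # Keep h for the skip connection; pool the copy fed to the next stage.
    return h, rm.MaxPool2d(filter=2, stride=2)(h)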
def __init__(self, input_shape, output_shape, units=10, depth=3,
             growth_rate=12, dropout=False,
             initializer=rm.utility.initializer.Gaussian(std=0.3),
             active=rm.tanh):
    self.input_shape = input_shape
    self.output_shape = output_shape
    self.units = units
    self.depth = depth
    self.dropout = dropout
    self.active = active
    parameters = []
    add_units = units
    for _ in range(depth - 1):
        add_units += growth_rate
        parameters.append(rm.BatchNormalize())
        parameters.append(rm.Dense(add_units, initializer=initializer))
    self.hidden = rm.Sequential(parameters)
    self.input_batch = rm.BatchNormalize()
    self.input = rm.Dense(units)
    self.multi_output = False
    if isinstance(self.output_shape, tuple):
        self.multi_output = True
        parameters = []
        for _ in range(output_shape[0]):
            parameters.append(rm.BatchNormalize())
            parameters.append(
                rm.Dense(output_shape[1], initializer=initializer))
        self.output = rm.Sequential(parameters)
    else:
        self.output = rm.Dense(output_shape)
def __init__(self, num_class):
    self.base1 = rm.Sequential([
        InceptionV2Stem(),
        InceptionV2BlockA([64, 48, 64, 64, 96, 32]),
        InceptionV2BlockA(),
        InceptionV2BlockA(),
        InceptionV2BlockB(),
        InceptionV2BlockC([192, 128, 192, 128, 192, 192]),
        InceptionV2BlockC(),
        InceptionV2BlockC(),
        InceptionV2BlockC()])
    self.aux1 = rm.Sequential([
        rm.AveragePool2d(filter=5, stride=3),
        rm.Conv2d(128, filter=1),
        rm.BatchNormalize(mode='feature'),
        rm.Relu(),
        rm.Conv2d(768, filter=1),
        rm.BatchNormalize(mode='feature'),
        rm.Relu(),
        rm.Flatten(),
        rm.Dense(num_class)])
    self.base2 = rm.Sequential([
        InceptionV2BlockD(),
        InceptionV2BlockE(),
        InceptionV2BlockE(),
        rm.AveragePool2d(filter=8),
        rm.Flatten()])
    self.aux2 = rm.Dense(num_class)
def conv_block(growth_rate):
    # DenseNet-style bottleneck: a 1x1 convolution to 4*growth_rate channels,
    # then a 3x3 convolution down to growth_rate channels.
    return rm.Sequential([
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate * 4, 1, padding=0),
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate, 3, padding=1),
    ])
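# Minimal smoke test for conv_block, assuming ReNom is importable as `rm`;
# the input shape and channel counts are illustrative only.
import numpy as np
import renom as rm

block = conv_block(growth_rate=32)
x = np.random.randn(2, 64, 8, 8).astype(np.float32)  # NCHW input
y = block(x)
print(y.shape)  # (2, 32, 8, 8); a dense block then concatenates y onto x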
def __init__(self, inplanes, planes, stride=1, downsample=None):
    super(BasicBlock, self).__init__()
    self.conv1 = conv3x3(planes, stride)
    self.bn1 = rm.BatchNormalize(mode='feature')
    self.relu = rm.Relu()
    self.conv2 = conv3x3(planes)
    self.bn2 = rm.BatchNormalize(mode='feature')
    self.downsample = downsample
    self.stride = stride
def __init__(self, channels=[64, 96, 384]):
    self.conv1_reduced = rm.Conv2d(channels[0], filter=1)
    self.batch_norm1_reduced = rm.BatchNormalize(mode='feature')
    self.conv1_1 = rm.Conv2d(channels[1], filter=3, padding=1)
    self.batch_norm1_1 = rm.BatchNormalize(mode='feature')
    self.conv1_2 = rm.Conv2d(channels[1], filter=3, stride=2)
    self.batch_norm1_2 = rm.BatchNormalize(mode='feature')
    self.conv2 = rm.Conv2d(channels[2], filter=3, stride=2)
    self.batch_norm2 = rm.BatchNormalize(mode='feature')
def __init__(self):
    # k, l, m, n = 192, 224, 256, 384
    self.conv1 = rm.Conv2d(384, filter=3, stride=2)
    self.batch_norm1 = rm.BatchNormalize(mode='feature')
    self.conv2_red = rm.Conv2d(192, filter=1)
    self.batch_norm2_red = rm.BatchNormalize(mode='feature')
    self.conv2_1 = rm.Conv2d(224, filter=3, padding=1)
    self.batch_norm2_1 = rm.BatchNormalize(mode='feature')
    self.conv2_2 = rm.Conv2d(256, filter=3, stride=2)
    self.batch_norm2_2 = rm.BatchNormalize(mode='feature')
def __init__(self, channels=[192, 320, 192, 192]):
    self.conv1_reduced = rm.Conv2d(channels[0], filter=1)
    self.batch_norm1_reduced = rm.BatchNormalize(mode='feature')
    self.conv1 = rm.Conv2d(channels[1], filter=3, stride=2)
    self.batch_norm1 = rm.BatchNormalize(mode='feature')
    self.conv2_reduced = rm.Conv2d(channels[2], filter=1)
    self.batch_norm2_reduced = rm.BatchNormalize(mode='feature')
    self.conv2_1 = rm.Conv2d(channels[3], filter=3, padding=1)
    self.batch_norm2_1 = rm.BatchNormalize(mode='feature')
    self.conv2_2 = rm.Conv2d(channels[3], filter=3, stride=2)
    self.batch_norm2_2 = rm.BatchNormalize(mode='feature')
def test_batch_normalize_featuremap(a):
    layer = rm.BatchNormalize(mode=BATCH_NORMALIZE_FEATUREMAP, momentum=0.1)

    # GPU pass: several training-mode forwards, then one inference-mode forward.
    set_cuda_active(True)
    g1 = Variable(a)
    for _ in range(10):
        g3 = layer(g1)
    g3.to_cpu()
    layer.set_models(inference=True)
    g4 = layer(g1)
    layer.set_models(inference=False)

    # CPU pass: reset the running statistics and repeat the same sequence.
    set_cuda_active(False)
    layer._mov_mean = 0
    layer._mov_std = 0
    for _ in range(10):
        c3 = layer(g1)
    layer.set_models(inference=True)
    c4 = layer(g1)
    layer.set_models(inference=False)

    # GPU and CPU must agree on outputs, batch statistics, and moving statistics.
    close(g3, c3)
    close(g4, c4)
    close(g3.attrs._m.new_array(), c3.attrs._m)
    close(g3.attrs._v.new_array(), c3.attrs._v)
    close(g3.attrs._mov_m.new_array(), c3.attrs._mov_m)
    close(g3.attrs._mov_v.new_array(), c3.attrs._mov_v)
def denseblock(
    self,
    dim_v=8,
    dim_h=8,
    input_channels=10,
    dropout=False,
    out_ch=0.5,
):
    parameters = []
    c = input_channels
    print('-> {}'.format(c))
    for _ in range(self.depth):
        c += self.growth_rate
        print('Batch Normalize')
        parameters.append(rm.BatchNormalize())
        print(' Conv2d > {}x{} {}ch'.format(dim_v, dim_h, self.growth_rate))
        parameters.append(
            rm.Conv2d(self.growth_rate, filter=3, padding=(1, 1)))
        if self.dropout:
            print(' Dropout')
    # Transition: compress channels by out_ch, which is either a float
    # compression ratio or an absolute channel count.
    c = int(c * out_ch) if isinstance(out_ch, float) else out_ch
    print('*Conv2d > {}x{} {}ch'.format(dim_v, dim_h, c))
    parameters.append(rm.Conv2d(c, filter=1))
    if self.dropout:
        print(' Dropout')
    print(' Average Pooling')
    print('<- {}'.format(c))
    return parameters, c
def __init__(self, num_classes, block, layers, cardinality):
    self.inplanes = 128
    self.cardinality = cardinality
    super(ResNeXt, self).__init__()
    self.conv1 = rm.Conv2d(64, filter=7, stride=2, padding=3, ignore_bias=True)
    self.bn1 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
    self.relu = rm.Relu()
    self.maxpool = rm.MaxPool2d(filter=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 128, layers[0], stride=1,
                                   cardinality=self.cardinality)
    self.layer2 = self._make_layer(block, 256, layers[1], stride=2,
                                   cardinality=self.cardinality)
    self.layer3 = self._make_layer(block, 512, layers[2], stride=2,
                                   cardinality=self.cardinality)
    self.layer4 = self._make_layer(block, 1024, layers[3], stride=2,
                                   cardinality=self.cardinality)
    self.flat = rm.Flatten()
    self.fc = rm.Dense(num_classes)
def transition_layer(growth_rate):
    return rm.Sequential([
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate, filter=1, padding=0, stride=1),
        rm.AveragePool2d(filter=2, stride=2)
    ])
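# Hedged sketch of the dense connectivity conv_block and transition_layer
# implement together. It assumes rm.concat joins along the channel axis;
# in a real model the blocks would be built once in __init__, not per call.
def dense_stage(x, n_blocks, growth_rate):
    for _ in range(n_blocks):
        x = rm.concat(x, conv_block(growth_rate)(x))  # channels += growth_rate
    return transition_layer(growth_rate)(x)  # compress channels, halve resolution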
def _gen_model(self):
    N = self.batch
    input_shape = self.arch['input_shape']
    output_shape = self.arch['output_shape']
    if 'debug' in self.arch.keys():
        debug = self.arch['debug']
    else:
        debug = False
    self.batch_input_shape = self.get_shape(N, input_shape)
    self.batch_output_shape = self.get_shape(N, output_shape)
    depth = self.arch['depth']
    unit = self.arch['unit']
    units = np.ones(depth + 1) * unit
    _unit = np.prod(output_shape)
    units[-1] = _unit
    units = units.astype('int')
    layer = [rm.Flatten()]
    for _unit in units:
        layer.append(rm.BatchNormalize())
        layer.append(rm.Relu())
        layer.append(rm.Dense(_unit))
    # layer = layer[:-1] + [rm.Dropout()] + [layer[-1]]
    self.fcnn = rm.Sequential(layer)
    if debug:
        x = np.zeros(self.batch_input_shape)
        for _layer in layer:
            x = _layer(x)
            print(x.shape, str(_layer.__class__).split('.')[-1])
        x = rm.reshape(x, self.batch_output_shape)
        print(x.shape)
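# Hypothetical configuration dict consistent with the keys _gen_model reads
# above; the concrete values are illustrative, not from the source.
arch = {
    'input_shape': (1, 28, 28),   # flattened by rm.Flatten()
    'output_shape': (10,),        # np.prod(...) sets the final Dense width
    'depth': 3,                   # number of hidden Dense layers
    'unit': 128,                  # width of each hidden Dense layer
    'debug': True,                # print layer-by-layer shapes on a dry run
}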
def __init__(
    self,
    latent_dim=10,
    output_shape=(28, 28),
    batch_normal=False,
    dropout=False,
    min_channels=16,
):
    self.batch_normal = batch_normal
    self.latent_dim = latent_dim
    self.output_shape = output_shape
    self.dropout = dropout
    self.min_channels = min_channels
    print('--- Generator Network ---')
    parameters = []
    print_params = []
    dim = output_shape[0]
    channels = self.min_channels
    # Walk down from the output resolution, halving dim at each step, and
    # collect the deconvolutions needed to walk back up.
    while dim % 2 == 0 and dim > 2:
        parameters.append(rm.Deconv2d(
            channel=channels, stride=2, filter=2))
        if batch_normal:
            parameters.append(rm.BatchNormalize())
        dim = dim // 2
        print_params.append([dim, channels])
        channels *= 2
    if dim % 2 == 1:
        parameters.append(rm.Deconv2d(
            channel=channels, stride=2, filter=3))
        if batch_normal:
            parameters.append(rm.BatchNormalize())
        dim = (dim - 1) // 2
        print_params.append([dim, channels])
        channels *= 2
    # The layers were collected top-down, so reverse them to run from the
    # latent code up to the full image.
    parameters.reverse()
    print_params.reverse()
    print('Dense {}x{}x{} & Reshape'.format(dim, dim, channels))
    self.channels = channels
    self.transform = rm.Dense(channels * 1 * dim * dim)
    for item in print_params:
        print('Deconv2d to {}x{} {}ch '.format(item[0], item[0], item[1]))
    self.hidden = rm.Sequential(parameters)
    self.output = rm.Conv2d(channel=1, stride=1, filter=1)
    print('Conv2d to {}x{} 1ch'.format(output_shape[0], output_shape[0]))
    self.dim = dim
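# Hedged sketch of the forward pass this constructor implies; the actual
# forward method is not part of the snippet. rm.reshape is the same ReNom
# call used in _gen_model above.
def forward(self, z):
    h = self.transform(z)                                  # latent -> flat features
    h = rm.reshape(h, (-1, self.channels, self.dim, self.dim))
    h = self.hidden(h)                                     # stacked Deconv2d upsampling
    return self.output(h)                                  # 1x1 conv to a 1-channel image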
def __init__(self, inplanes, planes, stride=1, downsample=None):
    super(Bottleneck, self).__init__()
    self.conv1 = rm.Conv2d(planes, filter=1, ignore_bias=True)
    self.bn1 = rm.BatchNormalize(mode='feature')
    self.conv2 = rm.Conv2d(planes, filter=3, stride=stride, padding=1,
                           ignore_bias=True)
    self.bn2 = rm.BatchNormalize(mode='feature')
    self.conv3 = rm.Conv2d(planes * self.expansion, filter=1, ignore_bias=True)
    self.bn3 = rm.BatchNormalize(mode='feature')
    self.relu = rm.Relu()
    self.downsample = downsample
    self.stride = stride
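# Hedged sketch of the standard bottleneck forward pass these attributes
# support (the snippet omits the real forward method): 1x1 reduce,
# 3x3, 1x1 expand, plus the residual shortcut.
def forward(self, x):
    residual = x
    out = self.relu(self.bn1(self.conv1(x)))
    out = self.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))
    if self.downsample is not None:
        residual = self.downsample(x)  # projection shortcut when shapes differ
    return self.relu(out + residual)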
def __init__(self, channel, filter=3, prev_ch=None):
    pad = int((filter - 1) / 2)
    if prev_ch is not None:
        # When the input channel count is known up front, materialize the
        # weights immediately instead of waiting for the first forward pass.
        self._conv = rm.Conv2d(channel=channel, filter=filter, padding=pad)
        self._conv.params = {
            "w": rm.Variable(self._conv._initializer(
                (channel, prev_ch, filter, filter)), auto_update=True),
            "b": rm.Variable(np.zeros((1, channel, 1, 1), dtype=np.float32),
                             auto_update=False),
        }
        self._bn = rm.BatchNormalize(mode='feature', momentum=0.99)
    else:
        self._conv = rm.Conv2d(channel=channel, filter=filter, padding=pad)
        self._bn = rm.BatchNormalize(mode='feature', momentum=0.99)
def __init__(self, class_map=None, cells=7, bbox=2, imsize=(224, 224),
             load_pretrained_weight=False, train_whole_network=False):
    if not hasattr(cells, "__getitem__"):
        cells = (cells, cells)
    self._cells = cells
    self._bbox = bbox
    model = Darknet()
    super(Yolov1, self).__init__(class_map, imsize, load_pretrained_weight,
                                 train_whole_network, model)
    self._last_dense_size = (self.num_class + 5 * bbox) * cells[0] * cells[1]
    self._freezed_network = rm.Sequential(model[:-4])
    self._network = rm.Sequential([
        rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Conv2d(channel=1024, filter=3, padding=1, stride=2, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Flatten(),
        rm.Dense(4096),  # a Dense layer stands in for the paper's locally connected layer
        rm.LeakyRelu(slope=0.1),
        rm.Dropout(0.5),
        rm.Dense(self._last_dense_size)
    ])
    self._opt = rm.Sgd(0.0005, 0.9)
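# Worked example of _last_dense_size: each of the cells[0] x cells[1] grid
# cells predicts bbox boxes (5 numbers each) plus one score per class.
# The 20-class value below is illustrative, not from the source.
num_class, bbox, cells = 20, 2, (7, 7)
print((num_class + 5 * bbox) * cells[0] * cells[1])  # (20 + 10) * 49 = 1470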
def __init__(self, planes, stride=1, downsample=None, cardinality=32):
    super(Bottleneck, self).__init__()
    self.cardinality = cardinality
    self.conv1 = rm.Conv2d(planes, filter=1, ignore_bias=True)
    self.bn1 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
    self.conv2 = rm.GroupConv2d(planes, filter=3, stride=stride, padding=1,
                                ignore_bias=True, groups=self.cardinality)
    self.bn2 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
    self.conv3 = rm.Conv2d(planes * self.expansion, filter=1, ignore_bias=True)
    self.bn3 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
    self.relu = rm.Relu()
    self.downsample = downsample
    self.stride = stride
def _gen_model(self):
    depth = self.arch['depth']
    unit = self.arch['unit']
    # excluding mini-batch size
    input_shape = self.arch['input_shape']
    output_shape = self.arch['output_shape']
    seq = []
    for i in range(depth):
        seq.append(rm.Dense(unit))
        seq.append(rm.Relu())
        if i < 1 or i == depth - 1:
            seq.append(rm.BatchNormalize())
    seq.append(rm.Dense(output_shape))
    self._model = rm.Sequential(seq)
def __init__(self, num_class):
    self.base1 = rm.Sequential([
        rm.Conv2d(64, filter=7, padding=3, stride=2),
        rm.Relu(),
        rm.MaxPool2d(filter=3, stride=2, padding=1),
        rm.BatchNormalize(mode='feature'),
        rm.Conv2d(64, filter=1, stride=1),
        rm.Relu(),
        rm.Conv2d(192, filter=3, padding=1, stride=1),
        rm.Relu(),
        rm.BatchNormalize(mode='feature'),
        rm.MaxPool2d(filter=3, stride=2, padding=1),
        InceptionV1Block(),
        InceptionV1Block([128, 128, 192, 32, 96, 64]),
        rm.MaxPool2d(filter=3, stride=2),
        InceptionV1Block([192, 96, 208, 16, 48, 64]),
    ])
    self.aux1 = rm.Sequential([
        rm.AveragePool2d(filter=5, stride=3),
        rm.Flatten(),
        rm.Dense(1024),
        rm.Dense(num_class)])
    self.base2 = rm.Sequential([
        InceptionV1Block([160, 112, 224, 24, 64, 64]),
        InceptionV1Block([128, 128, 256, 24, 64, 64]),
        InceptionV1Block([112, 144, 288, 32, 64, 64])])
    self.aux2 = rm.Sequential([
        rm.AveragePool2d(filter=5, stride=3),
        rm.Flatten(),
        rm.Dense(1024),
        rm.Dense(num_class)])
    self.base3 = rm.Sequential([
        InceptionV1Block([256, 160, 320, 32, 128, 128]),
        InceptionV1Block([256, 160, 320, 32, 128, 128]),
        InceptionV1Block([192, 384, 320, 48, 128, 128]),
        rm.AveragePool2d(filter=7, stride=1),
        rm.Flatten()])
    self.aux3 = rm.Dense(num_class)
def __init__(
    self,
    input_shape,
    output_shape,
    growth_rate=12,
    depth=3,
    dropout=False,
):
    self.growth_rate = growth_rate
    self.depth = depth
    self.dropout = dropout
    self.input = rm.Dense(input_shape)
    self.output = rm.Dense(output_shape)
    parameters = []
    for _ in range(depth):
        parameters.append(rm.BatchNormalize())
        parameters.append(rm.Dense(growth_rate))
    self.hidden = rm.Sequential(parameters)
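# Hedged sketch of a forward pass consistent with this constructor. It
# assumes rm.concat joins along axis 1 and that the layers held by
# self.hidden can be iterated in (BatchNormalize, Dense) pairs; the
# snippet does not show the author's real forward method.
def forward(self, x):
    h = self.input(x)
    layers = iter(self.hidden)
    for bn, dense in zip(layers, layers):
        h = rm.concat(h, rm.relu(dense(bn(h))))  # densely append growth_rate units
    return self.output(h)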
def __init__(self, channels=[192, 128, 192, 128, 192, 192]):
    self.conv1 = rm.Conv2d(channels[0], filter=1)
    self.batch_norm1 = rm.BatchNormalize(mode='feature')
    self.conv2_reduced = rm.Conv2d(channels[1], filter=1)
    self.batch_norm2_reduced = rm.BatchNormalize(mode='feature')
    self.conv2_1 = rm.Conv2d(channels[1], filter=(3, 1), padding=(1, 0))
    self.batch_norm2_1 = rm.BatchNormalize(mode='feature')
    self.conv2_2 = rm.Conv2d(channels[2], filter=(1, 3), padding=(0, 1))
    self.batch_norm2_2 = rm.BatchNormalize(mode='feature')
    self.conv3_reduced = rm.Conv2d(channels[3], filter=1)
    self.batch_norm3_reduced = rm.BatchNormalize(mode='feature')
    self.conv3_1 = rm.Conv2d(channels[3], filter=(3, 1), padding=(1, 0))
    self.batch_norm3_1 = rm.BatchNormalize(mode='feature')
    self.conv3_2 = rm.Conv2d(channels[3], filter=(1, 3), padding=(0, 1))
    self.batch_norm3_2 = rm.BatchNormalize(mode='feature')
    self.conv3_3 = rm.Conv2d(channels[3], filter=(3, 1), padding=(1, 0))
    self.batch_norm3_3 = rm.BatchNormalize(mode='feature')
    self.conv3_4 = rm.Conv2d(channels[4], filter=(1, 3), padding=(0, 1))
    self.batch_norm3_4 = rm.BatchNormalize(mode='feature')
    self.conv4 = rm.Conv2d(channels[5], filter=1)
    self.batch_norm4 = rm.BatchNormalize(mode='feature')
def __init__(self):
    self.conv1 = rm.Conv2d(256, filter=1)
    self.batch_norm1 = rm.BatchNormalize(mode='feature')
    self.conv2 = rm.Conv2d(256, filter=1)
    self.batch_norm2 = rm.BatchNormalize(mode='feature')
    self.conv3_red = rm.Conv2d(384, filter=1)
    self.batch_norm3_red = rm.BatchNormalize(mode='feature')
    self.conv3_1 = rm.Conv2d(256, filter=(1, 3), padding=(0, 1))
    self.batch_norm3_1 = rm.BatchNormalize(mode='feature')
    self.conv3_2 = rm.Conv2d(256, filter=(3, 1), padding=(1, 0))
    self.batch_norm3_2 = rm.BatchNormalize(mode='feature')
    self.conv4_red = rm.Conv2d(384, filter=1)
    self.batch_norm4_red = rm.BatchNormalize(mode='feature')
    self.conv4_1 = rm.Conv2d(448, filter=(1, 3), padding=(0, 1))
    self.batch_norm4_1 = rm.BatchNormalize(mode='feature')
    self.conv4_2 = rm.Conv2d(512, filter=(3, 1), padding=(1, 0))
    self.batch_norm4_2 = rm.BatchNormalize(mode='feature')
    self.conv4_3 = rm.Conv2d(256, filter=(1, 3), padding=(0, 1))
    self.batch_norm4_3 = rm.BatchNormalize(mode='feature')
    self.conv4_4 = rm.Conv2d(256, filter=(3, 1), padding=(1, 0))
    self.batch_norm4_4 = rm.BatchNormalize(mode='feature')
def __init__(self):
    self.conv1 = rm.Conv2d(128, filter=1)
    self.batch_norm1 = rm.BatchNormalize(mode='feature')
    self.conv2 = rm.Conv2d(384, filter=3, padding=1)
    self.batch_norm2 = rm.BatchNormalize(mode='feature')
    self.conv3_1 = rm.Conv2d(192, filter=1)
    self.batch_norm3_1 = rm.BatchNormalize(mode='feature')
    self.conv3_2 = rm.Conv2d(224, filter=(1, 7), padding=(0, 3))
    self.batch_norm3_2 = rm.BatchNormalize(mode='feature')
    self.conv3_3 = rm.Conv2d(256, filter=(7, 1), padding=(3, 0))
    self.batch_norm3_3 = rm.BatchNormalize(mode='feature')
    self.conv4_1 = rm.Conv2d(192, filter=1)
    self.batch_norm4_1 = rm.BatchNormalize(mode='feature')
    self.conv4_2 = rm.Conv2d(192, filter=(1, 7), padding=(0, 3))
    self.batch_norm4_2 = rm.BatchNormalize(mode='feature')
    self.conv4_3 = rm.Conv2d(224, filter=(7, 1), padding=(3, 0))
    self.batch_norm4_3 = rm.BatchNormalize(mode='feature')
    self.conv4_4 = rm.Conv2d(224, filter=(1, 7), padding=(0, 3))
    self.batch_norm4_4 = rm.BatchNormalize(mode='feature')
    self.conv4_5 = rm.Conv2d(256, filter=(7, 1), padding=(3, 0))
    self.batch_norm4_5 = rm.BatchNormalize(mode='feature')
def __init__(self, num_class, layer_per_block=[6, 12, 24, 16],
             growth_rate=32, train_whole_network=False):
    self.layer_per_block = layer_per_block
    self.growth_rate = growth_rate
    layers = []
    layers.append(rm.Conv2d(64, 7, padding=3, stride=2))
    layers.append(rm.BatchNormalize(epsilon=0.001, mode='feature'))
    for i in layer_per_block[:-1]:
        for j in range(i):
            layers.append(conv_block(growth_rate))
        layers.append(transition_layer(growth_rate))
    for i in range(layer_per_block[-1]):
        layers.append(conv_block(growth_rate))
    self.base = rm.Sequential(layers)
    self.fc = rm.Dense(num_class)
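# Why the default layer_per_block gives DenseNet-121 (illustrative
# arithmetic, not part of the model code): 58 conv_blocks of 2 convolutions
# each, plus the stem conv, 3 transition convs, and the final Dense layer.
layer_per_block = [6, 12, 24, 16]
n_weighted = 2 * sum(layer_per_block) + 1 + (len(layer_per_block) - 1) + 1
print(n_weighted)  # 116 + 1 + 3 + 1 = 121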
def test_batch_normalize(a):
    layer = rm.Sequential([rm.BatchNormalize(momentum=0.1)])

    # GPU pass: forward, backward, and an inference-mode forward.
    set_cuda_active(True)
    g1 = Variable(a)
    g2 = layer(g1)
    g3 = rm.sum(g2)
    g = g3.grad()
    g_g1 = g.get(g1)
    g_g2 = g.get(layer.l0.params["w"])
    g_g3 = g.get(layer.l0.params["b"])
    layer.set_models(inference=True)
    g4 = layer(g1)
    layer.set_models(inference=False)
    g2.to_cpu()
    g3.to_cpu()
    g4.to_cpu()
    g_g1.to_cpu()
    g_g2.to_cpu()
    g_g3.to_cpu()

    # CPU pass: reset the running statistics and repeat.
    set_cuda_active(False)
    layer.l0._mov_mean = 0
    layer.l0._mov_std = 0
    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g1 = c.get(g1)
    # Fixed: these two originally read from the GPU grads `g`, which made
    # the comparisons below trivially compare the GPU gradients to themselves.
    c_g2 = c.get(layer.l0.params["w"])
    c_g3 = c.get(layer.l0.params["b"])
    layer.set_models(inference=True)
    c4 = layer(g1)
    layer.set_models(inference=False)

    close(g2, c2)
    close(g3, c3)
    close(g4, c4)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
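# The behaviour these tests exercise, as a minimal standalone sketch
# (the input shape is illustrative): in training mode BatchNormalize uses
# batch statistics and updates its moving mean/std; in inference mode it
# normalizes with the stored moving statistics instead.
import numpy as np
import renom as rm

bn = rm.BatchNormalize(momentum=0.1)
x = np.random.randn(8, 4).astype(np.float32)
y_train = bn(x)                 # batch statistics; moving stats updated
bn.set_models(inference=True)
y_infer = bn(x)                 # moving statistics; no update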
def _make_layer(self, block, planes, blocks, stride=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        # Projection shortcut: match the residual's shape with a strided
        # 1x1 convolution whenever the spatial size or channel count changes.
        downsample = rm.Sequential([
            rm.Conv2d(planes * block.expansion, filter=1, stride=stride,
                      ignore_bias=True),
            rm.BatchNormalize(mode='feature')
        ])
    layers = []
    layers.append(block(self.inplanes, planes, stride, downsample))
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(block(self.inplanes, planes))
    return rm.Sequential(layers)
def denseblock(self, dim=8, input_channels=10, dropout=False):
    parameters = []
    c = input_channels
    print('-> {}'.format(c))
    for _ in range(self.depth):
        c += self.growth_rate
        print('Batch Normalize')
        parameters.append(rm.BatchNormalize())
        print(' Conv2d > {}x{} {}ch'.format(dim, dim, self.growth_rate))
        parameters.append(
            rm.Conv2d(self.growth_rate, filter=3, padding=(1, 1)))
        if self.dropout:
            print('Dropout')
    c = int(c * self.compression)
    print('*Conv2d > {}x{} {}ch'.format(dim, dim, c))
    parameters.append(rm.Conv2d(c, filter=1))
    print(' Average Pooling')
    print('<- {}'.format(c))
    return parameters, c
def __init__(
    self,
    enc_base,
    dec,
    batch_size,
    latent_dim=2,
    mode='simple',
    label_dim=0,
    prior='normal',
    prior_dist=None,
    hidden=1000,
    full_rate=0.1,  # balance between matching the overall shape and matching each label
    fm_rate=1.,     # same purpose as full_rate
):
    self.latent_dim = latent_dim
    self.mode = mode
    self.label_dim = label_dim
    self.batch_size = batch_size
    self.prior = prior
    self.prior_dist = prior_dist
    self.full_rate = full_rate
    self.fm_rate = fm_rate
    if self.mode == 'clustering' or self.mode == 'reduction':
        self.enc = Enc(enc_base, (latent_dim, label_dim),
                       output_act=(None, rm.softmax))
    else:
        self.enc = Enc(enc_base, latent_dim)
    self.dec = dec
    self.dis = rm.Sequential([
        rm.Dense(hidden), rm.LeakyRelu(),
        rm.Dense(hidden), rm.LeakyRelu(),
        rm.Dense(1), rm.Sigmoid()
    ])
    if self.mode == 'clustering' or self.mode == 'reduction':
        self.cds = rm.Sequential([
            # rm.BatchNormalize(),  # do not place BatchNormalize at the head of a discriminator
            rm.Dense(hidden), rm.LeakyRelu(),
            rm.BatchNormalize(),
            rm.Dense(hidden), rm.LeakyRelu(),
            # rm.BatchNormalize(),
            rm.Dense(1), rm.Sigmoid()
        ])
def __init__(
        self,
        input_shape,
        output_shape,
        growth_rate=12,
        depth=3,
        dropout=False):
    self.depth = depth
    self.dropout = dropout
    if depth != 1:
        # Recursively build the deeper levels; each level widens the input
        # by growth_rate channels (dense connectivity).
        under_growth_rate = input_shape + growth_rate
        self.under_model = mymodel(
            under_growth_rate, output_shape,
            growth_rate=growth_rate, depth=depth - 1)
    else:
        self.output = rm.Dense(output_shape)
    self.batch = rm.BatchNormalize()
    self.conv = rm.Dense(growth_rate)