def __init__(self, num_class):
    """Assemble the Inception-v2 network: trunk, auxiliary head, tail, classifier.

    Args:
        num_class: number of output classes for both classifier heads.
    """
    # Trunk: stem, four A blocks (first with explicit channel spec),
    # one B (reduction) block, four C blocks.
    self.base1 = rm.Sequential([
        InceptionV2Stem(),
        InceptionV2BlockA([64, 48, 64, 64, 96, 32]),
        InceptionV2BlockA(),
        InceptionV2BlockA(),
        InceptionV2BlockB(),
        InceptionV2BlockC([192, 128, 192, 128, 192, 192]),
        InceptionV2BlockC(),
        InceptionV2BlockC(),
        InceptionV2BlockC(),
    ])
    # Auxiliary classifier attached after base1.
    self.aux1 = rm.Sequential([
        rm.AveragePool2d(filter=5, stride=3),
        rm.Conv2d(128, filter=1),
        rm.Relu(),
        rm.Conv2d(768, filter=1),
        rm.Relu(),
        rm.Flatten(),
        rm.Dense(num_class),
    ])
    # Tail: D (reduction) block, two E blocks, global average pool.
    self.base2 = rm.Sequential([
        InceptionV2BlockD(),
        InceptionV2BlockE(),
        InceptionV2BlockE(),
        rm.AveragePool2d(filter=8),
        rm.Flatten(),
    ])
    # Main classifier head.
    self.aux2 = rm.Dense(num_class)
def __init__(self):
    """Build a small conv feature extractor (28x28 input) and a scalar output head."""
    channel = 8
    # NOTE(review): removed unused local `intermidiate_dim = 128` (sic); it was
    # only referenced from the commented-out Dense layer below.
    self.cnn1 = rm.Sequential([
        # 28x28 -> 28x28
        rm.Conv2d(channel=channel, filter=3, stride=1, padding=1),
        rm.LeakyRelu(),
        rm.Dropout(),
        # 28x28 -> 14x14
        rm.Conv2d(channel=channel * 2, filter=3, stride=2, padding=1),
        rm.LeakyRelu(),
        rm.Dropout(),
        # 14x14 -> 8x8
        rm.Conv2d(channel=channel * 4, filter=3, stride=2, padding=2),
        rm.LeakyRelu(),
        rm.Dropout(),
        # 8x8 -> 4x4
        rm.Conv2d(channel=channel * 8, filter=3, stride=2, padding=1),
        rm.LeakyRelu(),
        rm.Dropout(),
    ])
    self.cnn2 = rm.Sequential([
        #rm.Dropout(),
        rm.Flatten(),
        #rm.Dense(128)
    ])
    self.output = rm.Dense(1)
def __init__(self, num_classes, block, layers, cardinality):
    """ResNeXt backbone: 7x7 stem, four residual stages, then a linear classifier.

    Args:
        num_classes: size of the final Dense layer.
        block: residual block class handed to ``_make_layer``.
        layers: per-stage block counts (length 4).
        cardinality: number of grouped-convolution paths per block.
    """
    self.inplanes = 128
    self.cardinality = cardinality
    super(ResNeXt, self).__init__()
    # Stem: conv7x7/2 -> BN -> ReLU -> maxpool3x3/2.
    self.conv1 = rm.Conv2d(64, filter=7, stride=2, padding=3, ignore_bias=True)
    self.bn1 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
    self.relu = rm.Relu()
    self.maxpool = rm.MaxPool2d(filter=3, stride=2, padding=1)
    # Four stages; width doubles each stage, stride-2 from stage 2 onwards.
    self.layer1 = self._make_layer(block, 128, layers[0], stride=1,
                                   cardinality=self.cardinality)
    self.layer2 = self._make_layer(block, 256, layers[1], stride=2,
                                   cardinality=self.cardinality)
    self.layer3 = self._make_layer(block, 512, layers[2], stride=2,
                                   cardinality=self.cardinality)
    self.layer4 = self._make_layer(block, 1024, layers[3], stride=2,
                                   cardinality=self.cardinality)
    self.flat = rm.Flatten()
    self.fc = rm.Dense(num_classes)
def __init__(self, num_class):
    """Inception-v4: stem, 4xA + reduction, 7xB + reduction, 3xC + pooling, classifier.

    Args:
        num_class: number of output classes for the final Dense layer.
    """
    self.block1 = rm.Sequential(
        [InceptionV4Stem()]
        + [InceptionV4BlockA() for _ in range(4)]
        + [InceptionV4ReductionA()])
    self.block2 = rm.Sequential(
        [InceptionV4BlockB() for _ in range(7)]
        + [InceptionV4ReductionB()])
    self.block3 = rm.Sequential(
        [InceptionV4BlockC() for _ in range(3)]
        + [rm.AveragePool2d(filter=8),
           rm.Flatten(),
           rm.Dropout(0.2)])
    self.fc = rm.Dense(num_class)
def _gen_model(self):
    """Build the fully-connected model described by ``self.arch``.

    Reads ``input_shape``, ``output_shape``, ``depth``, ``unit`` and the
    optional ``debug`` flag from ``self.arch``; stores the resulting
    ``rm.Sequential`` in ``self.fcnn`` and records the batched input/output
    shapes on ``self``.
    """
    N = self.batch
    input_shape = self.arch['input_shape']
    output_shape = self.arch['output_shape']
    # dict.get replaces the `'debug' in self.arch.keys()` LBYL check.
    debug = self.arch.get('debug', False)
    self.batch_input_shape = self.get_shape(N, input_shape)
    self.batch_output_shape = self.get_shape(N, output_shape)
    depth = self.arch['depth']
    unit = self.arch['unit']
    # `depth` hidden layers of width `unit`, plus one final layer sized to
    # the flattened output shape.
    units = np.ones(depth + 1) * unit
    units[-1] = np.prod(output_shape)
    units = units.astype('int')
    layers = [rm.Flatten()]
    for width in units:
        layers.append(rm.BatchNormalize())
        layers.append(rm.Relu())
        layers.append(rm.Dense(width))
    self.fcnn = rm.Sequential(layers)
    if debug:
        # Trace shapes layer-by-layer on a zero batch.
        x = np.zeros(self.batch_input_shape)
        for _layer in layers:
            x = _layer(x)
            print(x.shape, str(_layer.__class__).split('.')[-1])
        x = rm.reshape(x, self.batch_output_shape)
        print(x.shape)
def __init__(self, classes=10):
    """VGG19: five conv stages (via layer_factory) followed by the FC classifier head."""
    # (channel, number of conv layers) per stage.
    stages = [(64, 2), (128, 2), (256, 4), (512, 4), (512, 4)]
    features = [layer_factory(channel=c, conv_layer_num=n) for c, n in stages]
    classifier = [
        rm.Flatten(),
        rm.Dense(4096),
        rm.Dropout(0.5),
        rm.Dense(4096),
        rm.Dropout(0.5),
        rm.Dense(classes),
    ]
    super(VGG19, self).__init__(features + classifier)
def __init__(self, num_class):
    """GoogLeNet (Inception v1): stem + inception stacks with two auxiliary heads.

    Args:
        num_class: number of output classes shared by all three heads.
    """
    # Stem convolutions plus the first three inception blocks.
    self.base1 = rm.Sequential([
        rm.Conv2d(64, filter=7, padding=3, stride=2),
        rm.Relu(),
        rm.MaxPool2d(filter=3, stride=2, padding=1),
        rm.BatchNormalize(mode='feature'),
        rm.Conv2d(64, filter=1, stride=1),
        rm.Relu(),
        rm.Conv2d(192, filter=3, padding=1, stride=1),
        rm.Relu(),
        rm.BatchNormalize(mode='feature'),
        rm.MaxPool2d(filter=3, stride=2, padding=1),
        InceptionV1Block(),
        InceptionV1Block([128, 128, 192, 32, 96, 64]),
        rm.MaxPool2d(filter=3, stride=2),
        InceptionV1Block([192, 96, 208, 16, 48, 64]),
    ])
    # First auxiliary classifier.
    self.aux1 = rm.Sequential([
        rm.AveragePool2d(filter=5, stride=3),
        rm.Flatten(),
        rm.Dense(1024),
        rm.Dense(num_class),
    ])
    # Middle inception blocks.
    self.base2 = rm.Sequential([
        InceptionV1Block([160, 112, 224, 24, 64, 64]),
        InceptionV1Block([128, 128, 256, 24, 64, 64]),
        InceptionV1Block([112, 144, 288, 32, 64, 64]),
    ])
    # Second auxiliary classifier.
    self.aux2 = rm.Sequential([
        rm.AveragePool2d(filter=5, stride=3),
        rm.Flatten(),
        rm.Dense(1024),
        rm.Dense(num_class),
    ])
    # Final inception blocks and global average pooling.
    self.base3 = rm.Sequential([
        InceptionV1Block([256, 160, 320, 32, 128, 128]),
        InceptionV1Block([256, 160, 320, 32, 128, 128]),
        InceptionV1Block([192, 384, 320, 48, 128, 128]),
        rm.AveragePool2d(filter=7, stride=1),
        rm.Flatten(),
    ])
    # Main classifier head.
    self.aux3 = rm.Dense(num_class)
def test_reshape(tmpdir):
    """Flatten should export to ONNX as a node whose shape initializer is [10, -1]."""
    model = rm.Sequential([rm.Flatten()])
    x = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, x)
    # The node's inputs are the data tensor id and the shape initializer id.
    id_input, id_shape = m.graph.node[0].input
    # Graph input keeps the original tensor shape.
    assert get_shape(m.graph.input[0]) == x.shape
    inis = load_initializer(m.graph.initializer)
    _test_initializer(inis, id_shape, [10, -1])
    # Output: batch dimension preserved, trailing dims flattened (10*10*10).
    assert get_shape(m.graph.output[0]) == (10, 1000)
def __init__(self, class_map=None, cells=7, bbox=2, imsize=(224, 224),
             load_pretrained_weight=False, train_whole_network=False):
    """YOLOv1: Darknet backbone plus a four-conv detection head and FC output.

    Args:
        class_map: class-name mapping forwarded to the base class.
        cells: grid size; a scalar is expanded to (cells, cells).
        bbox: number of boxes predicted per grid cell.
        imsize: input image size.
        load_pretrained_weight: forwarded to the base class.
        train_whole_network: forwarded to the base class.
    """
    # Accept a scalar cell count and expand it to (rows, cols).
    if not hasattr(cells, "__getitem__"):
        cells = (cells, cells)
    self._cells = cells
    self._bbox = bbox
    model = Darknet()
    super(Yolov1, self).__init__(class_map, imsize,
                                 load_pretrained_weight, train_whole_network, model)
    # Per cell: `bbox` boxes x (x, y, w, h, conf) plus the class scores.
    self._last_dense_size = (self.num_class + 5 * bbox) * cells[0] * cells[1]
    # Backbone (all but the last four layers) is kept frozen.
    self._freezed_network = rm.Sequential(model[:-4])
    # Detection head: four conv-BN-LeakyReLU triples (second one downsamples).
    head = []
    for stride in (1, 2, 1, 1):
        head += [
            rm.Conv2d(channel=1024, filter=3, padding=1, stride=stride,
                      ignore_bias=True),
            rm.BatchNormalize(mode='feature'),
            rm.LeakyRelu(slope=0.1),
        ]
    head += [
        rm.Flatten(),
        # Dense layer used instead of the paper's locally connected layer.
        rm.Dense(4096),
        rm.LeakyRelu(slope=0.1),
        rm.Dropout(0.5),
        rm.Dense(self._last_dense_size),
    ]
    self._network = rm.Sequential(head)
    self._opt = rm.Sgd(0.0005, 0.9)
def __init__(self, last_unit_size, load_weight_path=None):
    """Darknet backbone used by YOLOv1: 24 conv layers plus the FC head.

    Args:
        last_unit_size: width of the final Dense layer.
        load_weight_path: optional path to an .h5 weight file; an
            extension-less path has '.h5' appended before loading.
    """
    # TODO: Passing last_unit_size is not good.
    assert load_weight_path is None or isinstance(load_weight_path, str)

    def conv(channel, ksize, **kwargs):
        # Every conv in this network is followed by LeakyReLU(slope=0.1).
        return [rm.Conv2d(channel=channel, filter=ksize, **kwargs),
                rm.LeakyRelu(slope=0.1)]

    layers = []
    # 1st block
    layers += conv(64, 7, stride=2, padding=3)
    layers += [rm.MaxPool2d(stride=2, filter=2)]
    # 2nd block
    layers += conv(192, 3, padding=1)
    layers += [rm.MaxPool2d(stride=2, filter=2)]
    # 3rd block
    layers += conv(128, 1)
    layers += conv(256, 3, padding=1)
    layers += conv(256, 1)
    layers += conv(512, 3, padding=1)
    layers += [rm.MaxPool2d(stride=2, filter=2)]
    # 4th block: four (256x1x1, 512x3x3) bottleneck pairs, then widen.
    for _ in range(4):
        layers += conv(256, 1)
        layers += conv(512, 3, padding=1)
    layers += conv(512, 1)
    layers += conv(1024, 3, padding=1)
    layers += [rm.MaxPool2d(stride=2, filter=2)]
    # 5th block: two (512x1x1, 1024x3x3) pairs, then downsample.
    for _ in range(2):
        layers += conv(512, 1)
        layers += conv(1024, 3, padding=1)
    layers += conv(1024, 3, padding=1)
    layers += conv(1024, 3, stride=2, padding=1)
    # 6th block
    layers += conv(1024, 3, padding=1)
    layers += conv(1024, 3, padding=1)
    # 7th block: fully connected head.
    layers += [
        rm.Flatten(),
        rm.Dense(1024),
        rm.LeakyRelu(slope=0.1),
        rm.Dense(4096),
        rm.LeakyRelu(slope=0.1),
        rm.Dropout(0.5),
    ]
    # 8th block
    layers.append(rm.Dense(last_unit_size))
    super(Darknet, self).__init__(layers)

    if load_weight_path is not None:
        # Append the default '.h5' extension when the path has none.
        path, ext = os.path.splitext(load_weight_path)
        if ext:
            self.load(load_weight_path)
        else:
            self.load(path + '.h5')