def __init__(self, num_class):
    """Build the FCN-style network: VGG feature blocks plus score/upsample layers."""
    deconv_init = DeconvInitializer()
    # VGG-style convolutional feature extractor.
    self.block1 = layer_factory(channel=64, conv_layer_num=2, first=True)
    self.block2 = layer_factory(channel=128, conv_layer_num=2)
    self.block3 = layer_factory(channel=256, conv_layer_num=3)
    self.block4 = layer_factory(channel=512, conv_layer_num=3)
    self.block5 = layer_factory(channel=512, conv_layer_num=3)
    # Fully-convolutional replacements for the dense classifier layers.
    self.fc6 = rm.Conv2d(4096, filter=7, stride=1, padding=0)
    self.dr1 = rm.Dropout(dropout_ratio=0.5)
    self.fc7 = rm.Conv2d(4096, filter=1, stride=1, padding=0)
    self.dr2 = rm.Dropout(dropout_ratio=0.5)
    # 1x1 score layers producing num_class channels each.
    self.score_fr = rm.Conv2d(num_class, filter=1, stride=1, padding=0)
    self.score_pool4 = rm.Conv2d(num_class, filter=1, padding=0)
    # Learned (bias-free) upsampling layers sharing the deconv initializer.
    self.upscore2 = rm.Deconv2d(num_class, filter=4, stride=2, padding=0,
                                ignore_bias=True, initializer=deconv_init)
    self.upscore16 = rm.Deconv2d(num_class, filter=32, stride=16, padding=0,
                                 ignore_bias=True, initializer=deconv_init)
def __init__(self):
    """Build a small down-sampling CNN with a single-unit dense output.

    Four conv stages halve (or near-halve) the spatial size
    (28x28 -> 28x28 -> 14x14 -> 8x8 -> 4x4) while doubling channels,
    then the features are flattened and mapped to one output unit.
    """
    channel = 8  # base channel count; doubled at each down-sampling stage
    self.cnn1 = rm.Sequential([
        # 28x28 -> 28x28
        rm.Conv2d(channel=channel, filter=3, stride=1, padding=1),
        rm.LeakyRelu(),
        rm.Dropout(),
        # 28x28 -> 14x14
        rm.Conv2d(channel=channel * 2, filter=3, stride=2, padding=1),
        rm.LeakyRelu(),
        rm.Dropout(),
        # 14x14 -> 8x8
        rm.Conv2d(channel=channel * 4, filter=3, stride=2, padding=2),
        rm.LeakyRelu(),
        rm.Dropout(),
        # 8x8 -> 4x4
        rm.Conv2d(channel=channel * 8, filter=3, stride=2, padding=1),
        rm.LeakyRelu(),
        rm.Dropout(),
    ])
    # Flatten only; an intermediate dense layer was dead (commented-out)
    # code referencing an unused local, both removed.
    self.cnn2 = rm.Sequential([
        rm.Flatten(),
    ])
    self.output = rm.Dense(1)
def __init__(self, num_class=1000):
    """Set up the VGG-style feature blocks and the dense classifier head."""
    # Convolutional feature extractor: (channels, conv layers) per block.
    block_specs = [(64, 1), (128, 1), (256, 2), (512, 2), (512, 2)]
    for idx, (ch, n_conv) in enumerate(block_specs, start=1):
        setattr(self, "block%d" % idx,
                layer_factory(channel=ch, conv_layer_num=n_conv))
    # Fully connected classifier with dropout regularization.
    self.fc1 = rm.Dense(4096)
    self.dropout1 = rm.Dropout(dropout_ratio=0.5)
    self.fc2 = rm.Dense(4096)
    self.dropout2 = rm.Dropout(dropout_ratio=0.5)
    self.fc3 = rm.Dense(num_class)
def __init__(self, classes=10):
    """Assemble VGG19: five conv blocks followed by a 3-layer FC classifier."""
    feature_blocks = [
        layer_factory(channel=ch, conv_layer_num=n)
        for ch, n in ((64, 2), (128, 2), (256, 4), (512, 4), (512, 4))
    ]
    classifier_head = [
        rm.Flatten(),
        rm.Dense(4096),
        rm.Dropout(0.5),
        rm.Dense(4096),
        rm.Dropout(0.5),
        rm.Dense(classes),
    ]
    super(VGG19, self).__init__(feature_blocks + classifier_head)
def __init__(self, num_class):
    """Wire up the InceptionV4 architecture in three sequential stages."""
    # Stage 1: stem, four A blocks, reduction A.
    stage_a = [InceptionV4Stem()]
    stage_a += [InceptionV4BlockA() for _ in range(4)]
    stage_a.append(InceptionV4ReductionA())
    self.block1 = rm.Sequential(stage_a)
    # Stage 2: seven B blocks, reduction B.
    stage_b = [InceptionV4BlockB() for _ in range(7)]
    stage_b.append(InceptionV4ReductionB())
    self.block2 = rm.Sequential(stage_b)
    # Stage 3: three C blocks, then pooling, flatten and dropout.
    stage_c = [InceptionV4BlockC() for _ in range(3)]
    stage_c += [rm.AveragePool2d(filter=8), rm.Flatten(), rm.Dropout(0.2)]
    self.block3 = rm.Sequential(stage_c)
    # Final classifier.
    self.fc = rm.Dense(num_class)
def __init__(self, num_class):
    """Build the FCN8s-style network: VGG features plus score/upsample layers."""
    # VGG16 feature blocks.
    self.block1 = layer_factory(channel=64, conv_layer_num=2)
    self.block2 = layer_factory(channel=128, conv_layer_num=2)
    self.block3 = layer_factory(channel=256, conv_layer_num=3)
    self.block4 = layer_factory(channel=512, conv_layer_num=3)
    self.block5 = layer_factory(channel=512, conv_layer_num=3)
    # Convolutionalized classifier head with dropout.
    self.fc6 = rm.Conv2d(4096, filter=7, padding=3)
    self.dr1 = rm.Dropout(0.5)
    self.fc7 = rm.Conv2d(4096, filter=1)
    self.dr2 = rm.Dropout(0.5)
    # 1x1 score layers, each producing num_class channels.
    self.score_fr = rm.Conv2d(num_class, filter=1)
    self.score_pool3 = rm.Conv2d(num_class, filter=1)
    self.score_pool4 = rm.Conv2d(num_class, filter=1)
    # Transposed convolutions that upsample the score maps.
    self.upscore2 = rm.Deconv2d(num_class, filter=2, stride=2, padding=0)
    self.upscore8 = rm.Deconv2d(num_class, filter=8, stride=8, padding=0)
    self.upscore_pool4 = rm.Deconv2d(num_class, filter=2, stride=2, padding=0)
def test_dropout(tmpdir):
    """Exported ONNX Dropout node preserves shape and records its ratio."""
    model = rm.Sequential([rm.Dropout(0.5)])
    x = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, x)
    node = m.graph.node[0]
    # The dropout node consumes exactly one input tensor.
    id_input, = node.input
    # Graph input and output keep the original tensor shape.
    assert get_shape(m.graph.input[0]) == x.shape
    assert get_shape(m.graph.output[0]) == x.shape
    # The dropout probability is exported as the 'ratio' attribute.
    attrs = {a.name: a for a in node.attribute}
    assert attrs['ratio'].f == 0.5
def __init__(self, class_map=None, cells=7, bbox=2, imsize=(224, 224),
             load_pretrained_weight=False, train_whole_network=False):
    """Configure the YOLOv1 detector on top of a Darknet backbone."""
    # Accept a scalar cell count; normalize to a (rows, cols) pair.
    if not hasattr(cells, "__getitem__"):
        cells = (cells, cells)
    self._cells = cells
    self._bbox = bbox
    model = Darknet()
    super(Yolov1, self).__init__(class_map, imsize, load_pretrained_weight,
                                 train_whole_network, model)
    # Each cell predicts `bbox` boxes (x, y, w, h, conf) plus class scores.
    self._last_dense_size = (self.num_class + 5 * bbox) * cells[0] * cells[1]
    # Backbone layers (all but the last four) are kept frozen.
    self._freezed_network = rm.Sequential(model[:-4])

    def conv_bn_relu(stride=1):
        # 3x3 conv + feature-mode batch-norm + leaky ReLU, repeated below.
        return [rm.Conv2d(channel=1024, filter=3, padding=1, stride=stride,
                          ignore_bias=True),
                rm.BatchNormalize(mode='feature'),
                rm.LeakyRelu(slope=0.1)]

    detection_layers = (conv_bn_relu() + conv_bn_relu(stride=2) +
                        conv_bn_relu() + conv_bn_relu())
    detection_layers += [
        rm.Flatten(),
        rm.Dense(4096),  # instead of locally connected layer, we are using Dense layer
        rm.LeakyRelu(slope=0.1),
        rm.Dropout(0.5),
        rm.Dense(self._last_dense_size),
    ]
    self._network = rm.Sequential(detection_layers)
    self._opt = rm.Sgd(0.0005, 0.9)
def __init__(self, feature_graph, num_target=1, fc_unit=(100, 50),
             neighbors=5, channels=(10, 20, 20)):
    """Three stacked graph convolutions followed by a small dense head."""
    super(GCNet, self).__init__()
    # Graph-convolution layers gc1..gc3, all sharing the same feature graph.
    for idx, ch in enumerate(channels, start=1):
        setattr(self, "gc%d" % idx,
                GraphCNN(channel=ch, neighbors=neighbors,
                         feature_graph=feature_graph))
    # Fully connected regression head.
    self.fc1 = rm.Dense(fc_unit[0])
    self.fc2 = rm.Dense(fc_unit[1])
    self.fc3 = rm.Dense(num_target)
    # Very light dropout between dense layers.
    self.dropout = rm.Dropout(dropout_ratio=0.01)
def test_gpu_node_dropout(a):
    # Verify Dropout produces matching forward values and gradients on GPU
    # and CPU when the numpy RNG is seeded identically before each pass.
    set_cuda_active(True)
    g1 = Variable(a)
    layer = rm.Dropout()
    np.random.seed(1)  # fix the dropout mask for the GPU pass
    g3 = rm.sum(layer(g1))
    g = g3.grad()
    g_g1 = g.get(g1)
    g3.to_cpu()
    set_cuda_active(False)
    np.random.seed(1)  # same seed -> same dropout mask for the CPU pass
    c3 = rm.sum(layer(g1))
    c = c3.grad()
    c_g1 = c.get(g1)
    # Forward sums and input gradients must agree across devices.
    close(g3, c3)
    close(c_g1, g_g1)
def __init__(self, last_unit_size, load_weight_path=None):
    """Build the Darknet backbone used by YOLOv1.

    Args:
        last_unit_size: Unit count of the final dense layer.
        load_weight_path: Optional path to a saved weight file.
    """
    # TODO: Passing last_unit_size is not good.
    assert load_weight_path is None or isinstance(load_weight_path, str)

    def conv(channel, filter, **kwargs):
        # Convolution followed by the leaky ReLU used throughout Darknet.
        return [rm.Conv2d(channel=channel, filter=filter, **kwargs),
                rm.LeakyRelu(slope=0.1)]

    def pool():
        return rm.MaxPool2d(stride=2, filter=2)

    layers = []
    # 1st Block
    layers += conv(64, 7, stride=2, padding=3) + [pool()]
    # 2nd Block
    layers += conv(192, 3, padding=1) + [pool()]
    # 3rd Block
    layers += conv(128, 1) + conv(256, 3, padding=1)
    layers += conv(256, 1) + conv(512, 3, padding=1) + [pool()]
    # 4th Block: four 256/512 bottleneck pairs, then widen and pool.
    for _ in range(4):
        layers += conv(256, 1) + conv(512, 3, padding=1)
    layers += conv(512, 1) + conv(1024, 3, padding=1) + [pool()]
    # 5th Block: two 512/1024 bottleneck pairs, then strided down-sampling.
    for _ in range(2):
        layers += conv(512, 1) + conv(1024, 3, padding=1)
    layers += conv(1024, 3, padding=1)
    layers += conv(1024, 3, stride=2, padding=1)
    # 6th Block
    layers += conv(1024, 3, padding=1)
    layers += conv(1024, 3, padding=1)
    # 7th Block
    layers += [rm.Flatten(),
               rm.Dense(1024),
               rm.LeakyRelu(slope=0.1),
               rm.Dense(4096),
               rm.LeakyRelu(slope=0.1),
               rm.Dropout(0.5)]
    # 8th Block
    layers.append(rm.Dense(last_unit_size))
    super(Darknet, self).__init__(layers)

    if load_weight_path is not None:
        # Call download method.
        path, ext = os.path.splitext(load_weight_path)
        if ext:
            self.load(load_weight_path)
        else:
            self.load(path + '.h5')