def __init__(self, class_map=None, imsize=(300, 300), overlap_threshold=0.5, load_pretrained_weight=False, train_whole_network=False):
    """Build an SSD detector on top of a VGG16 backbone.

    Only a fixed 300x300 input resolution is supported by this version.
    """
    # This SSD implementation is hard-wired to 300x300 inputs.
    assert imsize == (300, 300), \
        "SSD of ReNomIMG v1.1 only accepts image size of (300, 300)."
    backbone = VGG16()
    super(SSD, self).__init__(class_map, imsize,
                              load_pretrained_weight, train_whole_network,
                              backbone._model)
    # One extra slot for the background class.
    self.num_class = len(self.class_map) + 1
    self.overlap_threshold = overlap_threshold
    # Default prior (anchor) boxes used when matching targets.
    self.prior = PriorBox()
    self.prior_box = self.prior.create()
    self.num_prior = len(self.prior_box)
    self._network = DetectorNetwork(self.num_class, backbone)
    # The first two VGG blocks stay frozen during training.
    self._freezed_network = rm.Sequential(
        [backbone._model.block1, backbone._model.block2])
    self._opt = rm.Sgd(1e-3, 0.9)
def __init__(self, class_map=None, train_final_upscore=False, imsize=(224, 224), load_pretrained_weight=False, train_whole_network=False):
    """Set up the FCN8s semantic-segmentation model."""
    self.decay_rate = 5e-4
    self._opt = rm.Sgd(1e-5, 0.9)
    self._train_final_upscore = train_final_upscore
    # The backbone is built with a single output channel; the real channel
    # counts are patched in below once num_class has been resolved.
    self._model = CNN_FCN8s(1)
    super(FCN8s, self).__init__(class_map, imsize, load_pretrained_weight,
                                train_whole_network, load_target=self._model)
    # Point every scoring / upscoring layer at the resolved class count.
    for layer_name in ("score_fr", "score_pool3", "score_pool4",
                       "upscore2", "upscore_pool4", "upscore8"):
        getattr(self._model, layer_name)._channel = self.num_class
    self._freeze()
def __init__(self, class_map=[], imsize=(224, 224), load_pretrained_weight=False, train_whole_network=False):
    """Construct the classifier.

    Args:
        class_map (list): Class names handled by the model.
        imsize (int or tuple): Input image size; a scalar is expanded to a square.
        load_pretrained_weight (bool or str): Not supported yet; must be falsy.
        train_whole_network (bool): Whether to train the backbone as well.
    """
    # FIX: this guard originally sat at the *end* of __init__ — after the
    # weights had already been downloaded and loaded — and referenced the
    # misspelled attribute `___name__` (triple underscore), which raised
    # AttributeError while formatting the message. Check first, with the
    # correctly spelled `__name__`.
    assert not load_pretrained_weight, "Currently pretrained weight of %s is not prepared. Please set False to `load_pretrained_weight` flag." % self.__class__.__name__
    if not hasattr(imsize, "__getitem__"):
        imsize = (imsize, imsize)
    self.imsize = imsize
    self.num_class = len(class_map)
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self._model = CNN_VGG11(self.num_class)
    self._train_whole_network = train_whole_network
    self._opt = rm.Sgd(0.01, 0.9)
    self.decay_rate = 0.0005
    # Kept for when pretrained weights become available; unreachable while
    # the assert above forbids `load_pretrained_weight` (except under -O).
    if load_pretrained_weight:
        if isinstance(load_pretrained_weight, bool):
            load_pretrained_weight = self.__class__.__name__ + '.h5'
        if not os.path.exists(load_pretrained_weight):
            download(self.WEIGHT_URL, load_pretrained_weight)
        self._model.load(load_pretrained_weight)
        # Reinitialize the classifier head after loading backbone weights.
        self._model.fc1.params = {}
        self._model.fc2.params = {}
        self._model.fc3.params = {}
def __init__(self, class_map=[], imsize=(224, 224), load_pretrained_weight=False, train_whole_network=False):
    """Construct a DenseNet-style classifier."""
    # Pretrained weights are not distributed for this model yet.
    assert not load_pretrained_weight, "In ReNomIMG version {}, pretained weight of {} is not prepared.".format(
        __version__, self.__class__.__name__)
    imsize = imsize if hasattr(imsize, "__getitem__") else (imsize, imsize)
    # Dense-block layout and growth rate of this architecture variant.
    layer_per_block = [6, 12, 48, 32]
    growth_rate = 32
    self.imsize = imsize
    self.num_class = len(class_map)
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self._train_whole_network = train_whole_network
    self._model = CNN_DenseNet(self.num_class, layer_per_block,
                               growth_rate, train_whole_network)
    self._opt = rm.Sgd(0.01, 0.9)
    self.decay_rate = 0.0005
    if load_pretrained_weight:
        weight_path = load_pretrained_weight
        if isinstance(weight_path, bool):
            weight_path = self.__class__.__name__ + '.h5'
        if not os.path.exists(weight_path):
            download(self.WEIGHT_URL, weight_path)
        self._model.load(weight_path)
        # Reinitialize every layer after loading, then drop the head when
        # the class count differs from the 1000-way pretraining setup.
        for layer in self._model._network.iter_models():
            layer.params = {}
        if self.num_class != 1000:
            self._model.params = {}
    self._freeze()
def test_save(tmpdir_factory):
    """Round-trip a nested model through save()/load() and verify that
    weights, the per-parameter _auto_update flags, and SERIALIZED
    attributes all survive."""
    class NN2(rm.Model):
        def __init__(self):
            super(NN2, self).__init__()
            self.layer1 = rm.Dense(output_size=2)
            self.layer2 = rm.Dense(output_size=2)
            self.bn = rm.BatchNormalize()

        def forward(self, x):
            return self.layer2(self.bn(rm.relu(self.layer1(x))))

    class NN3(rm.Model):
        # 'AAA' / 'BBB' are extra attributes persisted alongside weights.
        SERIALIZED = ('AAA', 'BBB')

        def __init__(self):
            super(NN3, self).__init__()
            self.layer1 = NN2()
            self.layer2 = NN2()
            self.AAA = 0

        def forward(self, x):
            return self.layer2(rm.relu(self.layer1(x)))

    nn = NN3()
    # One forward/backward/update pass so parameters actually exist.
    with nn.train():
        result = nn(np.random.rand(2, 2))
        l = rm.softmax_cross_entropy(result, np.random.rand(2, 2))
    grad = l.grad()
    opt = rm.Sgd()
    grad.update(opt)
    # Flip one flag off to check flags are serialized, not recomputed.
    nn.layer1.layer1.params.b._auto_update = False
    d = tmpdir_factory.mktemp('h5')
    fname = os.path.join(str(d), 'aaa')
    # Non-default SERIALIZED attribute must survive the round trip too.
    nn.AAA = 9999
    nn.save(fname)
    nn2 = NN3()
    nn2.load(fname)
    # Every weight/bias of both nested sub-models must match exactly.
    assert np.allclose(nn.layer1.layer1.params.w, nn2.layer1.layer1.params.w)
    assert np.allclose(nn.layer1.layer1.params.b, nn2.layer1.layer1.params.b)
    assert np.allclose(nn.layer1.layer2.params.w, nn2.layer1.layer2.params.w)
    assert np.allclose(nn.layer1.layer2.params.b, nn2.layer1.layer2.params.b)
    assert np.allclose(nn.layer2.layer1.params.w, nn2.layer2.layer1.params.w)
    assert np.allclose(nn.layer2.layer1.params.b, nn2.layer2.layer1.params.b)
    assert np.allclose(nn.layer2.layer2.params.w, nn2.layer2.layer2.params.w)
    assert np.allclose(nn.layer2.layer2.params.b, nn2.layer2.layer2.params.b)
    assert nn2.layer1.layer1.params.w._auto_update
    assert not nn2.layer1.layer1.params.b._auto_update
    assert nn2.AAA == 9999
def test_trainer():
    """Verify that Trainer fires every lifecycle event hook exactly as
    registered (start, start_epoch, forward, backward, updated, end_epoch)."""
    class NN(rm.Model):
        def __init__(self):
            super(NN, self).__init__()
            self.params.value1 = rm.Variable(np.array([1., 2., 3., 4.]))
            self.params.value2 = rm.Variable(np.array([1., 2., 3., 4.]))

        def forward(self, v):
            return v * self.params.value1 * self.params.value2

    distributor = NdarrayDistributor(
        np.array([[1., 2., 3., 4.], [1., 2., 3., 4.]]),
        np.array([[1., 2., 3., 4.], [1., 2., 3., 4.]]))
    trainer = Trainer(NN(), num_epoch=10,
                      loss_func=rm.softmax_cross_entropy,
                      batch_size=100, optimizer=rm.Sgd())
    # Each hook records its name; a set suffices since only the fact
    # that every event fired at least once is asserted.
    l = set()

    @trainer.events.start
    def start(trainer):
        l.add('start')

    @trainer.events.start_epoch
    def start_epoch(trainer):
        l.add('start_epoch')

    @trainer.events.forward
    def forward(trainer):
        l.add('forward')

    @trainer.events.backward
    def backward(trainer):
        l.add('backward')

    @trainer.events.updated
    def updated(trainer):
        l.add('updated')

    @trainer.events.end_epoch
    def end_epoch(trainer):
        l.add('end_epoch')

    trainer.train(distributor)
    assert l == set([
        'start', 'start_epoch', 'forward', 'backward', 'updated', 'end_epoch'
    ])
def __init__(self, class_map=None, imsize=(256, 256), load_pretrained_weight=False, train_whole_network=False):
    """Construct a UNet segmentation model."""
    # No pretrained weights are distributed for UNet yet.
    assert not load_pretrained_weight, "Currently pretrained weight of %s is not prepared. Please set False to `load_pretrained_weight` flag." % self.__class__.__name__
    super(UNet, self).__init__(class_map, imsize,
                               load_pretrained_weight, train_whole_network,
                               None)
    self.decay_rate = 0.00002
    self._opt = rm.Sgd(1e-2, 0.9)
    # The network width depends on the class_map resolved by the base class.
    self._model = CNN_UNet(len(self.class_map))
    self._freeze()
def __init__(self, class_map=[], imsize=(512, 512), load_pretrained_weight=False, train_whole_network=False):
    """Construct a TernausNet segmentation model."""
    # No pretrained weights are distributed for this model yet.
    assert not load_pretrained_weight, "Currently pretrained weight of %s is not prepared. Please set False to `load_pretrained_weight` flag." % self.__class__.__name__
    imsize = imsize if hasattr(imsize, "__getitem__") else (imsize, imsize)
    self.imsize = imsize
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self.num_class = len(class_map)
    self._train_whole_network = train_whole_network
    self._model = CNN_TernausNet(self.num_class)
    self._opt = rm.Sgd(4e-3, 0.9)
    self.decay_rate = 0.00002
    self._freeze()
def __init__(self, class_map=None, imsize=(224, 224), load_pretrained_weight=False, train_whole_network=False):
    """Construct a VGG19 classifier."""
    self._model = CNN_VGG19()
    super(VGG19, self).__init__(class_map, imsize,
                                load_pretrained_weight, train_whole_network,
                                self._model)
    self._opt = rm.Sgd(0.01, 0.9)
    self.decay_rate = 0.0005
    # Drop the fully-connected head so it is re-initialized for the
    # current class count.
    for fc in (self._model.fc1, self._model.fc2, self._model.fc3):
        fc.params = {}
def __init__(self, class_map=None, cells=7, bbox=2, imsize=(224, 224), load_pretrained_weight=False, train_whole_network=False):
    """Construct a YOLO v1 detector over a Darknet backbone.

    Args:
        class_map: Class names handled by the detector.
        cells: Grid resolution; a scalar is expanded to a square grid.
        bbox: Number of boxes predicted per grid cell.
        imsize: Input image size.
        load_pretrained_weight: Backbone pretrained-weight flag/path.
        train_whole_network: Whether the frozen backbone is trained too.
    """
    if not hasattr(cells, "__getitem__"):
        cells = (cells, cells)
    self._cells = cells
    self._bbox = bbox
    model = Darknet()
    super(Yolov1, self).__init__(class_map, imsize,
                                 load_pretrained_weight, train_whole_network,
                                 model)
    # Per YOLO v1: each cell predicts `bbox` boxes (5 values each) plus
    # one score per class, over a cells[0] x cells[1] grid.
    self._last_dense_size = (self.num_class + 5 * bbox) * cells[0] * cells[1]
    # All but the last 4 Darknet layers form the frozen feature extractor.
    self._freezed_network = rm.Sequential(model[:-4])
    self._network = rm.Sequential([
        rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Conv2d(channel=1024, filter=3, padding=1, stride=2, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
        rm.BatchNormalize(mode='feature'),
        rm.LeakyRelu(slope=0.1),
        rm.Flatten(),
        rm.Dense(4096),  # instead of locally connected layer, we are using Dense layer
        rm.LeakyRelu(slope=0.1),
        rm.Dropout(0.5),
        rm.Dense(self._last_dense_size)
    ])
    self._opt = rm.Sgd(0.0005, 0.9)
def __init__(self, class_map=None, anchor=None, imsize=(320, 320), load_pretrained_weight=False, train_whole_network=False):
    """Construct a YOLO v2 detector on a Darknet19 backbone.

    Args:
        class_map: Class names handled by the detector.
        anchor: An AnchorYolov2 instance (or None for no anchors yet).
        imsize: Input size; both dimensions must be multiples of 32
            because the backbone downsamples by a factor of 32.
        load_pretrained_weight: Backbone pretrained-weight flag/path.
        train_whole_network: Whether the frozen backbone is trained too.
    """
    assert (imsize[0] / 32.) % 1 == 0 and (imsize[1] / 32.) % 1 == 0, \
        "Yolo v2 only accepts 'imsize' argument which is list of multiple of 32. \
        exp),imsize=(320, 320)."
    self.flag = False  # This is used for modify loss function.
    self.global_counter = 0
    # Accept either an AnchorYolov2 container or fall back to defaults.
    self.anchor = [] if not isinstance(anchor, AnchorYolov2) else anchor.anchor
    self.anchor_size = imsize if not isinstance(anchor, AnchorYolov2) else anchor.imsize
    self.num_anchor = 0 if anchor is None else len(anchor)
    darknet = Darknet19(1)
    self._opt = rm.Sgd(0.001, 0.9)
    super(Yolov2, self).__init__(class_map, imsize,
                                 load_pretrained_weight, train_whole_network,
                                 darknet)

    # Initialize trainable layers.
    # Each anchor predicts 4 box coords + 1 objectness + class scores.
    last_channel = (self.num_class + 5) * self.num_anchor
    self._conv1 = rm.Sequential([
        DarknetConv2dBN(channel=1024, prev_ch=1024),
        DarknetConv2dBN(channel=1024, prev_ch=1024),
    ])
    self._conv21 = DarknetConv2dBN(channel=64, prev_ch=512, filter=1)
    self._conv2 = DarknetConv2dBN(channel=1024, prev_ch=1024 + 256)
    self._last = rm.Conv2d(channel=last_channel, filter=1)
    self._freezed_network = darknet._base

    # Re-initialize the detection head: conv weights get fresh initializer
    # draws; conv biases are zeroed and frozen, batch-norm biases are
    # zeroed but remain trainable.
    for model in [self._conv21, self._conv1, self._conv2]:
        for layer in model.iter_models():
            if not layer.params:
                continue
            if isinstance(layer, rm.Conv2d):
                layer.params = {
                    "w": rm.Variable(layer._initializer(layer.params.w.shape), auto_update=True),
                    "b": rm.Variable(np.zeros_like(layer.params.b), auto_update=False),
                }
            elif isinstance(layer, rm.BatchNormalize):
                layer.params = {
                    "w": rm.Variable(layer._initializer(layer.params.w.shape), auto_update=True),
                    "b": rm.Variable(np.zeros_like(layer.params.b), auto_update=True),
                }
def __init__(self, class_map=[], imsize=(299, 299), load_pretrained_weight=False, train_whole_network=True):
    """Construct an InceptionV4 classifier."""
    imsize = imsize if hasattr(imsize, "__getitem__") else (imsize, imsize)
    self.imsize = imsize
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self.num_class = len(class_map)
    self._train_whole_network = train_whole_network
    self._model = CNN_InceptionV4(self.num_class)
    self._opt = rm.Sgd(0.045, 0.9)
    self.decay_rate = 0.0005
    if load_pretrained_weight:
        # A bare True means "use the default per-class weight file name".
        weight_path = load_pretrained_weight
        if isinstance(weight_path, bool):
            weight_path = self.__class__.__name__ + '.h5'
        if not os.path.exists(weight_path):
            download(self.WEIGHT_URL, weight_path)
        self._model.load(weight_path)
        # Reinitialize every layer so only the architecture is reused.
        for layer in self._model._network.iter_models():
            layer.params = {}
def __init__(self, class_map=None, imsize=(224, 224), plateau=False, load_pretrained_weight=False, train_whole_network=False):
    """Construct a ResNet152 classifier (Bottleneck blocks 3-8-36-3)."""
    self._model = ResNet(1, Bottleneck, [3, 8, 36, 3])
    super(ResNet152, self).__init__(class_map, imsize,
                                    load_pretrained_weight, train_whole_network,
                                    self._model)
    self._opt = rm.Sgd(0.1, 0.9)
    self.decay_rate = 0.0001
    # Bookkeeping for the reduce-LR-on-plateau schedule.
    self.plateau = plateau
    self._patience = 15
    self._counter = 0
    self._min_lr = 1e-6
    self._factor = np.sqrt(0.1)
    # Drop the classification head so it is re-initialized for the
    # current class count.
    self._model.fc.params = {}
def __init__(self, class_map=[], cells=7, bbox=2, imsize=(224, 224), load_pretrained_weight=False, train_whole_network=False):
    """Construct a YOLO-style detector over a Darknet backbone."""
    num_class = len(class_map)
    cells = cells if hasattr(cells, "__getitem__") else (cells, cells)
    imsize = imsize if hasattr(imsize, "__getitem__") else (imsize, imsize)
    self.num_class = num_class
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self._cells = cells
    self._bbox = bbox
    # Per YOLO: each cell predicts `bbox` boxes (5 values each) plus one
    # score per class, over a cells[0] x cells[1] grid.
    self._last_dense_size = (num_class + 5 * bbox) * cells[0] * cells[1]
    darknet = Darknet(self._last_dense_size)
    self._train_whole_network = train_whole_network
    self.imsize = imsize
    # All but the last 7 layers form the frozen feature extractor; the
    # remainder is the trainable detection head.
    self._freezed_network = rm.Sequential(darknet[:-7])
    self._network = rm.Sequential(darknet[-7:])
    self._opt = rm.Sgd(0.01, 0.9)
    if load_pretrained_weight:
        weight_path = load_pretrained_weight
        if isinstance(weight_path, bool):
            weight_path = self.__class__.__name__ + '.h5'
        if not os.path.exists(weight_path):
            download(self.WEIGHT_URL, weight_path)
        self.load(weight_path)
        # Only the backbone weights are reused; the head restarts fresh.
        for layer in self._network.iter_models():
            layer.params = {}
def __init__(self, class_map=[], imsize=(299, 299), load_pretrained_weight=False, train_whole_network=True):
    """Construct an InceptionV2 classifier.

    Args:
        class_map (list): Class names handled by the model.
        imsize (int or tuple): Input size; a scalar is expanded to a square.
        load_pretrained_weight (bool or str): Not supported yet; must be falsy.
        train_whole_network (bool): Whether to train the backbone as well.
    """
    assert not load_pretrained_weight, "In ReNomIMG version {}, pretained weight of {} is not prepared.".format(
        __version__, self.__class__.__name__)
    if not hasattr(imsize, "__getitem__"):
        imsize = (imsize, imsize)
    self.imsize = imsize
    self.num_class = len(class_map)
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self._train_whole_network = train_whole_network
    self._model = CNN_InceptionV2(self.num_class)
    self._opt = rm.Sgd(0.045, 0.9)
    self.decay_rate = 0.0005
    if load_pretrained_weight:
        if isinstance(load_pretrained_weight, bool):
            load_pretrained_weight = self.__class__.__name__ + '.h5'
        if not os.path.exists(load_pretrained_weight):
            download(self.WEIGHT_URL, load_pretrained_weight)
        self._model.load(load_pretrained_weight)
        # FIX: these two lines referenced an undefined name `layer`
        # (`layer._model.aux1...`), which raised NameError whenever this
        # branch ran. Reset the auxiliary heads on self._model instead
        # (assumes CNN_InceptionV2 exposes aux1/aux2 — TODO confirm).
        self._model.aux1.params = {}
        self._model.aux2.params = {}
def __init__(self, class_map=None, imsize=(300, 300), overlap_threshold=0.5, load_pretrained_weight=False, train_whole_network=False):
    """Construct an SSD detector over a VGG16 backbone."""
    imsize = imsize if hasattr(imsize, "__getitem__") else (imsize, imsize)
    self.imsize = imsize
    # One extra slot for the background class.
    self.num_class = len(class_map) + 1
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self._train_whole_network = train_whole_network
    self.overlap_threshold = overlap_threshold
    # Default prior (anchor) boxes used when matching targets.
    self.prior = create_priors()
    self.num_prior = len(self.prior)
    backbone = VGG16(class_map, load_pretrained_weight=load_pretrained_weight)
    # The first two VGG blocks stay frozen during training.
    self._freezed_network = rm.Sequential(
        [backbone._model.block1, backbone._model.block2])
    self._network = DetectorNetwork(self.num_class, backbone)
    self._opt = rm.Sgd(1e-3, 0.9)
def __init__(self, class_map=[], imsize=(224, 224), load_pretrained_weight=False, train_whole_network=False):
    """Construct an FCN8s segmentation model, optionally seeding its
    encoder from a pretrained VGG16."""
    imsize = imsize if hasattr(imsize, "__getitem__") else (imsize, imsize)
    self.imsize = imsize
    self.num_class = len(class_map)
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self._model = CNN_FCN8s(self.num_class)
    self._train_whole_network = train_whole_network
    self._opt = rm.Sgd(0.001, 0.9)
    if load_pretrained_weight:
        vgg16 = VGG16(class_map, load_pretrained_weight=load_pretrained_weight,
                      train_whole_network=train_whole_network)
        # Share the five VGG16 encoder blocks with the FCN backbone.
        for block_name in ("block1", "block2", "block3", "block4", "block5"):
            setattr(self._model, block_name, getattr(vgg16._model, block_name))
def __init__(self, class_map=[], imsize=(224, 224), cardinality=32, plateau=False, load_pretrained_weight=False, train_whole_network=False):
    """Construct a ResNeXt classifier (Bottleneck blocks 3-4-23-3)."""
    imsize = imsize if hasattr(imsize, "__getitem__") else (imsize, imsize)
    self.imsize = imsize
    self.num_class = len(class_map)
    self.class_map = [c.encode("ascii", "ignore") for c in class_map]
    self._train_whole_network = train_whole_network
    self.decay_rate = 0.0001
    self.cardinality = cardinality
    self._model = ResNeXt(self.num_class, Bottleneck,
                          [3, 4, 23, 3], self.cardinality)
    self._opt = rm.Sgd(0.1, 0.9)
    # Bookkeeping for the reduce-LR-on-plateau schedule.
    self.plateau = plateau
    self._patience = 15
    self._counter = 0
    self._min_lr = 1e-6
    self._factor = np.sqrt(0.1)
    if load_pretrained_weight:
        weight_path = load_pretrained_weight
        if isinstance(weight_path, bool):
            weight_path = self.__class__.__name__ + '.h5'
        if not os.path.exists(weight_path):
            download(self.WEIGHT_URL, weight_path)
        self._model.load(weight_path)
        # Drop the classification head so it is re-initialized for the
        # current class count.
        self._model.fc.params = {}
y_axis = base + noise x_axis = x_axis.reshape(N, 1) y_axis = y_axis.reshape(N, 1) idx = random.permutation(N) train_idx = idx[::2] test_idx = idx[1::2] train_x = x_axis[train_idx] train_y = y_axis[train_idx] test_x = x_axis[test_idx] test_y = y_axis[test_idx] seq_model = rm.Sequential( [rm.Dense(1), rm.Dense(10), rm.Sigmoid(), rm.Dense(1)]) optimizer = rm.Sgd(0.1, momentum=0.5) plt.clf() epoch_splits = 10 epoch_period = epoch // epoch_splits fig, ax = plt.subplots(epoch_splits, 2, figsize=(4, epoch_splits)) curve = [[], []] for e in range(epoch): with seq_model.train(): loss = rm.mean_squared_error(seq_model(train_x), train_y) grad = loss.grad() grad.update(optimizer) curve[0].append(loss.as_ndarray()) loss = rm.mean_squared_error(seq_model(test_x), test_y) curve[1].append(loss.as_ndarray()) if e % epoch_period == epoch_period - 1 or e == epoch:
# labels = f.read().strip().split("\n") # load model print("loading model...") model = Darknet19(classes) backup_file = "%s/backup.h5" % (backup_path) if os.path.isfile(backup_file): model.load(backup_file) #cuda.get_device(0).use() #model.to_gpu() # for gpu trainer = Trainer(model, batch_size=batch_size, loss_func=rm.mean_squared_error, num_epoch=1, optimizer=rm.Sgd(lr=learning_rate, momentum=momentum), num_gpu=num_gpu) # start to train print("start training") for batch in range(max_batches): # generate sample x, t = generator.generate_samples( n_samples=batch_size, n_items=1, crop_width=input_width, crop_height=input_height, min_item_scale=0.1, max_item_scale=0.2, rand_angle=25, minimum_crop=0.8,
def __init__(self, class_map):
    """Initialize the DenseNet base with a class map and its default
    SGD optimizer (lr=0.1, momentum=0.9)."""
    super(DenseNetBase, self).__init__(class_map)
    self._opt = rm.Sgd(0.1, 0.9)