Example #1
    def forward(self, x):
        """Performs forward propagation.
        This function can be called using ``__call__`` method.
        See following example of method usage.

        Args:
            x (ndarray, Node): Input image as a tensor.

        Returns:
            (Node): Raw output of YOLOv2.
            You can reform it into bounding boxes using the ``get_bbox`` method.

        Example:
            >>> import numpy as np
            >>> from renom_img.api.detection.yolo_v2 import Yolov2
            >>>
            >>> x = np.random.rand(1, 3, 224, 224)
            >>> class_map = ["dog", "cat"]
            >>> model = Yolov2(class_map)
            >>> y = model.forward(x) # Forward propagation.
            >>> y = model(x)  # Same result as above.
            >>>
            >>> bbox = model.get_bbox(y) # The output can be reformed using get_bbox method.

        """

        assert len(self.class_map) > 0, \
            "Class map is empty. Please set the class_map attribute when instantiating the model class, " + \
            "or load an already trained model using the 'load()' method."
        assert self.num_anchor > 0, \
            "Anchor list is empty. Please calculate the anchor list with the create_anchor function before instantiating the model class, " + \
            "or load an already trained model using the 'load()' method."

        self._freezed_network.set_auto_update(self.train_whole_network)
        self._freezed_network.set_models(inference=(
            not self.train_whole_network or getattr(self, 'inference', False)))

        h, f = self._freezed_network(x)
        f = self._conv21(f)
        h = self._conv1(h)

        # Passthrough (reorg) connection: stack the four 2x2 sub-grids of the
        # high-resolution feature map along the channel axis and concatenate with h.
        h = self._conv2(rm.concat(h,
                                  rm.concat([f[:, :, i::2, j::2] for i in range(2) for j in range(2)])))

        out = self._last(h)
        # Create yolo format.
        N, C, H, W = h.shape

        reshaped = out.reshape(N, self.num_anchor, -1, W * H)
        conf = rm.sigmoid(reshaped[:, :, 0:1]).transpose(0, 2, 1, 3)  # objectness confidence
        px = rm.sigmoid(reshaped[:, :, 1:2]).transpose(0, 2, 1, 3)    # box center x, relative to the grid cell
        py = rm.sigmoid(reshaped[:, :, 2:3]).transpose(0, 2, 1, 3)    # box center y, relative to the grid cell
        pw = rm.exp(reshaped[:, :, 3:4]).transpose(0, 2, 1, 3)        # box width scale for the anchor
        ph = rm.exp(reshaped[:, :, 4:5]).transpose(0, 2, 1, 3)        # box height scale for the anchor
        cl = rm.softmax(reshaped[:, :, 5:].transpose(0, 2, 1, 3))     # class probabilities
        return rm.concat(conf, px, py, pw, ph, cl).transpose(0, 2, 1, 3).reshape(N, -1, H, W)
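
The passthrough step above can be hard to read from the slicing alone. Below is a minimal plain-NumPy sketch of the same reorg operation, with an assumed (1, 64, 26, 26) feature map and an explicit channel axis for the concatenation:

import numpy as np

# Reorg/passthrough sketch: split a feature map into its four 2x2 sub-grids and
# stack them on the channel axis, halving the spatial size and quadrupling the
# channel count. The shape (1, 64, 26, 26) is only an illustrative assumption.
f = np.random.rand(1, 64, 26, 26)
reorg = np.concatenate([f[:, :, i::2, j::2] for i in range(2) for j in range(2)], axis=1)
print(reorg.shape)  # (1, 256, 13, 13)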
Example #2
    def forward(self, x, eps=1e-3):
        nb = len(x)
        self.enc(x)
        # Reparameterization trick: z = mean + std * e with e ~ N(0, 1),
        # where std = exp(zlv / 2) and zlv is the encoder's log-variance.
        e = np.random.randn(nb, self.latent_dim)
        self.z = self.enc.zm + rm.exp(self.enc.zlv / 2) * e
        self.decd = self.dec(self.z)
        self.reconE = rm.mean_squared_error(self.decd, x)
        # Closed-form KL divergence between N(zm, exp(zlv)) and N(0, 1), averaged over the batch.
        self.kl_loss = -0.5 * rm.sum(1 + self.enc.zlv - self.enc.zm**2 -
                                     rm.exp(self.enc.zlv)) / nb
        return self.decd
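
The sampling step in both VAE examples relies on the reparameterization trick. Here is a small plain-NumPy sketch of that step; the batch size and latent dimension are assumptions for illustration:

import numpy as np

# Reparameterization trick sketch: instead of sampling z ~ N(zm, exp(zlv))
# directly, sample e ~ N(0, 1) and shift/scale it, which keeps the sampling
# step differentiable with respect to zm and zlv.
batch, latent_dim = 4, 2
zm = np.zeros((batch, latent_dim))    # stands in for the encoder mean
zlv = np.zeros((batch, latent_dim))   # stands in for the encoder log-variance
e = np.random.randn(batch, latent_dim)
z = zm + np.exp(zlv / 2) * e          # same formula as self.z above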
Example #3
    def forward(self, x):
        self.z_mean, self.z_log_var = self.enc(x)
        # Reparameterization trick, as in the previous example.
        e = np.random.randn(len(x), self.latent_dim)
        z_new = self.z_mean + rm.exp(self.z_log_var / 2) * e
        self.decoded = self.dec(z_new)
        nb, zd = self.z_log_var.shape
        # Closed-form KL divergence between N(z_mean, exp(z_log_var)) and N(0, 1).
        self.kl_loss = -0.5 * rm.sum(1 + self.z_log_var - self.z_mean**2 -
                                     rm.exp(self.z_log_var)) / nb
        # Alternative: self.recon_loss = rm.sigmoid_cross_entropy(self.decoded, x)
        self.recon_loss = rm.mean_squared_error(self.decoded, x)
        vae_loss = self.kl_loss + self.recon_loss
        return vae_loss
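
The KL term used in both VAE examples is the closed-form KL divergence between the approximate posterior N(mu, sigma^2) and the standard normal prior. A quick numerical sanity check of that formula against a Monte Carlo estimate, using arbitrary assumed scalar values:

import numpy as np

# Sanity check of the closed-form KL term used above:
# KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * (1 + log_var - mu**2 - exp(log_var)).
mu, log_var = 0.7, -0.3
kl_closed = -0.5 * (1 + log_var - mu**2 - np.exp(log_var))

# Monte Carlo estimate of E_q[log q(z) - log p(z)] for comparison.
z = mu + np.exp(log_var / 2) * np.random.randn(1000000)
log_q = -0.5 * np.log(2 * np.pi) - 0.5 * log_var - 0.5 * (z - mu) ** 2 / np.exp(log_var)
log_p = -0.5 * np.log(2 * np.pi) - 0.5 * z ** 2
kl_mc = np.mean(log_q - log_p)
print(kl_closed, kl_mc)  # The two estimates agree closely.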
Example #4
    def forward(self, x, eps=1e-3):
        nb = len(x)
        size = (nb, self.latent_dim)
        # Sample a batch of latent vectors from the prior N(0, 1).
        zp = np.random.randn(*size).astype('float32')
        self.xp = self.gen(zp)
        self.Dis_xp = self.dis(self.xp)
        self.Dis_xp_is = self.dis.raw_output
        self.Dis_x = self.dis(x)

        self.real_cost = - rm.sum(rm.log(self.Dis_x + eps))/nb
        self.fake_cost = - rm.sum(rm.log(1 - self.Dis_xp + eps))/nb
        self.GAN_loss = self.real_cost + self.fake_cost

        gan_mode = 'non-saturating'
        if gan_mode == 'minmax':
            self.gen_loss = - self.GAN_loss
        elif gan_mode == 'non-saturating':
            self.gen_loss = - rm.sum(rm.log(self.Dis_xp + eps))/nb
        elif gan_mode == 'max-likelihood':
            self.gen_loss = - rm.sum(rm.exp(self.Dis_xp_is))/nb

        return self.GAN_loss
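
This example hard-codes the 'non-saturating' generator objective. A tiny sketch of why that variant is usually preferred over the 'minmax' one early in training, using an assumed discriminator score on a fake sample:

# When the discriminator confidently rejects a fake (D(G(z)) close to 0), the
# minmax generator loss log(1 - D) yields a weak gradient, while the
# non-saturating loss -log(D) still provides a strong learning signal.
d_fake = 1e-4                            # assumed discriminator output on a fake sample
grad_minmax = 1.0 / (1.0 - d_fake)       # |d/dD log(1 - D)|
grad_non_saturating = 1.0 / d_fake       # |d/dD -log(D)|
print(grad_minmax, grad_non_saturating)  # ~1.0 vs 10000.0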
Example #5
def func(node):
    # Sums the elementwise exponential of the input node.
    return sum(rm.exp(node))
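
A scalar-valued function like this is convenient for gradient checking, since the gradient of sum(exp(x)) is simply exp(x). Below is a small plain-NumPy check of that identity; the NumPy stand-ins are assumptions for illustration, not ReNom API:

import numpy as np

# NumPy stand-in for the function above (rm.exp -> np.exp): it sums the
# elementwise exponential, so d(func)/dx_i = exp(x_i). Central-difference
# check of that identity on a small random input.
def func_np(x):
    return np.sum(np.exp(x))

x = np.random.rand(3)
eps = 1e-6
numeric = np.array([(func_np(x + eps * np.eye(3)[i]) - func_np(x - eps * np.eye(3)[i])) / (2 * eps)
                    for i in range(3)])
print(np.allclose(numeric, np.exp(x), atol=1e-4))  # True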