Example #1
 def _propagate(self, Y, dropout=0.):
     blstm = self.blstm_layer(Y, dropout=dropout)
     relu_1 = F.clipped_relu(self.relu_1(blstm, dropout=dropout))
     relu_2 = F.clipped_relu(self.relu_2(relu_1, dropout=dropout))
     N_mask = F.sigmoid(self.noise_mask_estimate(relu_2))
     X_mask = F.sigmoid(self.speech_mask_estimate(relu_2))
     return N_mask, X_mask
Example #3
    def forward(self, x):
        h = F.clipped_relu(self.bn1(self.conv1(x)), z=6.)
        h = F.clipped_relu(self.bn2(self.conv2(h)), z=6.)
        h = self.bn3(self.conv3(h))
        if self.stride == 1 and self.skip_connection:
            h = h + x

        return h
Example #4
 def __call__(self, x):
     h = x
     if self.expand_ratio != 1:
         h = F.clipped_relu(self.expand_bn(self.expand_conv(h)), 6.0)
     h = F.clipped_relu(self.depthwise_bn(self.depthwise_conv(h)), 6.0)
     h = self.project_bn(self.project_conv(h))
     if h.shape == x.shape:
         return h + x
     else:
         return h
Example #5
 def __call__(self, x):
     top, middle, bottom = self.mobilenetv2(x)
     top, middle, bottom = self.globalnet(top, middle, bottom)
     global_vect, global_heat = self.globalloss(top)
     global_vect = self.global_vect_convout(global_vect)
     global_heat = self.global_heat_convout(global_heat)
     refine_concat = self.refinenet(top, middle, bottom)
     refine_vect, refine_heat = self.refineloss(refine_concat)
     refine_vect = self.refine_vect_convout(refine_vect)
     refine_heat = self.refine_heat_convout(refine_heat)
     global_heat = F.clipped_relu(global_heat, 1.1)
     refine_heat = F.clipped_relu(refine_heat, 1.1)
     return [global_vect, refine_vect], [global_heat, refine_heat]
Example #6
    def __call__(self, x, train=False):
        """
        calculate output of VoxResNet given input x

        Parameters
        ----------
        x : (batch_size, in_channels, xlen, ylen, zlen) ndarray
            image on which to perform semantic segmentation

        Returns
        -------
        proba : (batch_size, n_classes, xlen, ylen, zlen) ndarray
            probability of each voxel belonging to each class;
            if train=True, a list of logits is returned instead
        """
        with chainer.using_config("train", train):
            h = self.conv1a(x)
            h = F.relu(self.bnorm1a(h))
            h = self.conv1b(h)
            c1 = F.clipped_relu(self.c1deconv(h))
            c1 = self.c1conv(c1)

            h = F.relu(self.bnorm1b(h))
            h = self.conv1c(h)
            h = self.voxres2(h)
            h = self.voxres3(h)
            c2 = F.clipped_relu(self.c2deconv(h))
            c2 = self.c2conv(c2)

            h = F.relu(self.bnorm3(h))
            h = self.conv4(h)
            h = self.voxres5(h)
            h = self.voxres6(h)
            c3 = F.clipped_relu(self.c3deconv(h))
            c3 = self.c3conv(c3)

            h = F.relu(self.bnorm6(h))
            h = self.conv7(h)
            h = self.voxres8(h)
            h = self.voxres9(h)
            c4 = F.clipped_relu(self.c4deconv(h))
            c4 = self.c4conv(c4)

            c = c1 + c2 + c3 + c4

        if train:
            return [c1, c2, c3, c4, c]
        else:
            return F.softmax(c)
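A minimal usage sketch for a __call__ of this shape (the model class name, constructor arguments, and input size below are assumed for illustration, not taken from the original repository): at inference the call returns per-voxel class probabilities, while train=True returns the intermediate logits used for deep supervision.

    import numpy as np

    # Hypothetical instantiation; "VoxResNet" stands for any chainer.Chain
    # exposing the __call__ shown above. Shapes are illustrative only.
    model = VoxResNet()
    x = np.zeros((1, 1, 32, 32, 32), dtype=np.float32)

    proba = model(x, train=False)             # (1, n_classes, 32, 32, 32) softmax probabilities
    c1, c2, c3, c4, c = model(x, train=True)  # auxiliary logits plus their sum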
Example #7
    def __call__(self, x):
        for nth in range(self.layers):
            if getattr(self, 'P' + str(nth)) is None:
                setattr(self, 'P' + str(nth), variable.Variable(
                    self.xp.zeros(self.sizes[nth], dtype=x.data.dtype),
                    volatile='auto'))

        E = [None] * self.layers
        for nth in range(self.layers):
            if nth == 0:
                E[nth] = F.concat((F.relu(x - getattr(self, 'P' + str(nth))),
                                  F.relu(getattr(self, 'P' + str(nth)) - x)))
            else:
                A = F.max_pooling_2d(F.relu(getattr(self, 'ConvA' + str(nth))(E[nth - 1])), 2, stride = 2)
                E[nth] = F.concat((F.relu(A - getattr(self, 'P' + str(nth))),
                                  F.relu(getattr(self, 'P' + str(nth)) - A)))

        R = [None] * self.layers
        for nth in reversed(range(self.layers)):
            if nth == self.layers - 1:
                R[nth] = getattr(self, self.rnn_module + str(nth))((E[nth],))
            else:
                upR = F.unpooling_2d(R[nth + 1], 2, stride = 2, cover_all=False)
                R[nth] = getattr(self, self.rnn_module + str(nth))((E[nth], upR))

            if nth == 0:
                setattr(self, 'P' + str(nth), F.clipped_relu(getattr(self, 'ConvP' + str(nth))(R[nth]), 1.0))
            else:
                setattr(self, 'P' + str(nth), F.relu(getattr(self, 'ConvP' + str(nth))(R[nth])))
        
        return self.P0
Example #8
    def __call__(self, x):
        for nth in range(self.layers):
            if getattr(self, 'P' + str(nth)) is None:
                setattr(self, 'P' + str(nth), variable.Variable(
                    self.xp.zeros(self.sizes[nth], dtype=x.data.dtype),
                    volatile='auto'))

        E = [None] * self.layers
        for nth in range(self.layers):
            if nth == 0:
                E[nth] = F.concat((F.relu(x - getattr(self, 'P' + str(nth))),
                                  F.relu(getattr(self, 'P' + str(nth)) - x)))
            else:
                A = F.max_pooling_2d(F.relu(getattr(self, 'ConvA' + str(nth))(E[nth - 1])), 2, stride = 2)
                E[nth] = F.concat((F.relu(A - getattr(self, 'P' + str(nth))),
                                  F.relu(getattr(self, 'P' + str(nth)) - A)))

        R = [None] * self.layers
        for nth in reversed(range(self.layers)):
            if nth == self.layers - 1:
                R[nth] = getattr(self, 'ConvLSTM' + str(nth))((E[nth],))
            else:
                upR = F.unpooling_2d(R[nth + 1], 2, stride = 2, cover_all=False)
                R[nth] = getattr(self, 'ConvLSTM' + str(nth))((E[nth], upR))

            if nth == 0:
                setattr(self, 'P' + str(nth), F.clipped_relu(getattr(self, 'ConvP' + str(nth))(R[nth]), 1.0))
            else:
                setattr(self, 'P' + str(nth), F.relu(getattr(self, 'ConvP' + str(nth))(R[nth])))
        
        return self.P0
Example #9
    def __call__(self, bottom_E, lateral_R):
        """
        bottom_E  : Error from lower layer; E(t,l-1)
        lateral_R : ConvLSTM's output from lateral layer; R(t,l)
        """
        # Target unit
        if self.first_layer == True:
            A = bottom_E
        else:
            A = F.relu(self.tconv(bottom_E))
            A = F.max_pooling_2d(A, ksize=2, stride=2)

        # Prediction unit
        if self.first_layer == True:
            # F.clipped_relu is equivalent to ReLU followed by SatLU (saturation at pixel_max)
            Ahat = F.clipped_relu(self.pconv(lateral_R), z=self.pixel_max)
        else:
            Ahat = F.relu(self.pconv(lateral_R))

        # Error unit
        E = F.concat((F.relu(Ahat - A), F.relu(A - Ahat)), axis=1)
        if self.first_layer == True:
            return E, Ahat
        else:
            return E
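Since the comment in the prediction unit above relies on it, here is a small self-contained check (not from any of the original projects) that F.clipped_relu(x, z) is exactly the ReLU-then-SatLU composition, i.e. elementwise min(max(x, 0), z).

    import numpy as np
    import chainer.functions as F

    x = np.linspace(-2.0, 8.0, 11).astype(np.float32)
    pixel_max = 6.0                                    # illustrative saturation value
    y = F.clipped_relu(x, z=pixel_max)                 # ReLU clipped at pixel_max
    y_ref = np.minimum(np.maximum(x, 0.0), pixel_max)  # explicit ReLU followed by SatLU
    assert np.allclose(y.data, y_ref)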
Example #10
    def __call__(self, x, t=None):
        self.clear()
        h1 = F.leaky_relu(self.conv1(x), slope=0.1)
        h1 = F.leaky_relu(self.conv2(h1), slope=0.1)
        h1 = F.leaky_relu(self.conv3(h1), slope=0.1)

        h2 = self.seranet_v1_crbm(x)
        # Fusion
        h12 = F.concat((h1, h2), axis=1)

        lu = F.leaky_relu(self.convlu6(h12), slope=0.1)
        lu = F.leaky_relu(self.convlu7(lu), slope=0.1)
        lu = F.leaky_relu(self.convlu8(lu), slope=0.1)
        ru = F.leaky_relu(self.convru6(h12), slope=0.1)
        ru = F.leaky_relu(self.convru7(ru), slope=0.1)
        ru = F.leaky_relu(self.convru8(ru), slope=0.1)
        ld = F.leaky_relu(self.convld6(h12), slope=0.1)
        ld = F.leaky_relu(self.convld7(ld), slope=0.1)
        ld = F.leaky_relu(self.convld8(ld), slope=0.1)
        rd = F.leaky_relu(self.convrd6(h12), slope=0.1)
        rd = F.leaky_relu(self.convrd7(rd), slope=0.1)
        rd = F.leaky_relu(self.convrd8(rd), slope=0.1)

        # Splice
        h = CF.splice(lu, ru, ld, rd)

        h = F.leaky_relu(self.conv9(h), slope=0.1)
        h = F.leaky_relu(self.conv10(h), slope=0.1)
        h = F.leaky_relu(self.conv11(h), slope=0.1)
        h = F.clipped_relu(self.conv12(h), z=1.0)
        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h
Example #11
    def __call__(self, z, test=False, rectifier='clipped_relu'):
        batch = z

        if self.mode == 'convolution':
            batch = F.relu(self.bn6(self.lin(z), test=test))
            n_pics = batch.data.shape[0]
            start_array_shape = (n_pics, ) + calc_fc_size(
                self.img_height, self.img_width)
            batch = F.reshape(batch, start_array_shape)
            batch = F.relu(self.bn5(self.deconv5(batch), test=test))
            batch = F.relu(self.bn4(self.deconv4(batch), test=test))
            batch = F.relu(self.bn3(self.deconv3(batch), test=test))
            batch = F.relu(self.bn2(self.deconv2(batch), test=test))
            batch = self.deconv1(batch)

        elif self.mode == 'linear':
            n_layers = len(self.decode_layers)
            for i in range(n_layers):
                batch = F.relu(getattr(self, 'linear_%i' % i)(batch))
            batch = F.relu(getattr(self, 'linear_%i' % n_layers)(batch))
            batch = F.reshape(
                batch,
                (-1, self.img_height, self.img_width, self.color_channels))
        if rectifier == 'clipped_relu':
            batch = F.clipped_relu(batch, z=1.0)
        elif rectifier == 'sigmoid':
            batch = F.sigmoid(batch)
        else:
            raise NameError(
                "Unsupported rectifier type: %s, must be either 'sigmoid' or 'clipped_relu'."
                % rectifier)

        return batch
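A usage sketch for the decoder above (the class name, constructor arguments, latent size, and image size are assumed for illustration): the final rectifier squashes the reconstruction into [0, 1], either by clipping or with a sigmoid.

    import numpy as np

    # Hypothetical decoder instance exposing the __call__ shown above.
    dec = Decoder(mode='linear', img_height=28, img_width=28, color_channels=1)
    z = np.random.randn(8, 32).astype(np.float32)   # illustrative latent batch

    x_clip = dec(z, rectifier='clipped_relu')  # pixels clipped into [0, 1]
    x_sig = dec(z, rectifier='sigmoid')        # pixels squashed by a sigmoid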
Example #12
    def __call__(self, z, test=False, rectifier='clipped_relu'):
        batch = z

        if self.mode == 'convolution':
            batch = F.relu(self.bn6(self.lin(z), test=test))
            n_pics = batch.data.shape[0]
            start_array_shape = (n_pics,) + calc_fc_size(self.img_height, self.img_width)
            batch = F.reshape(batch, start_array_shape)
            batch = F.relu(self.bn5(self.deconv5(batch), test=test))
            batch = F.relu(self.bn4(self.deconv4(batch), test=test))
            batch = F.relu(self.bn3(self.deconv3(batch), test=test))
            batch = F.relu(self.bn2(self.deconv2(batch), test=test))
            batch = self.deconv1(batch)

        elif self.mode == 'linear':
            n_layers = len(self.decode_layers)
            for i in range(n_layers):
                batch = F.relu(getattr(self, 'linear_%i' % i)(batch))
            batch = F.relu(getattr(self, 'linear_%i' % n_layers)(batch))
            batch = F.reshape(batch, (-1, self.img_height, self.img_width, self.color_channels))
        if rectifier == 'clipped_relu':
            batch = F.clipped_relu(batch, z=1.0)
        elif rectifier == 'sigmoid':
            batch = F.sigmoid(batch)
        else:
            raise NameError(
                "Unsupported rectifier type: %s, must be either 'sigmoid' or 'clipped_relu'."
                % rectifier)

        return batch
Example #14
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.clipped_relu(x, self.z)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.clip(0, self.z)

        testing.assert_allclose(y_expect, y.data)
Example #15
    def __call__(self, x, train=False):
        """
        calculate output of VoxResNet given input x

        Parameters
        ----------
        x : [sample_size, in_channels, xlen, ylen, zlen]
            image on which to perform semantic segmentation

        Returns
        -------
        logit : [sample_size, xlen, ylen, zlen]
            logits to be passed to the softmax activation
        """
        h = self.conv1a(x)
        h = F.relu(self.bnorm1a(h, test=not train))
        h = self.conv1b(h)
        c1 = F.clipped_relu(self.c1deconv(h))
        c1 = self.c1conv(c1)

        h = F.relu(self.bnorm1b(h, test=not train))
        h = self.conv1c(h)
        h = self.voxres2(h, train)
        h = self.voxres3(h, train)
        c2 = F.clipped_relu(self.c2deconv(h))
        c2 = self.c2conv(c2)

        h = F.relu(self.bnorm3(h, test=not train))
        h = self.conv4(h)
        h = self.voxres5(h, train)
        h = self.voxres6(h, train)
        c3 = F.clipped_relu(self.c3deconv(h))
        c3 = self.c3conv(c3)

        h = F.relu(self.bnorm6(h, test=not train))
        h = self.conv7(h)
        h = self.voxres8(h, train)
        h = self.voxres9(h, train)
        c4 = F.clipped_relu(self.c4deconv(h))
        c4 = self.c4conv(c4)

        c = c1 + c2 + c3 + c4
        if train:
            return (c1, c2, c3, c4, c)
        else:
            return c
Example #17
    def forward(self, x):
        h = F.clipped_relu(self.bn1(self.conv1(x)), z=6.)
        for name in self._forward:
            if name == 'block3_1':
                low_level_features = h
            block = getattr(self, name)
            h = block(h)

        return h, low_level_features
Example #18
    def check_forward(self, x_data, use_cudnn='always'):
        x = chainer.Variable(x_data)
        with chainer.using_config('use_cudnn', use_cudnn):
            y = functions.clipped_relu(x, self.z)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.clip(0, self.z)

        testing.assert_allclose(y_expect, y.data)
Example #19
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.clipped_relu(x, self.z)
        self.assertEqual(y.data.dtype, numpy.float32)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gx, = gradient_check.numerical_grad(f, (x.data, ), (y.grad, ))

        gradient_check.assert_allclose(gx, x.grad)
Example #20
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.clipped_relu(x, self.z)
        self.assertEqual(y.data.dtype, numpy.float32)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))

        gradient_check.assert_allclose(gx, x.grad)
Example #21
def forward(x, tags, model):
    accum_loss = Variable(xp.zeros((), dtype=xp.float32))
    x, c, t = make_sequences(x, tags)
    x = Variable(x)
    leng = len(c)
    c = Variable(xp.reshape(c, (leng * noise_size, window_size)))
    t = Variable(t)
    o = model(x)
    co = model(c)
    co = F.reshape(co, (leng, noise_size, 1))
    o = F.broadcast_to(F.reshape(o, (leng, 1, 1)), (leng, noise_size, 1))
    loss = F.sum(F.clipped_relu(1 - co + o, 1e999))
    return loss
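A note on the 1e999 cap above: the literal overflows to float('inf'), so the clip never engages and the term reduces to a plain hinge max(0, 1 - co + o). A tiny illustrative check (sample values only, not from the original project):

    import numpy as np
    import chainer.functions as F

    margin = np.array([-0.5, 0.3, 2.0], dtype=np.float32)  # example values of 1 - co + o
    capped = F.clipped_relu(margin, 1e999)                  # effectively an infinite cap
    plain = F.relu(margin)                                  # ordinary hinge
    assert np.allclose(capped.data, plain.data)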
Example #22
    def __call__(self, x, t=None):
        self.clear()

        h = F.leaky_relu(self.conv1(x), slope=0.1)
        h = F.leaky_relu(self.conv2(h), slope=0.1)
        #h = F.leaky_relu(self.conv3(h), slope=0.1)
        #h = F.leaky_relu(self.conv4(h), slope=0.1)
        h = F.clipped_relu(self.conv3(h), z=1.0)
        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h
Example #24
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.clipped_relu(x, self.z)
        self.assertEqual(y.data.dtype, numpy.float32)

        y_expect = self.x.copy()
        for i in numpy.ndindex(self.x.shape):
            if self.x[i] < 0:
                y_expect[i] = 0
            elif self.x[i] > self.z:
                y_expect[i] = self.z

        gradient_check.assert_allclose(y_expect, y.data)
Example #26
def forward(x, tags, model):
    accum_loss = Variable(xp.zeros((), dtype=xp.float32))
    x, c, t = make_sequences(x, tags)
    x = Variable(x)
    leng = len(c)
    c = Variable(xp.reshape(c, (leng * noise_size, window_size)))
    t = Variable(t)
    o, p = model(x)
    co, cp = model(c)
    a = F.softmax_cross_entropy(p, t)
    co = F.reshape(co, (leng, noise_size, 1))
    o = F.broadcast_to(F.reshape(o, (leng, 1, 1)), (leng, noise_size, 1))
    # note: 1e999 overflows to float('inf'), so this is effectively a plain hinge term
    b = F.sum(F.clipped_relu(1 - o + co, 1e999))
    accum_loss = (1 - r) * a + r * b
    return accum_loss
Example #27
    def __call__(self, x, t=None):
        self.clear()
        #x = Variable(x_data)  # x_data.astype(np.float32)

        h = F.leaky_relu(self.conv1(x), slope=0.1)
        h = F.leaky_relu(self.conv2(h), slope=0.1)
        h = F.leaky_relu(self.conv3(h), slope=0.1)
        h = F.leaky_relu(self.conv4(h), slope=0.1)
        h = F.leaky_relu(self.conv5(h), slope=0.1)
        h = F.leaky_relu(self.conv6(h), slope=0.1)
        h = F.clipped_relu(self.conv7(h), z=1.0)
        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h
Example #29
    def __call__(self, x):
        for nth in range(self.layers):
            if getattr(self, 'P' + str(nth)) is None:
                with chainer.using_config('enable_backprop', False):
                    setattr(
                        self, 'P' + str(nth),
                        variable.Variable(
                            self.xp.zeros(self.sizes[nth],
                                          dtype=x.data.dtype)))

        tol = lambda l: [p for p in l.params()]
        E = [None] * self.layers
        for nth in range(self.layers):
            if nth == 0:
                # with name_scope('E_Layer' + str(nth), [getattr(self, 'P' + str(nth))], True):
                E[nth] = F.concat((F.relu(x - getattr(self, 'P' + str(nth))),
                                   F.relu(getattr(self, 'P' + str(nth)) - x)))
            else:
                #with name_scope('E_Layer' + str(nth),
                #                [getattr(self, 'P' + str(nth))] + tol(getattr(self, 'ConvA' + str(nth))), True):
                A = F.max_pooling_2d(F.relu(
                    getattr(self, 'ConvA' + str(nth))(E[nth - 1])),
                                     2,
                                     stride=2)
                E[nth] = F.concat((F.relu(A - getattr(self, 'P' + str(nth))),
                                   F.relu(getattr(self, 'P' + str(nth)) - A)))

        R = [None] * self.layers
        for nth in reversed(range(self.layers)):
            #with name_scope('R_Layer' + str(nth), getattr(self, 'ConvLSTM' + str(nth)).params(), True):
            if nth == self.layers - 1:
                R[nth] = getattr(self, 'ConvLSTM' + str(nth))((E[nth], ))
            else:
                upR = F.unpooling_2d(R[nth + 1], 2, stride=2, cover_all=False)
                R[nth] = getattr(self, 'ConvLSTM' + str(nth))((E[nth], upR))

            #with name_scope('P_Layer' + str(nth), getattr(self, 'ConvP' + str(nth)).params(), True):
            if nth == 0:
                setattr(
                    self, 'P' + str(nth),
                    F.clipped_relu(
                        getattr(self, 'ConvP' + str(nth))(R[nth]), 1.0))
            else:
                setattr(self, 'P' + str(nth),
                        F.relu(getattr(self, 'ConvP' + str(nth))(R[nth])))

        return self.P0
Example #30
    def __call__(self, bottom_up, top_down=None):
        with cupy.cuda.Device(self.device):
            E = F.concat((F.relu(bottom_up - self.P),
                          F.relu(self.P - bottom_up)))
            if self.istop:
                A = None
                R = self.ConvLSTM((E,))
            else:
                A = F.max_pooling_2d(F.relu(self.ConvA(E)), 2, stride=2)
                unpooled = F.unpooling_2d(top_down, 2,
                                          stride=2, cover_all=False)
                R = self.ConvLSTM((E, unpooled))

            if self.isbottom:
                P = F.clipped_relu(self.ConvP(R), 1.0)
            else:
                P = F.relu(self.ConvP(R))

        self.P = P

        return (A, R)
Example #31
	def __call__(self, x):
		return F.clipped_relu(x, self.z)
Example #32
 def forward(self):
     x = chainer.Variable(self.x)
     return functions.clipped_relu(x, self.z)
Example #33
    def __call__(self, x, t=None):
        self.clear()

        lu = F.leaky_relu(self.convlu1(x), slope=0.1)
        lu = F.leaky_relu(self.convlu2(lu), slope=0.1)
        lu = F.leaky_relu(self.convlu3(lu), slope=0.1)
        lu = F.leaky_relu(self.convlu4(lu), slope=0.1)
        lu = F.leaky_relu(self.convlu5(lu), slope=0.1)

        ru = F.leaky_relu(self.convru1(x), slope=0.1)
        ru = F.leaky_relu(self.convru2(ru), slope=0.1)
        ru = F.leaky_relu(self.convru3(ru), slope=0.1)
        ru = F.leaky_relu(self.convru4(ru), slope=0.1)
        ru = F.leaky_relu(self.convru5(ru), slope=0.1)

        ld = F.leaky_relu(self.convld1(x), slope=0.1)
        ld = F.leaky_relu(self.convld2(ld), slope=0.1)
        ld = F.leaky_relu(self.convld3(ld), slope=0.1)
        ld = F.leaky_relu(self.convld4(ld), slope=0.1)
        ld = F.leaky_relu(self.convld5(ld), slope=0.1)

        rd = F.leaky_relu(self.convrd1(x), slope=0.1)
        rd = F.leaky_relu(self.convrd2(rd), slope=0.1)
        rd = F.leaky_relu(self.convrd3(rd), slope=0.1)
        rd = F.leaky_relu(self.convrd4(rd), slope=0.1)
        rd = F.leaky_relu(self.convrd5(rd), slope=0.1)

        cr = self.crbm1(x)
        cr = self.crbm2(cr)
        cr = self.crbm3(cr)
        cr = self.crbm4(cr)
        cr = self.crbm5(cr)

        # JOIN CR

        lucr = F.concat((lu, cr), axis=1)
        rucr = F.concat((ru, cr), axis=1)
        ldcr = F.concat((ld, cr), axis=1)
        rdcr = F.concat((rd, cr), axis=1)

        lucr = F.leaky_relu(self.convlu6(lucr), slope=0.1)
        lucr = F.leaky_relu(self.convlu7(lucr), slope=0.1)
        lucr = F.leaky_relu(self.convlu8(lucr), slope=0.1)
        lucr = F.leaky_relu(self.convlu9(lucr), slope=0.1)
        lucr = F.leaky_relu(self.convlu10(lucr), slope=0.1)
        lucr = F.clipped_relu(self.convlu11(lucr), z=1.0)

        rucr = F.leaky_relu(self.convru6(rucr), slope=0.1)
        rucr = F.leaky_relu(self.convru7(rucr), slope=0.1)
        rucr = F.leaky_relu(self.convru8(rucr), slope=0.1)
        rucr = F.leaky_relu(self.convru9(rucr), slope=0.1)
        rucr = F.leaky_relu(self.convru10(rucr), slope=0.1)
        rucr = F.clipped_relu(self.convru11(rucr), z=1.0)

        ldcr = F.leaky_relu(self.convld6(ldcr), slope=0.1)
        ldcr = F.leaky_relu(self.convld7(ldcr), slope=0.1)
        ldcr = F.leaky_relu(self.convld8(ldcr), slope=0.1)
        ldcr = F.leaky_relu(self.convld9(ldcr), slope=0.1)
        ldcr = F.leaky_relu(self.convld10(ldcr), slope=0.1)
        ldcr = F.clipped_relu(self.convld11(ldcr), z=1.0)

        rdcr = F.leaky_relu(self.convrd6(rdcr), slope=0.1)
        rdcr = F.leaky_relu(self.convrd7(rdcr), slope=0.1)
        rdcr = F.leaky_relu(self.convrd8(rdcr), slope=0.1)
        rdcr = F.leaky_relu(self.convrd9(rdcr), slope=0.1)
        rdcr = F.leaky_relu(self.convrd10(rdcr), slope=0.1)
        rdcr = F.clipped_relu(self.convrd11(rdcr), z=1.0)

        h = CF.splice(lucr, rucr, ldcr, rdcr)

        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h
Example #34
 def __call__(self, x):
     return F.clipped_relu(self.bn(self.conv(x)), 6.0)
Example #35
 def relu6(x):
     return clipped_relu(x, 6.)
Example #36
 def __call__(self, x):
     return F.clipped_relu(x, self.z)
Example #37
 def __call__(self, x):
     h = F.clipped_relu(self.depthwise_bn(self.depthwise_conv(x)), 6.0)
     return h
Example #38
 def forward(self, inputs, device):
     x, = inputs
     y = functions.clipped_relu(x, self.z)
     return y,
Example #40
 def f(x):
     return functions.clipped_relu(x, self.z)
Example #41
 def _propagate(self, Y, dropout=0.):
     relu_1 = F.clipped_relu(self.relu_1(Y, dropout=dropout))
     N_mask = F.sigmoid(self.noise_mask_estimate(relu_1))
     X_mask = F.sigmoid(self.speech_mask_estimate(relu_1))
     return N_mask, X_mask
Example #42
def relu6(x):
    """ReLU 6 activation function."""
    return F.clipped_relu(x, 6.)
Example #45
 def f(x):
     y = functions.clipped_relu(x, self.z)
     return y * y
Example #47
	def __call__(self, x):
		return functions.clipped_relu(x, self.z)