Example 1
    def where(self, mask, tensor_in_1, tensor_in_2):
        """
        Apply a boolean selection mask to the elements of the input tensors.

        Example:

            >>> import pyhf
            >>> pyhf.set_backend(pyhf.tensor.mxnet_backend())
            >>> pyhf.tensorlib.where(
            ...   pyhf.tensorlib.astensor([1, 0, 1]),
            ...   pyhf.tensorlib.astensor([1, 1, 1]),
            ...   pyhf.tensorlib.astensor([2, 2, 2]))
            ...
            <BLANKLINE>
            [1. 2. 1.]
            <NDArray 3 @cpu(0)>

        Args:
            mask (bool): Boolean mask (boolean or tensor object of booleans)
            tensor_in_1 (Tensor): Tensor object
            tensor_in_2 (Tensor): Tensor object

        Returns:
            MXNet NDArray: The result of the mask being applied to the tensors.
        """
        mask = self.astensor(mask)
        tensor_in_1 = self.astensor(tensor_in_1)
        tensor_in_2 = self.astensor(tensor_in_2)
        return nd.add(
            nd.multiply(mask, tensor_in_1),
            nd.multiply(nd.subtract(1, mask), tensor_in_2),
        )
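A quick sanity check (not part of pyhf; assumes MXNet is installed): the arithmetic blend used above, mask * a + (1 - mask) * b, gives the same result as MXNet's built-in nd.where on the docstring's inputs.

from mxnet import nd

mask = nd.array([1, 0, 1])
a = nd.array([1, 1, 1])
b = nd.array([2, 2, 2])

# blend: keep a where mask is 1, b where mask is 0
blend = nd.add(nd.multiply(mask, a), nd.multiply(nd.subtract(1, mask), b))
print(blend)                 # [1. 2. 1.]
print(nd.where(mask, a, b))  # same values via the built-in selection op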
Example 2
    def where(self, mask, tensor_in_1, tensor_in_2):
        """
        Apply a boolean selection mask to the elements of the input tensors.

        Example::

            >>> where(
                astensor([1, 0, 1]),
                astensor([1, 1, 1]),
                astensor([2, 2, 2]))
            [1. 2. 1.]

        Args:
            mask (bool): Boolean mask (boolean or tensor object of booleans)
            tensor_in_1 (Tensor): Tensor object
            tensor_in_2 (Tensor): Tensor object

        Returns:
            MXNet NDArray: The result of the mask being applied to the tensors.
        """
        mask = self.astensor(mask)
        tensor_in_1 = self.astensor(tensor_in_1)
        tensor_in_2 = self.astensor(tensor_in_2)
        return nd.add(nd.multiply(mask, tensor_in_1),
                      nd.multiply(nd.subtract(1, mask), tensor_in_2))
Example 3
    def forward(self, inputs, begin_state=None): # pylint: disable=arguments-differ
        """Implement the forward computation that the awd language model and cache model use.

        Parameters
        -----------
        inputs : NDArray
            input tensor with shape `(sequence_length, batch_size)`
            when `layout` is "TNC".
        begin_state : list
            initial recurrent state tensor with length equals to num_layers.
            the initial state with shape `(1, batch_size, num_hidden)`

        Returns
        --------
        out: NDArray
            output tensor with shape `(sequence_length, batch_size, input_size)`
            when `layout` is "TNC".
        out_states: list
            output recurrent state tensor with length equals to num_layers.
            the state with shape `(1, batch_size, num_hidden)`
        encoded_raw: list
            The list of outputs of the model's encoder with length equals to num_layers.
            the shape of every encoder's output `(sequence_length, batch_size, num_hidden)`
        encoded_dropped: list
            The list of outputs with dropout of the model's encoder with length equals
            to num_layers. The shape of every encoder's dropped output
            `(sequence_length, batch_size, num_hidden)`
        """
        encoded = self.embedding(inputs)
        if not begin_state:
            begin_state = self.begin_state(batch_size=inputs.shape[1])
        out_states = []
        encoded_raw = []
        encoded_dropped = []
        for i, (e, s) in enumerate(zip(self.encoder, begin_state)):
            encoded, state = e(encoded, s)
            encoded_raw.append(encoded)
            out_states.append(state)
            if self._drop_h and i != len(self.encoder)-1:
                encoded = nd.Dropout(encoded, p=self._drop_h, axes=(0,))
                encoded_dropped.append(encoded)
        if self._dropout:
            encoded = nd.Dropout(encoded, p=self._dropout, axes=(0,))
        encoded_dropped.append(encoded)
        latent = nd.Dropout(self.latent(encoded), p=self._drop_l, axes=(0,))
        logit = self.decoder(latent.reshape(-1, self._embed_size))
        prior_logit = self.prior(encoded).reshape(-1, self._num_experts)
        prior = nd.softmax(prior_logit)
        prob = nd.softmax(logit.reshape(-1, self._vocab_size))
        prob = prob.reshape(-1, self._num_experts, self._vocab_size)
        prob = (prob * prior.expand_dims(2).broadcast_to(prob.shape)).sum(axis=1)
        out = nd.log(nd.add(prob, 1e-8)).reshape(-1, inputs.shape[1], self._vocab_size)
        return out, out_states, encoded_raw, encoded_dropped
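The tail of this forward pass is a mixture-of-softmaxes (MoS) output head. The following self-contained sketch (shapes and names are illustrative, not taken from the model above) shows the same mixing step and that the result is still a valid per-position probability distribution.

from mxnet import nd

batch, num_experts, vocab = 4, 3, 10
logit = nd.random.normal(shape=(batch, num_experts, vocab))   # per-expert logits
prior_logit = nd.random.normal(shape=(batch, num_experts))    # pre-softmax mixture weights

prior = nd.softmax(prior_logit, axis=-1)                      # mixture weights sum to 1
prob = nd.softmax(logit.reshape(-1, vocab), axis=-1)          # softmax per expert
prob = prob.reshape(batch, num_experts, vocab)
prob = (prob * prior.expand_dims(2).broadcast_to(prob.shape)).sum(axis=1)

print(prob.sum(axis=1))  # every row sums to ~1.0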
Example 4
def test_513():
    # Small edge-detection example
    X = nd.ones((8, 8))
    X[2:6, 2:6] = 0
    print(X)
    '''
    Original image; the task is edge detection
        [
         [1. 1. 1. 1. 1. 1. 1. 1.]
         [1. 1. 1. 1. 1. 1. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 1. 1. 1. 1. 1. 1.]
         [1. 1. 1. 1. 1. 1. 1. 1.]
        ]
     '''
    # Construct the kernel
    K1 = nd.array([[1, -1], [1, -1]])
    Y1 = corr2d(X, K1)
    print(Y1)
    '''
    Detect vertical edges
    [[ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  1.  0.  0.  0. -1.  0.]
     [ 0.  2.  0.  0.  0. -2.  0.]
     [ 0.  2.  0.  0.  0. -2.  0.]
     [ 0.  2.  0.  0.  0. -2.  0.]
     [ 0.  1.  0.  0.  0. -1.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]]
     '''
    # Construct the kernel
    K2 = nd.array([[1, 1], [-1, -1]])
    Y2 = corr2d(X, K2)
    print(Y2)
    '''
    Detect horizontal edges
    [[ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  1.  2.  2.  2.  1.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]
     [ 0. -1. -2. -2. -2. -1.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]]
    
    '''
    Y3 = nd.add(nd.abs(Y1), nd.abs(Y2))
    print(Y3)
    '''
    Combined edge map |Y1| + |Y2|: both vertical and horizontal edges are highlighted.
    [[0. 0. 0. 0. 0. 0. 0.]
     [0. 2. 2. 2. 2. 2. 0.]
     [0. 2. 0. 0. 0. 2. 0.]
     [0. 2. 0. 0. 0. 2. 0.]
     [0. 2. 0. 0. 0. 2. 0.]
     [0. 2. 2. 2. 2. 2. 0.]
     [0. 0. 0. 0. 0. 0. 0.]]
    '''
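corr2d is not defined in this snippet; a minimal 2-D cross-correlation in the style of the d2l book, which reproduces the outputs shown in the comments above, could look like this:

from mxnet import nd

def corr2d(X, K):
    """Plain 2-D cross-correlation (no padding, stride 1)."""
    h, w = K.shape
    Y = nd.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            # elementwise product of the current window with the kernel, then sum
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y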
Example 5
    def forward(self, inputs, begin_state=None):
        """Implement forward computation.

        Parameters
        ----------
        inputs : NDArray
            The training dataset.
        begin_state : list
            The initial hidden states.

        Returns
        -------
        out : NDArray
            The output of the model.
        out_states : list
            The list of output states of the model's encoder.
        encoded_raw : list
            The raw output of each encoder layer.
        encoded_dropped : list
            The output of each encoder layer after dropout.
        """
        encoded = self.embedding(inputs)
        if not begin_state:
            begin_state = self.begin_state(batch_size=inputs.shape[1])
        out_states = []
        encoded_raw = []
        encoded_dropped = []
        for i, (e, s) in enumerate(zip(self.encoder, begin_state)):
            encoded, state = e(encoded, s)
            encoded_raw.append(encoded)
            out_states.append(state)
            if self._drop_h and i != len(self.encoder) - 1:
                encoded = nd.Dropout(encoded, p=self._drop_h, axes=(0, ))
                encoded_dropped.append(encoded)
        if self._dropout:
            encoded = nd.Dropout(encoded, p=self._dropout, axes=(0, ))
        encoded_dropped.append(encoded)

        latent = nd.Dropout(self.latent(encoded), p=self._drop_l, axes=(0, ))
        logit = self.decoder(latent.reshape(-1, self._embed_size))
        prior_logit = self.prior(encoded).reshape(-1, self._num_experts)
        prior = nd.softmax(prior_logit)

        prob = nd.softmax(logit.reshape(-1, self._vocab_size))
        prob = prob.reshape(-1, self._num_experts, self._vocab_size)
        prob = (prob *
                prior.expand_dims(2).broadcast_to(prob.shape)).sum(axis=1)
        out = nd.log(nd.add(prob, 1e-8)).reshape(-1, inputs.shape[1],
                                                 self._vocab_size)

        return out, out_states, encoded_raw, encoded_dropped
Example 6
def fusionFMaps(lMap, sMap, upconv_ksize=3, method='upconv'):
    # lMap/sMap stand for the large/small feature maps
    # method: 'upconv' or 'bilinear'
    s_channels = sMap.shape[1]
    l_channels = lMap.shape[1]
    # if s_channels != l_channels:
    #     raise ValueError("ERROR [jcy checkpoint]: Inconsistent feature-map channels."
    #                      " Check the channels of neighboring layers. ")
    if method == 'upconv':
        upconver = nn.HybridSequential()
        upconver.add(
            nn.Conv2DTranspose(channels=l_channels,
                               kernel_size=upconv_ksize,
                               activation='relu'),
            nn.BatchNorm(in_channels=l_channels))
        upconver.initialize(
            ctx=mx.gpu())  # TODO: pick an initializer; these params should eventually be trainable
        upconv_sMap = upconver(sMap)
        # TODO: Modify this. Figure out a way to deal with size problem brought by pooling
        upconv_sMap = nd.contrib.BilinearResize2D(data=upconv_sMap,
                                                  height=lMap.shape[-2],
                                                  width=lMap.shape[-1])
    elif method == 'bilinear':
        upconv_sMap = nd.contrib.BilinearResize2D(data=sMap,
                                                  height=lMap.shape[-2],
                                                  width=lMap.shape[-1])
        # Note: the up and down paths must carry the same number of channels
        # before they are summed; re-enable the channel check above if needed.

    else:
        raise Exception(
            "ERROR! [jcy checkpoint]: Unexpected enlarging method.")

    res = nd.add(lMap, upconv_sMap) / 2  # average the large fmap with the upsampled one
    res = res / nd.max(res)
    # return (res, upconv_sMap)
    return res
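A hypothetical usage sketch (not from the original code): the 'bilinear' path is chosen here so it runs without a GPU or trainable parameters, and it assumes the corrected fusionFMaps above, together with mxnet's mx, nd, and gluon nn imports, is in scope.

from mxnet import nd

lMap = nd.random.uniform(shape=(1, 16, 32, 32))  # larger feature map, NCHW
sMap = nd.random.uniform(shape=(1, 16, 16, 16))  # smaller feature map, same channel count

fused = fusionFMaps(lMap, sMap, method='bilinear')
print(fused.shape)  # (1, 16, 32, 32): sMap is upsampled, averaged with lMap, then max-normalised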
Example 7
    def forward(self,
                inputs,
                begin_state=None,
                token_types=None,
                valid_length=None,
                masked_positions=None):  # pylint: disable=arguments-differ
        """Implement the forward computation that the awd language model and cache model use.

        Parameters
        -----------
        inputs : NDArray
            input tensor with shape `(sequence_length, batch_size)`
            when `layout` is "TNC".
        begin_state : list
            initial recurrent state tensor with length equals to num_layers.
            the initial state with shape `(1, batch_size, num_hidden)`
        token_types: NDArray
            input token type tensor, shape (batch_size, seq_length).
            If the inputs contain two sequences, then the token type of the first
            sequence differs from that of the second one.
        valid_length: NDArray
            optional tensor of input sequence valid lengths, shape (batch_size,)
        masked_positions: optional tensor of position of tokens for masked LM decoding,
            shape (batch_size, num_masked_positions).

        Returns
        --------
        out: NDArray
            output tensor with shape `(sequence_length, batch_size, input_size)`
            when `layout` is "TNC".
        out_states: list
            output recurrent state tensor with length equals to num_layers.
            the state with shape `(1, batch_size, num_hidden)`
        encoded_raw: list
            The list of outputs of the model's encoder with length equals to num_layers.
            the shape of every encoder's output `(sequence_length, batch_size, num_hidden)`
        encoded_dropped: list
            The list of outputs with dropout of the model's encoder with length equals
            to num_layers. The shape of every encoder's dropped output
            `(sequence_length, batch_size, num_hidden)`
        """
        batch_size = inputs.shape[1]
        inputs = nd.transpose(inputs, axes=(1, 0))
        if token_types is None:
            token_types = nd.zeros_like(inputs)
        encoded = self.embedding(inputs,
                                 token_types=token_types,
                                 valid_length=valid_length,
                                 masked_positions=masked_positions)
        encoded = nd.transpose(encoded, axes=(1, 0, 2))
        encoded = nd.Dropout(encoded, p=self._drop_i, axes=(0, ))
        if not begin_state:
            begin_state = self.begin_state(batch_size=batch_size)
        out_states = []
        encoded_raw = []
        encoded_dropped = []
        for i, (e, s) in enumerate(zip(self.encoder, begin_state)):
            encoded, state = e(encoded, s)
            encoded_raw.append(encoded)
            out_states.append(state)
            if i != len(self.encoder) - 1:
                encoded = nd.Dropout(encoded, p=self._drop_h, axes=(0, ))
                encoded_dropped.append(encoded)
        encoded = nd.Dropout(encoded, p=self._dropout, axes=(0, ))
        encoded_dropped.append(encoded)
        # mixture-of-softmaxes (MoS) output head
        latent = nd.Dropout(self.latent(encoded), p=self._drop_l, axes=(0, ))
        logit = self.decoder(latent.reshape(-1, self._embed_size))

        prior_logit = self.prior(encoded).reshape(-1, self._num_experts)
        prior = nd.softmax(prior_logit, axis=-1)

        prob = nd.softmax(logit.reshape(-1, self._vocab_size), axis=-1)
        prob = prob.reshape(-1, self._num_experts, self._vocab_size)
        prob = (prob *
                prior.expand_dims(2).broadcast_to(prob.shape)).sum(axis=1)

        out = nd.log(nd.add(prob, 1e-8)).reshape(-1, batch_size,
                                                 self._vocab_size)

        return out, out_states, encoded_raw, encoded_dropped
Example 8
class RepulsionLoss(gluon.Block):
    def __init__(self, iou_thresh=0.5, sigma=0.5, epo=0.1, **kwargs):
        super(RepulsionLoss, self).__init__(**kwargs)
        self.iou_thresh = iou_thresh
        self.sigma = sigma
        self.epo = epo

    def Smooth_Ln(self, x, sigma):
        large = np.where(x > sigma)
        small = np.where(x <= sigma)

        large = x[large]
        small = x[small]

        large = np.sum((large - sigma) / (1 - sigma) - np.log(1 - sigma))
        small = np.sum(-np.log(1 - small))

        return (large + small)

    def forward(self,
                cls_preds,
                box_preds,
                cls_targets,
                box_targets,
                loss=None):
        RepLoss = []
        all_box_gt = box_targets[0].asnumpy()
        all_box_pred = box_preds[0].asnumpy()
        for i in range(all_box_pred.shape[0]):
            # filter out all-zero rows (mainly ground-truth padding)
            nonzero_boxgt_index = np.where(
                all_box_gt[i][:, 0] != all_box_gt[i][:, 2])
            nonzero_boxpred_index = np.where(
                all_box_pred[i][:, 0] != all_box_pred[i][:, 2])

            nonzero_box_gt = all_box_gt[i][nonzero_boxgt_index][:, 0:4]
            nonzero_box_pred = all_box_pred[i][nonzero_boxpred_index][:, 0:4]

            # calculate IoU between predictions and ground truth
            _iou = bbox_iou(nonzero_box_pred, nonzero_box_gt)

            # select positive proposals
            pos_index = np.where(np.max(_iou, axis=1) >= self.iou_thresh)
            _iou = _iou[pos_index]
            # for each positive proposal keep its top two IoUs with the targets
            sort_index = _iou.argsort(axis=1)[:, -2:]
            iog = []
            for _i in range(len(sort_index)):
                tmp = _iou[_i, sort_index[_i]]
                iog.append(tmp)
            iog = np.array(iog)
            if iog.shape[0] == 0:
                RepGT = 0
                RepBo = 0
            else:
                # RepulsionGT term
                RepGT = self.Smooth_Ln(iog[:, 0], self.sigma) / iog.shape[0]
                # for each ground truth keep only the proposal with the highest IoU
                pos_gt_prop_index = np.argmax(_iou, axis=0)
                pos_gt_prop = np.array([
                    nonzero_box_pred[pos_gt_prop_index],
                    nonzero_box_pred[pos_gt_prop_index]
                ])
                # RepulsionBox term
                box_l = np.array([])
                total_iou = np.array([])
                for row in range(len(pos_gt_prop[0]) - 1):
                    curr = pos_gt_prop[0][row].reshape(1, -1)
                    rest = pos_gt_prop[1][row + 1:]
                    _bbox_iou = bbox_iou(curr, rest)
                    box_l = np.hstack(
                        (box_l, [self.Smooth_Ln(_bbox_iou, self.sigma)]))
                    total_iou = np.hstack((total_iou, [np.sum(_bbox_iou)]))
                RepBo = np.sum(box_l) / (np.sum(total_iou) + self.epo)
            RepLoss.append(RepGT + RepBo)

        RepLoss = [nd.array(RepLoss, ctx=mx.gpu(0))]
        if loss:
            sum_loss, cls_loss, box_loss = loss(cls_preds, box_preds,
                                                cls_targets, box_targets)  # TODO: YOLO version
            return nd.add(RepLoss[0], sum_loss[0]), cls_loss, box_loss
        else:
            return RepLoss, 0, 0
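For reference, Smooth_Ln above appears to be the smooth-ln penalty used by the Repulsion Loss formulation, summed over the selected overlap values:

\mathrm{Smooth}_{\ln}(x) = \begin{cases}
  -\ln(1 - x) & x \le \sigma \\
  \dfrac{x - \sigma}{1 - \sigma} - \ln(1 - \sigma) & x > \sigma
\end{cases}

The method returns the sum of this quantity over all elements of its input.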
Example 9
    def nodeforward(self, x, cs, hs, ctx):
        x = nd.reshape(x, (self.dim_h, ))
        _Ui = nd.zeros((self.dim_h, ), ctx=ctx)
        _Uo = nd.zeros((self.dim_h, ), ctx=ctx)
        _Uu = nd.zeros((self.dim_h, ), ctx=ctx)
        _Uf = [nd.zeros((self.dim_h, ), ctx=ctx) for i in range(len(cs))]

        # accumulate each child's hidden-state contribution to the gates
        for idx in range(len(cs)):
            _Ui = nd.add(_Ui, nd.dot(self.Uis[idx].data(), hs[idx]))
            _Uo = nd.add(_Uo, nd.dot(self.Uos[idx].data(), hs[idx]))
            _Uu = nd.add(_Uu, nd.dot(self.Uus[idx].data(), hs[idx]))
            for j in range(len(cs)):
                _Uf[idx] = nd.add(_Uf[idx],
                                  nd.dot(self.Ufs[idx][j].data(), hs[j]))

        i = nd.sigmoid(
            nd.add(nd.add(nd.dot(self.Wi.data(), x), _Ui), self.bi.data()))
        o = nd.sigmoid(
            nd.add(nd.add(nd.dot(self.Wo.data(), x), _Uo), self.bo.data()))
        f = [
            nd.sigmoid(
                nd.add(nd.add(nd.dot(self.Wf.data(), x), _Uf[idx]),
                       self.bf.data())) for idx in range(len(cs))
        ]
        u = nd.tanh(
            nd.add(nd.add(nd.dot(self.Wu.data(), x), _Uu), self.bu.data()))

        c = nd.zeros((self.dim_h, ), ctx=ctx)
        for idx in range(len(cs)):
            c = nd.add(c, nd.multiply(f[idx], cs[idx]))
        c = nd.add(nd.multiply(i, u), c)

        h = nd.multiply(o, nd.tanh(c))
        return c, h
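For orientation, nodeforward appears to compute a Tree-LSTM-style cell with per-child transition matrices (in the spirit of Tai et al., 2015); schematically:

i   = \sigma\big(W^{(i)} x + \sum_k U^{(i)}_k h_k + b^{(i)}\big)
o   = \sigma\big(W^{(o)} x + \sum_k U^{(o)}_k h_k + b^{(o)}\big)
u   = \tanh\big(W^{(u)} x + \sum_k U^{(u)}_k h_k + b^{(u)}\big)
f_k = \sigma\big(W^{(f)} x + \sum_j U^{(f)}_{kj} h_j + b^{(f)}\big)
c   = i \odot u + \sum_k f_k \odot c_k
h   = o \odot \tanh(c)

Here h_k and c_k are the hidden and cell states of the k-th child, matching the hs and cs arguments in the code.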
Example 10
    def dense_fw(self, x):
        """Fully connected layer forward process."""
        self.z = nd.add(nd.dot(x, self.W), self.b)
        self.output = self.act(self.z)

        return self.output
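A self-contained sketch of the same computation, z = xW + b followed by an activation. W, b, and act live on the surrounding class in the original; the shapes and the ReLU choice below are illustrative assumptions.

from mxnet import nd

x = nd.random.uniform(shape=(2, 4))   # batch of 2 samples, 4 input features
W = nd.random.normal(shape=(4, 3))    # weights for a 4 -> 3 dense layer
b = nd.zeros((3,))

z = nd.add(nd.dot(x, W), b)           # affine part: z = xW + b
out = nd.relu(z)                      # activation, e.g. ReLU
print(out.shape)                      # (2, 3)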