Example #1
    def variable_recurrent(self, x, h, seq_length):
        '''recurrent steps with sequence length'''
        time_step = range(x.shape[0])
        h_t = h
        if self.is_lstm:
            hidden_size = h[0].shape[-1]
            zero_output = P.ZerosLike()(h_t[0])
        else:
            hidden_size = h.shape[-1]
            zero_output = P.ZerosLike()(h_t)

        seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)
        seq_length = P.Transpose()(seq_length, (1, 0))

        outputs = []
        state_t = h_t
        for t in time_step:
            h_t = self.cell(x[t], state_t)
            seq_cond = seq_length > t
            if self.is_lstm:
                state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])
                state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])
                output = P.Select()(seq_cond, h_t[0], zero_output)
                state_t = (state_t_0, state_t_1)
            else:
                state_t = P.Select()(seq_cond, h_t, state_t)
                output = P.Select()(seq_cond, h_t, zero_output)
            outputs.append(output)
        outputs = P.Stack()(outputs)
        return outputs, state_t
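The heart of this loop is the masking step: ZerosLike builds a zero buffer with the same shape and dtype as the hidden state, and Select keeps the cell output only for batch entries whose sequence is still active at step t. A minimal, self-contained sketch of that step (written against mindspore.ops; the shapes, values, and step index are illustrative, not taken from the example):

import numpy as np
from mindspore import Tensor, ops

# Hypothetical toy batch: 2 sequences, hidden size 3, current step t = 1.
h_t = Tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
seq_length = Tensor(np.array([3, 1], dtype=np.int32))
t = 1

# Zero buffer matching the hidden state's shape and dtype.
zero_output = ops.ZerosLike()(h_t)

# Broadcast (batch,) lengths to (hidden, batch), then transpose to (batch, hidden).
seq_cond = ops.Transpose()(ops.BroadcastTo((3, -1))(seq_length), (1, 0)) > t

# Keep h_t where the sequence is still running at step t, zeros elsewhere.
output = ops.Select()(seq_cond, h_t, zero_output)
print(output)  # the second row is all zeros because its length (1) is not > t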
Example #2
    def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):
        '''recurrent steps with sequence length'''
        time_step = x.shape[0]
        h_t = h
        if self.is_lstm:
            hidden_size = h[0].shape[-1]
            zero_output = P.ZerosLike()(h_t[0])
        else:
            hidden_size = h.shape[-1]
            zero_output = P.ZerosLike()(h_t)
        seq_length = P.Cast()(seq_length, mindspore.float32)
        seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)
        seq_length = P.Cast()(seq_length, mindspore.int32)
        seq_length = P.Transpose()(seq_length, (1, 0))

        outputs = []
        state_t = h_t
        t = 0
        while t < time_step:
            x_t = x[t:t + 1:1]
            x_t = P.Squeeze(0)(x_t)
            h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh)
            seq_cond = seq_length > t
            if self.is_lstm:
                state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])
                state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])
                output = P.Select()(seq_cond, h_t[0], zero_output)
                state_t = (state_t_0, state_t_1)
            else:
                state_t = P.Select()(seq_cond, h_t, state_t)
                output = P.Select()(seq_cond, h_t, zero_output)
            outputs.append(output)
            t += 1
        outputs = P.Stack()(outputs)
        return outputs, state_t
Example #3
    def __init__(self, bins=10, momentum=0.0, mu=0.02):
        super(GHMRLoss, self).__init__()
        self.bins = bins
        self.momentum = momentum
        self.mu = mu
        edges_left = np.array([float(x) / bins for x in range(bins)], dtype=np.float32)
        self.edges_left = Tensor(edges_left.reshape((bins, 1, 1, 1, 1)))
        edges_right = np.array([float(x) / bins for x in range(1, bins + 1)], dtype=np.float32)
        edges_right[-1] += 1e-4
        self.edges_right = Tensor(edges_right.reshape((bins, 1, 1, 1, 1)))

        if momentum >= 0:
            self.acc_sum = Parameter(initializer(0, [bins], mstype.float32))

        self.abs = ops.Abs()
        self.sqrt = ops.Sqrt()
        self.cast = ops.Cast()
        self.select = ops.Select()
        self.reshape = ops.Reshape()
        self.reduce_sum = ops.ReduceSum()
        self.max = ops.Maximum()
        self.less = ops.Less()
        self.equal = ops.Equal()
        self.greater = ops.Greater()
        self.logical_and = ops.LogicalAnd()
        self.greater_equal = ops.GreaterEqual()
        self.zeros_like = ops.ZerosLike()
        self.expand_dims = ops.ExpandDims()
Example #4
    def construct(self, x, seq_lengths):
        """Defines the ReverseSequence operator computation performed."""
        batch_size = x.shape[self.batch_dim]
        max_seq_len = x.shape[self.seq_dim]
        seq_lens_type = seq_lengths.dtype

        back = ops.Sub()(seq_lengths, ops.OnesLike()(seq_lengths))

        batch_idx = self.make_shape((batch_size, max_seq_len), seq_lens_type,
                                    0)
        forward_idx = self.make_shape((batch_size, max_seq_len), seq_lens_type,
                                      1)

        back = back.view(-1, 1)
        reverse_idx = ops.Sub()(back, forward_idx)

        condition = ops.Less()(reverse_idx, ops.ZerosLike()(reverse_idx))
        reverse_idx = ops.Select()(condition, forward_idx, reverse_idx)

        reverse_idx = ops.ExpandDims()(reverse_idx, 2)
        batch_idx = ops.ExpandDims()(batch_idx, 2)

        if self.batch_dim > self.seq_dim:
            batch_idx = ops.Transpose()(batch_idx, (1, 0, 2))
            reverse_idx = ops.Transpose()(reverse_idx, (1, 0, 2))
            x = ops.Transpose()(x, (1, 0, 2))
        start_indices = ops.Concat(2)((batch_idx, reverse_idx))

        output = ops.GatherNd()(x, start_indices)

        return output
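This hand-rolled index-and-gather reversal mirrors what the built-in ReverseSequence primitive does, so it can be sanity-checked against it on a toy input. A minimal sketch, assuming batch_dim=0 and seq_dim=1 (shapes and values are illustrative):

import numpy as np
from mindspore import Tensor, ops

# Hypothetical input: batch of 2, sequence length 4, feature size 3.
x = Tensor(np.arange(2 * 4 * 3, dtype=np.float32).reshape(2, 4, 3))
seq_lengths = Tensor(np.array([2, 3], dtype=np.int32))

# Built-in primitive: reverses the first seq_lengths[i] steps of each batch row.
reverse = ops.ReverseSequence(seq_dim=1, batch_dim=0)
print(reverse(x, seq_lengths))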
Example #5
    def __init__(self, reduction="mean"):
        super(D_Loss, self).__init__(reduction)
        self.sig = SigmoidCrossEntropyWithLogits()
        self.ones = ops.OnesLike()
        self.zeros = ops.ZerosLike()
        self.LAMBDA_Dis = args.LAMBDA_Dis
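The OnesLike/ZerosLike pair set up here is the usual way to build the "real" and "fake" targets for a sigmoid discriminator loss; the construct of this particular D_Loss is not shown on this page, so the following is only a sketch of that general pattern (names, shapes, and values are hypothetical):

import numpy as np
from mindspore import Tensor, ops

sig = ops.SigmoidCrossEntropyWithLogits()

# Hypothetical discriminator outputs for a real batch and a generated batch.
real_logits = Tensor(np.random.randn(4, 1).astype(np.float32))
fake_logits = Tensor(np.random.randn(4, 1).astype(np.float32))

# Real samples are pushed towards label 1, generated samples towards label 0.
real_loss = sig(real_logits, ops.OnesLike()(real_logits))
fake_loss = sig(fake_logits, ops.ZerosLike()(fake_logits))

d_loss = (real_loss + fake_loss).mean()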
Example #6
"""
grad accumulation cell wrapper
"""
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import ops, context, Tensor, Parameter
from mindspore.nn import Cell, TrainOneStepCell, TrainOneStepWithLossScaleCell
from mindspore.nn.wrap.loss_scale import _grad_scale
from mindspore.common.initializer import initializer
from mindspore.ops.operations.comm_ops import _VirtualDataset

zeroslike = ops.ZerosLike()
reset_accu_grads = ops.MultitypeFuncGraph("reset_accu_grads")


@reset_accu_grads.register("Tensor")
def _reset_accu_grads(accu_grad):
    succ = True
    return ops.depend(succ, ops.assign(accu_grad, zeroslike(accu_grad)))


cast = ops.Cast()
update_accu_grads = ops.MultitypeFuncGraph("update_accu_grads")


@update_accu_grads.register("Tensor", "Tensor")
def _update_accu_grads(accu_grad, grad):