Code example #1
    def __init__(self,
                 input_size,
                 batch_size=64,
                 hidden_size=512,
                 num_classes=11):
        super(StackedRNNForCPU, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        k = (1 / hidden_size)**0.5
        self.w1 = Parameter(
            np.random.uniform(-k, k,
                              (4 * hidden_size *
                               (input_size + hidden_size + 1), 1, 1)).astype(
                                   np.float32))
        self.w2 = Parameter(
            np.random.uniform(-k, k, (4 * hidden_size * (2 * hidden_size + 1),
                                      1, 1)).astype(np.float32))
        self.h = Tensor(
            np.zeros(shape=(1, batch_size, hidden_size)).astype(np.float32))
        self.c = Tensor(
            np.zeros(shape=(1, batch_size, hidden_size)).astype(np.float32))

        self.lstm_1 = nn.LSTMCell(input_size=input_size,
                                  hidden_size=hidden_size)
        self.lstm_2 = nn.LSTMCell(input_size=hidden_size,
                                  hidden_size=hidden_size)

        self.fc = nn.Dense(in_channels=hidden_size, out_channels=num_classes)
        self.transpose = P.Transpose()
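
All six snippets on this page are `__init__` methods cut out of larger `nn.Cell` subclasses, so none of them runs on its own. A plausible shared preamble, reconstructed by us for the older MindSpore releases these projects target (the `validator`/`Rel` import path in particular is internal and version-dependent):

import math

import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter, ParameterTuple, context
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from mindspore._checkparam import Rel, Validator as validator  # internal API, varies by release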
Code example #2
File: lstm.py Project: shirley18411/book
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0.0,
                 bidirectional=False):
        super(StackLSTM, self).__init__()
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.transpose = P.Transpose()

        # direction number
        num_directions = 2 if bidirectional else 1

        # input_size list
        input_size_list = [input_size]
        for i in range(num_layers - 1):
            input_size_list.append(hidden_size * num_directions)

        # layers
        layers = []
        for i in range(num_layers):
            layers.append(
                nn.LSTMCell(input_size=input_size_list[i],
                            hidden_size=hidden_size,
                            has_bias=has_bias,
                            batch_first=batch_first,
                            bidirectional=bidirectional,
                            dropout=dropout))

        # weights
        weights = []
        for i in range(num_layers):
            # weight size
            weight_size = (input_size_list[i] +
                           hidden_size) * num_directions * hidden_size * 4
            if has_bias:
                bias_size = num_directions * hidden_size * 4
                weight_size = weight_size + bias_size

            # numpy weight
            stdv = 1 / math.sqrt(hidden_size)
            w_np = np.random.uniform(-stdv, stdv,
                                     (weight_size, 1, 1)).astype(np.float32)

            # lstm weight
            weights.append(
                Parameter(initializer(Tensor(w_np), w_np.shape),
                          name="weight" + str(i)))

        #
        self.lstms = layers
        self.weight = ParameterTuple(tuple(weights))
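
StackLSTM packs each layer's four gate matrices, plus an optional bias vector per direction, into a single flattened (weight_size, 1, 1) parameter. A self-contained re-derivation of that arithmetic (our own helper, not part of the project) makes the per-layer sizes easy to verify:

def stack_lstm_weight_sizes(input_size, hidden_size, num_layers=1,
                            has_bias=True, bidirectional=False):
    # Re-derives the per-layer flattened weight counts used in StackLSTM:
    # 4 gates over (layer_input + hidden) columns, per direction, plus one
    # bias vector of 4 * hidden_size per direction when has_bias is set.
    num_directions = 2 if bidirectional else 1
    sizes = []
    layer_input = input_size
    for _ in range(num_layers):
        size = (layer_input + hidden_size) * num_directions * hidden_size * 4
        if has_bias:
            size += num_directions * hidden_size * 4
        sizes.append(size)
        layer_input = hidden_size * num_directions
    return sizes

# e.g. a 2-layer unidirectional stack over 100-dim inputs, 128 hidden units:
print(stack_lstm_weight_sizes(100, 128, num_layers=2))  # [117248, 131584]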
Code example #3
File: lstm.py Project: tristonerRL/mindspore
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0,
                 bidirectional=False):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.has_bias = has_bias
        self.batch_first = validator.check_value_type("batch_first",
                                                      batch_first, [bool],
                                                      self.cls_name)
        self.hidden_size = validator.check_integer("hidden_size", hidden_size,
                                                   0, Rel.GT, self.cls_name)
        self.num_layers = validator.check_integer("num_layers", num_layers, 0,
                                                  Rel.GT, self.cls_name)
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        if self.batch_first:
            self.transpose1 = P.Transpose()
            self.transpose2 = P.Transpose()
        num_directions = 2 if self.bidirectional else 1
        self.cpu_target = False
        enable_debug = context.get_context("enable_debug_runtime")
        if context.get_context("device_target") == "CPU" and not enable_debug:
            self.cpu_target = True
        if not self.cpu_target:
            self.lstm = P.LSTM(input_size=self.input_size,
                               hidden_size=self.hidden_size,
                               num_layers=self.num_layers,
                               has_bias=self.has_bias,
                               bidirectional=self.bidirectional,
                               dropout=self.dropout)
            weight_size = 0
            gate_size = 4 * self.hidden_size
            for layer in range(self.num_layers):
                input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions
                increment_size = gate_size * input_layer_size
                increment_size += gate_size * self.hidden_size
                if self.has_bias:
                    increment_size += 2 * gate_size
                weight_size += increment_size * num_directions
            stdv = 1 / math.sqrt(hidden_size)
            w_np = np.random.uniform(-stdv, stdv,
                                     (weight_size, 1, 1)).astype(np.float32)
            self.weight = Parameter(initializer(Tensor(w_np),
                                                [weight_size, 1, 1]),
                                    name='weight')
        else:
            input_size_list = []
            input_size_list.append(self.input_size)
            for i in range(self.num_layers - 1):
                input_size_list.append(self.hidden_size * num_directions)
            weights = []
            layers = []
            bias_size = 0 if not self.has_bias else num_directions * self.hidden_size * 4
            stdv = 1 / math.sqrt(hidden_size)
            for i in range(num_layers):
                weight_size = (input_size_list[i] + self.hidden_size
                               ) * num_directions * self.hidden_size * 4
                if has_bias:
                    weight_size = weight_size + bias_size
                w_np = np.random.uniform(
                    -stdv, stdv, (weight_size, 1, 1)).astype(np.float32)
                weights.append(
                    Parameter(initializer(Tensor(w_np), w_np.shape),
                              name='weight' + str(i)))
                layers.append(
                    nn.LSTMCell(input_size=input_size_list[i],
                                hidden_size=self.hidden_size,
                                has_bias=self.has_bias,
                                bidirectional=self.bidirectional,
                                dropout=self.dropout))
            self.lstms = layers
            self.weight = ParameterTuple(tuple(weights))
        self.fill = P.Fill()
        self.shape = P.Shape()
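
Note the bias accounting differs from example #2: the fused P.LSTM path reserves two bias vectors per layer (presumably b_ih and b_hh, matching the cuDNN convention, hence `increment_size += 2 * gate_size`), while the CPU fallback keeps a single 4 * hidden_size bias per direction. A standalone re-derivation of the fused-path loop (our own function, mirroring the code above):

def fused_lstm_weight_size(input_size, hidden_size, num_layers=1,
                           has_bias=True, bidirectional=False):
    # Per layer: input-projection and recurrent gate matrices,
    # plus two bias vectors of gate_size each when has_bias is set.
    num_directions = 2 if bidirectional else 1
    gate_size = 4 * hidden_size
    weight_size = 0
    for layer in range(num_layers):
        input_layer_size = input_size if layer == 0 else hidden_size * num_directions
        increment_size = gate_size * input_layer_size + gate_size * hidden_size
        if has_bias:
            increment_size += 2 * gate_size
        weight_size += increment_size * num_directions
    return weight_size

print(fused_lstm_weight_size(100, 128, num_layers=2))  # 249856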
Code example #4
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0,
                 bidirectional=False):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.has_bias = has_bias
        self.batch_first = validator.check_value_type("batch_first",
                                                      batch_first, [bool],
                                                      self.cls_name)
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        if self.batch_first:
            self.transpose1 = P.Transpose()
            self.transpose2 = P.Transpose()
        num_directions = 2 if self.bidirectional else 1
        self.cpu_target = False
        if context.get_context("device_target") == "CPU":
            self.cpu_target = True
        if not self.cpu_target:
            self.lstm = P.LSTM(input_size=self.input_size,
                               hidden_size=self.hidden_size,
                               num_layers=self.num_layers,
                               has_bias=self.has_bias,
                               bidirectional=self.bidirectional,
                               dropout=self.dropout)
            weight_size = 0
            gate_size = 4 * self.hidden_size
            for layer in range(self.num_layers):
                input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions
                increment_size = gate_size * input_layer_size
                increment_size += gate_size * self.hidden_size
                if self.has_bias:
                    increment_size += 2 * gate_size
                weight_size += increment_size * num_directions
            self.weight = Parameter(initializer(0.0, [weight_size, 1, 1]),
                                    name='weight')
        else:
            layer = []
            layer.append(
                nn.LSTMCell(input_size=self.input_size,
                            hidden_size=self.hidden_size,
                            layer_index=0,
                            has_bias=self.has_bias,
                            bidirectional=self.bidirectional,
                            dropout=self.dropout))
            for i in range(num_layers - 1):
                layer.append(
                    nn.LSTMCell(input_size=self.hidden_size * num_directions,
                                hidden_size=self.hidden_size,
                                layer_index=i + 1,
                                has_bias=self.has_bias,
                                bidirectional=self.bidirectional,
                                dropout=self.dropout))
            self.lstms = layer
        self.fill = P.Fill()
        self.shape = P.Shape()
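
This variant appears to predate example #3: the fused-path weight is only a zero-filled placeholder (`initializer(0.0, [weight_size, 1, 1])`), presumably meant to be overwritten by a checkpoint or an external initializer, and the CPU branch passes a `layer_index` argument to `nn.LSTMCell` that none of the other snippets use, which points at an older API revision. Note also that its CPU path registers no flattened weight parameters at all.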
Code example #5
File: deepspeech2.py Project: louis100/mindspore
    def __init__(self,
                 batch_size,
                 input_size,
                 hidden_size,
                 num_layers,
                 bidirectional=False,
                 batch_norm=False,
                 rnn_type='LSTM'):
        super(BatchRNN, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn_type = rnn_type
        self.bidirectional = bidirectional
        self.has_bias = True
        self.is_batch_norm = batch_norm
        self.num_directions = 2 if bidirectional else 1
        self.reshape_op = P.Reshape()
        self.shape_op = P.Shape()
        self.sum_op = P.ReduceSum()

        input_size_list = [input_size]
        for i in range(num_layers - 1):
            input_size_list.append(hidden_size)
        layers = []

        for i in range(num_layers):
            layers.append(
                nn.LSTMCell(input_size=input_size_list[i],
                            hidden_size=hidden_size,
                            bidirectional=bidirectional,
                            has_bias=self.has_bias))

        weights = []
        for i in range(num_layers):
            weight_size = (input_size_list[i] +
                           hidden_size) * hidden_size * self.num_directions * 4
            if self.has_bias:
                bias_size = self.num_directions * hidden_size * 4 * 2
                weight_size = weight_size + bias_size

            stdv = 1 / math.sqrt(hidden_size)
            w_np = np.random.uniform(-stdv, stdv,
                                     (weight_size, 1, 1)).astype(np.float32)

            weights.append(
                Parameter(initializer(Tensor(w_np), w_np.shape),
                          name="weight" + str(i)))

        self.h, self.c = self.stack_lstm_default_state(
            batch_size,
            hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional)
        self.lstms = layers
        self.weight = ParameterTuple(tuple(weights))

        if batch_norm:
            batch_norm_layer = []
            for i in range(num_layers - 1):
                batch_norm_layer.append(nn.BatchNorm1d(hidden_size))
            self.batch_norm_list = batch_norm_layer
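
The `self.stack_lstm_default_state(...)` call refers to a helper that this excerpt does not show. A plausible sketch, assuming it returns one zero-filled (h, c) pair per stacked layer in the usual (num_directions, batch_size, hidden_size) shape (name and layout inferred from the call site; written here as a free function for brevity):

import numpy as np
from mindspore import Tensor

def stack_lstm_default_state(batch_size, hidden_size, num_layers, bidirectional):
    # Hypothetical reconstruction: zero initial hidden and cell states,
    # one (num_directions, batch_size, hidden_size) tensor per stacked layer.
    num_directions = 2 if bidirectional else 1
    h = tuple(Tensor(np.zeros((num_directions, batch_size, hidden_size),
                              np.float32)) for _ in range(num_layers))
    c = tuple(Tensor(np.zeros((num_directions, batch_size, hidden_size),
                              np.float32)) for _ in range(num_layers))
    return h, c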
Code example #6
File: lstm.py Project: windhooked/mindspore
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0,
                 bidirectional=False):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.has_bias = has_bias
        self.batch_first = validator.check_value_type("batch_first", batch_first, [bool], self.cls_name)
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        if self.batch_first:
            self.transpose1 = P.Transpose()
            self.transpose2 = P.Transpose()
        num_directions = 2 if self.bidirectional else 1
        self.cpu_target = False
        if context.get_context("device_target") == "CPU":
            self.cpu_target = True
        if not self.cpu_target:
            self.lstm = P.LSTM(input_size=self.input_size,
                               hidden_size=self.hidden_size,
                               num_layers=self.num_layers,
                               has_bias=self.has_bias,
                               bidirectional=self.bidirectional,
                               dropout=self.dropout)
            weight_size = 0
            gate_size = 4 * self.hidden_size
            for layer in range(self.num_layers):
                input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions
                increment_size = gate_size * input_layer_size
                increment_size += gate_size * self.hidden_size
                if self.has_bias:
                    increment_size += 2 * gate_size
                weight_size += increment_size * num_directions
            self.weight = Parameter(initializer(0.0, [weight_size, 1, 1]), name='weight')
        else:
            input_size_list = []
            input_size_list.append(self.input_size)
            for i in range(self.num_layers - 1):
                input_size_list.append(self.hidden_size * num_directions)
            weights = []
            layers = []
            bias_size = 0 if not self.has_bias else num_directions * self.hidden_size * 4
            for i in range(num_layers):
                weight_size = (input_size_list[i] + self.hidden_size) * num_directions * self.hidden_size * 4
                w_np = np.ones([weight_size, 1, 1]).astype(np.float32) * 0.01
                if has_bias:
                    bias_np = np.zeros([bias_size, 1, 1]).astype(np.float32)
                    w_np = np.concatenate([w_np, bias_np], axis=0)
                weights.append(Parameter(initializer(Tensor(w_np), w_np.shape), name='weight' + str(i)))

                layers.append(nn.LSTMCell(input_size=input_size_list[i],
                                          hidden_size=self.hidden_size,
                                          has_bias=self.has_bias,
                                          bidirectional=self.bidirectional,
                                          dropout=self.dropout))
            self.lstms = layers
            self.weight = ParameterTuple(tuple(weights))
        self.fill = P.Fill()
        self.shape = P.Shape()
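
Unlike the random initialization in the other snippets, this last variant builds its CPU-path weights deterministically: gate matrices at a constant 0.01 and biases at zero, concatenated in that order, which is convenient for reproducible numerical tests. A quick standalone check of the resulting flattened layout (constants chosen by us):

import numpy as np

input_size, hidden_size, num_directions = 3, 4, 1
weight_size = (input_size + hidden_size) * num_directions * hidden_size * 4
bias_size = num_directions * hidden_size * 4
w_np = np.ones([weight_size, 1, 1], np.float32) * 0.01
b_np = np.zeros([bias_size, 1, 1], np.float32)
flat = np.concatenate([w_np, b_np], axis=0)  # gate matrices first, then biases
assert flat.shape == (weight_size + bias_size, 1, 1)  # (128, 1, 1)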