Code Example #1
File: mlp_minpy_gpu.py Project: sxjscience/minpy
 def forward(self, X, mode):
     # Flatten the input data to matrix.
     X = np.reshape(X, (self.batch_size, 784))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['wi'], self.params['bi'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Hidden layers.
     for i in range(self.num_hidden - 1):
         y2 = layers.affine(y2, self.params['w%d' % i], self.params['b%d' % i])
         y2 = layers.relu(y2)
     # Output affine layer.
     y3 = layers.affine(y2, self.params['wo'], self.params['bo'])
     return y3
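These examples call layers.affine and layers.relu from minpy's minpy.nn.layers module. As a rough sketch of what those two calls compute, inferred from the call sites above rather than from minpy's actual source, plain-NumPy equivalents would look like this:

import numpy as np

def affine(x, w, b):
    # Fully connected layer: matrix product plus a bias broadcast
    # across the batch dimension. Sketch only; minpy may differ.
    return np.dot(x, w) + b

def relu(x):
    # Element-wise rectified linear unit.
    return np.maximum(0, x)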
Code Example #2
File: retrieval_test.py Project: HQ01/fast-weight
    def forward(self, X, mode):
        N, sequence_length, D = X.shape
        WX = self.params['WX']
        Wh = self.params['Wh']
        bias_h = self.params['bias_h']
        WY = self.params['WY']
        bias_Y = self.params['bias_Y']
        WY0 = self.params['WY0']
        bias_Y0 = self.params['bias_Y0']

        h = np.zeros((N, self._n_hidden))
        self.previous_h = [h]
        for t in range(sequence_length):
            X_t = X[:, t, :]
            h0 = self._update_h(X_t, h, WX, Wh, bias_h)
            # Attend over all previously stored hidden states.
            projected_h = sum(
                batch_scalar_product(h_prev, h0) * h_prev
                for h_prev in self.previous_h)
            h = np.dot(X_t, WX) + np.dot(h, Wh) + projected_h
            h = self._nonlinear(h)
            self.previous_h.append(h)

        Y0 = layers.relu(layers.affine(h, WY0, bias_Y0))
        Y = layers.affine(Y0, WY, bias_Y)
        return Y
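batch_scalar_product is called here but not defined in the snippet. A plausible reconstruction, hypothetical rather than the fast-weight project's actual helper, computes a row-wise dot product and keeps the result broadcastable against the stored hidden states:

import numpy as np

def batch_scalar_product(a, b):
    # Hypothetical helper: per-row dot product of two (N, H) arrays,
    # shaped (N, 1) so it broadcasts when scaling each stored state.
    return np.sum(a * b, axis=1, keepdims=True)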
Code Example #3
File: test_cnn.py Project: LiuFang816/SALSTM_py_data
 def forward(self, X, mode):
     out = self.conv(X=X, **self.params)
     out = layers.affine(out, self.params['w1'], self.params['b1'])
     out = layers.relu(out)
     out = layers.affine(out, self.params['w2'], self.params['b2'])
     # This verifies whether symbols can be reused.
     trash = self.conv(X=np.zeros(X.shape), **self.params)
     return out
Code Example #4
File: test_cnn.py Project: lryta/minpy
 def forward(self, X, mode):
     out = self.conv(X=X, **self.params)
     out = layers.affine(out, self.params['w1'], self.params['b1'])
     out = layers.relu(out)
     out = layers.affine(out, self.params['w2'], self.params['b2'])
     # This verifies whether symbols can be reused.
     trash = self.conv(X=np.zeros(X.shape), **self.params)
     return out
Code Example #5
File: mlp_minpy_cpu.py Project: xcbat/minpy
 def forward(self, X, mode):
     # Flatten the input data to matrix.
     X = np.reshape(X, (batch_size, flattened_input_size))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Second affine layer.
     y3 = layers.affine(y2, self.params['w2'], self.params['b2'])
     return y3
Code Example #6
 def forward(self, X, mode):
     # Flatten the input data to matrix.
     X = np.reshape(X, (batch_size, 3 * 32 * 32))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Batch normalization
     y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
         y2, self.params['gamma'], self.params['beta'],
         running_mean=self.aux_params['running_mean'],
         running_var=self.aux_params['running_var'])
     # Second affine layer.
     y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
     # Dropout
     y5 = layers.dropout(y4, 0.5, mode=mode)
     return y5
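The layers.batchnorm call above returns the normalized activations together with updated running statistics. A minimal training-mode sketch of that contract, assumed from the call site (minpy's real version also takes a mode and handles inference with the running statistics):

import numpy as np

def batchnorm(x, gamma, beta, running_mean, running_var,
              momentum=0.9, eps=1e-5):
    # Training-mode batch normalization sketch: normalize with batch
    # statistics, scale and shift, then update the running averages.
    mean = np.mean(x, axis=0)
    var = np.var(x, axis=0)
    x_hat = (x - mean) / np.sqrt(var + eps)
    out = gamma * x_hat + beta
    running_mean = momentum * running_mean + (1 - momentum) * mean
    running_var = momentum * running_var + (1 - momentum) * var
    return out, running_mean, running_var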
Code Example #7
File: mlp_bn_dropout.py Project: HrWangChengdu/minpy
 def forward(self, X, mode):
     # Flatten the input data to matrix.
     X = np.reshape(X, (batch_size, 3 * 32 * 32))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Batch normalization
     y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
         y2, self.params['gamma'], self.params['beta'],
         running_mean=self.aux_params['running_mean'],
         running_var=self.aux_params['running_var'])
     # Second affine layer.
     y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
     # Dropout
     y5 = layers.dropout(y4, 0.5, mode=mode)
     return y5
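layers.dropout(y4, 0.5, mode=mode) drops units only during training. An inverted-dropout sketch consistent with that call, where 0.5 is read as the drop probability (minpy's convention may differ):

import numpy as np

def dropout(x, prob, mode='train'):
    # Inverted dropout sketch: zero units with probability prob during
    # training and rescale so the expected activation is unchanged.
    if mode != 'train':
        return x
    mask = (np.random.rand(*x.shape) >= prob) / (1.0 - prob)
    return x * mask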
Code Example #8
File: rnn.py Project: HQ01/fast-weight
    def forward(self, X, mode):
        N, sequence_length, D = X.shape
        h = np.zeros((N, self._n_hidden))

        WX = self.params['WX']
        Wh = self.params['Wh']
        bias_h = self.params['bias_h']
        WY = self.params['WY']
        bias_Y = self.params['bias_Y']
        WY0 = self.params['WY0']
        bias_Y0 = self.params['bias_Y0']

        self.previous_h = [h]
        for t in range(sequence_length):
            X_t = X[:, t, :]
            h = self._update_h(X_t, h, WX, Wh, bias_h)
            h = self._inner_loop(X_t, self.previous_h[-1], h, WX, Wh,
                                 self.previous_h)
            self.previous_h.append(h)

        Y0 = layers.relu(layers.affine(h, WY0, bias_Y0))
        Y = layers.affine(Y0, WY, bias_Y)
        return Y
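_update_h and _inner_loop are methods of the surrounding class and are not shown in this excerpt. A hypothetical _update_h consistent with the recurrence visible in Code Example #2 (the tanh nonlinearity and exact form are assumptions):

import numpy as np

def _update_h(self, X_t, h, WX, Wh, bias_h):
    # One vanilla RNN step: affine transforms of the input and the
    # previous hidden state, followed by a nonlinearity.
    return np.tanh(np.dot(X_t, WX) + np.dot(h, Wh) + bias_h)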
Code Example #9
File: test_layers.py Project: colinsongf/minpy
 def check_fn(x):
     return layers.l2_loss(layers.relu(x), fake_y)
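As used here, layers.l2_loss reduces predictions and targets to a scalar loss. A plain-NumPy sketch of that behavior; the exact scaling minpy applies is an assumption:

import numpy as np

def l2_loss(x, y):
    # Squared-error loss averaged over the batch; the 0.5 factor and
    # the averaging are assumptions about minpy's scaling.
    return 0.5 * np.sum((x - y) ** 2) / x.shape[0]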
Code Example #10
File: mlp.py Project: colinsongf/minpy
 def forward(self, X):
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     y2 = layers.relu(y1)
     y3 = layers.affine(y2, self.params['w2'], self.params['b2'])
     return y3
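Driving this two-layer MLP only requires a params dict keyed 'w1', 'b1', 'w2', 'b2'. A minimal hypothetical setup, with shapes chosen purely for illustration:

import numpy as np

rng = np.random.RandomState(0)
params = {
    'w1': rng.randn(784, 256) * 0.01, 'b1': np.zeros(256),
    'w2': rng.randn(256, 10) * 0.01, 'b2': np.zeros(10),
}
# With model.params = params, model.forward(X) maps a (batch, 784)
# input to (batch, 10) class scores.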
Code Example #11
File: test_layers.py Project: lryta/minpy
 def check_fn(x):
     return layers.softmax_loss(layers.relu(x), fake_y)
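layers.softmax_loss pairs the class scores with integer labels. A numerically stable cross-entropy sketch of that call (sketch only; minpy's version may differ in reduction or label format):

import numpy as np

def softmax_loss(x, y):
    # Cross-entropy over a softmax of the scores; y holds integer
    # class labels, one per row of x.
    shifted = x - np.max(x, axis=1, keepdims=True)
    log_probs = shifted - np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))
    return -np.mean(log_probs[np.arange(x.shape[0]), y])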
Code Example #12
File: model_builder.py Project: xcbat/minpy
 def forward(self, inputs, *args):
     return layers.relu(inputs)
Code Example #13
File: test_layers.py Project: schevalier/minpy
 def check_fn(x):
     return layers.softmax_loss(layers.relu(x), fake_y)
Code Example #14
File: cnn.py Project: HrWangChengdu/minpy
 def forward(self, X, mode):
     out = self.conv(X=X, **self.params)
     out = layers.affine(out, self.params["w1"], self.params["b1"])
     out = layers.relu(out)
     out = layers.affine(out, self.params["w2"], self.params["b2"])
     return out
Code Example #15
 def forward(self, X, mode):
     out = self.conv(X=X, **self.params)
     out = layers.affine(out, self.params['w1'], self.params['b1'])
     out = layers.relu(out)
     out = layers.affine(out, self.params['w2'], self.params['b2'])
     return out
Code Example #16
File: model_builder.py Project: ZihengJiang/minpy
 def forward(self, inputs, *args):
     return layers.relu(inputs)
Code Example #17
 def forward(self, X, mode):
     out = self.conv(X=X, **self.params)
     out = layers.affine(out, self.params['w1'], self.params['b1'])
     out = layers.relu(out)
     out = layers.affine(out, self.params['w2'], self.params['b2'])
     return out