Code Example #1
File: recurrent.py  Project: turiya/toy-auto-diff
 def step(self, inputs, body_inputs):
     # One LSTM step: consume the timestep at `index`, update cell state and output.
     index, cell_state, output = body_inputs
     # A single matmul pair covers the concatenated gate weights (4 * units columns).
     linear_sum = ad.dot(inputs[:, index], self.wx) + ad.dot(cell_state, self.wh)
     if self.use_bias:
         linear_sum += self.b
     # Slice the pre-activations into the three gates and the candidate state.
     forget_gate = ad.acts.sigmoid(linear_sum[:, :self.units])
     input_gate = ad.acts.sigmoid(linear_sum[:, self.units:self.units * 2])
     output_gate = ad.acts.sigmoid(linear_sum[:, self.units * 2:self.units * 3])
     cell_state_inner = ad.tanh(linear_sum[:, self.units * 3:])
     # Forget part of the old cell state, then mix in the gated candidate.
     new_cell_state = forget_gate * cell_state + input_gate * cell_state_inner
     new_output = output_gate * ad.tanh(new_cell_state)
     return index + 1.0, new_cell_state, new_output
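This `step` is the body of an LSTM cell: `self.wx` and `self.wh` pack the forget, input, and output gates plus the candidate state into `4 * units` columns, so one pair of `ad.dot` calls computes every pre-activation before the slicing. A minimal NumPy sketch of that column layout, assuming `units = 2` (the array here is made up purely for illustration):

import numpy as np

units = 2
linear_sum = np.arange(8.0).reshape(1, 4 * units)  # stand-in pre-activations
forget = linear_sum[:, :units]                # columns 0..1
input_ = linear_sum[:, units:units * 2]       # columns 2..3
output_ = linear_sum[:, units * 2:units * 3]  # columns 4..5
candidate = linear_sum[:, units * 3:]         # columns 6..7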
Code Example #2
 def call(self, inputs, **kwargs):
     # Dense forward pass: affine transform, optional bias, optional activation.
     y = ad.dot(inputs, self.w)
     if self.use_bias:
         y += self.b
     if self.activation is not None:
         y = self.activation(y)
     return y
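This `call` is the standard dense-layer forward pass, `y = inputs · W (+ b)`, with an optional activation applied last. The convolution layer in Code Example #8 reuses exactly this tail once its input has been reshaped into a patch matrix.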
Code Example #3
File: recurrent.py  Project: turiya/toy-auto-diff
 def step(self, inputs, body_inputs):
     # One GRU step; wx/wh pack the update and reset gates in the first 2 * units columns.
     index, output = body_inputs
     linear_sum = (ad.dot(inputs[:, index], self.wx[:, :self.units * 2]) +
                   ad.dot(output, self.wh[:, :self.units * 2]))
     if self.use_bias:
         linear_sum += self.b[:self.units * 2]
     update_gate = ad.acts.sigmoid(linear_sum[:, :self.units])
     reset_gate = ad.acts.sigmoid(linear_sum[:, self.units:self.units * 2])
     # Candidate output: the previous output is scaled by the reset gate first.
     output_inner = (ad.dot(inputs[:, index], self.wx[:, self.units * 2:]) +
                     ad.dot(reset_gate * output, self.wh[:, self.units * 2:]))
     if self.use_bias:
         output_inner += self.b[self.units * 2:]
     # Interpolate between the old output and the candidate with the update gate.
     new_output = (1.0 - update_gate) * output + update_gate * ad.tanh(output_inner)
     return index + 1.0, new_output
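Unlike the LSTM step above, the GRU packs only the update and reset gates into the first `2 * units` columns of `wx` and `wh`; the candidate needs a second pair of `dot` products because the previous output must be scaled by the reset gate before it is mixed in. The final line is the usual GRU interpolation between the old output and the tanh candidate, weighted by the update gate.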
Code Example #4
    def test_forward(self):
        # Count from 0 to 10; the loop output stacks every iteration's value.
        y = ad.while_loop(
            cond=lambda inputs: ad.less(inputs[0], ad.constant(10)),
            body=lambda inputs: [inputs[0] + 1],
            loop_vars=[ad.variable(0.0)],
        )
        actual = y.forward()
        expect = np.arange(1, 11)
        self.assertEqual((None,), y.shape)
        self.assertTrue(np.allclose(expect, actual), (expect, actual))

        # Running factorials 1! .. 5!; output_index=1 selects the accumulator.
        y = ad.while_loop(
            cond=lambda inputs: ad.less(inputs[0], ad.constant(5)),
            body=lambda inputs: [inputs[0] + 1, (inputs[0] + 1) * inputs[1]],
            loop_vars=[ad.variable(0.0), ad.variable(1.0)],
            output_index=1,
        )
        actual = y.forward()
        expect = np.array([1, 2, 6, 24, 120])
        self.assertEqual((None,), y.shape)
        self.assertTrue(np.allclose(expect, actual), (expect, actual))

        # Powers of the Fibonacci matrix [[1, 1], [1, 0]]; see the note below.
        y = ad.while_loop(
            cond=lambda inputs: ad.less(inputs[0], ad.constant(64)),
            body=lambda inputs: [inputs[0] * 2,
                                 ad.dot(inputs[1], ad.variable([[1, 1], [1, 0]]))],
            loop_vars=[ad.variable(1),
                       ad.variable([[1, 0], [0, 1]])],
            output_index=1,
        )
        actual = y.forward()
        expect = np.array([1, 2, 3, 5, 8, 13])
        self.assertEqual((None, 2, 2), y.shape)
        self.assertTrue(np.allclose(expect, actual[:, 0, 0]), (expect, actual))
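The third case deserves a note: the counter doubles each pass (1, 2, 4, 8, 16, 32), so the body runs six times, and each run right-multiplies the accumulator by [[1, 1], [1, 0]]. The stacked outputs are therefore the matrix powers M^1 through M^6, whose top-left entries are the Fibonacci numbers 1, 2, 3, 5, 8, 13. A quick NumPy check of the expected values:

import numpy as np

M = np.array([[1, 1], [1, 0]])
print([np.linalg.matrix_power(M, k)[0, 0] for k in range(1, 7)])
# -> [1, 2, 3, 5, 8, 13]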
Code Example #5
def gen_linear_model(config: dict, verbose=False):
    """Generate a linear model.

    :param config: Configuration.
    :param verbose: Print loss and gradients if it is True.
    :return: Model, loss, placeholders and variables.
    """
    x = ad.placeholder(shape=(None, config['input_len']), name='X')
    y = ad.placeholder(shape=(None, ), name='Y')

    w1 = ad.variable(
        initializer=ad.inits.random_normal(),
        shape=(config['input_len'], config['hidden_dim']),
        name='W1',
    )
    b1 = ad.variable(
        initializer=ad.inits.zeros,
        shape=config['hidden_dim'],
        name='b1',
    )

    # Hidden layer with leaky-ReLU activation.
    v = ad.acts.leaky_relu(ad.dot(x, w1) + b1)

    w2 = ad.variable(
        initializer=ad.inits.random_normal(),
        shape=(config['hidden_dim'], 2),
        name='W2',
    )
    b2 = ad.variable(
        initializer=ad.inits.zeros,
        shape=2,
        name='b2',
    )

    # Two-way softmax output.
    y_pred = ad.acts.softmax(ad.dot(v, w2) + b2)

    loss = ad.square(y - y_pred).mean()

    if verbose:
        print('Loss:', loss)

    return y_pred, loss, [x, y], [w1, b1, w2, b2]
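Despite its name, this version of `gen_linear_model` is not linear: it stacks a leaky-ReLU hidden layer of `hidden_dim` units under a two-way softmax output. Code Example #10 below is the plain linear regression the name actually suggests.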
Code Example #6
File: test_op_dot.py  Project: turiya/toy-auto-diff
 def _gen_random_and_result(x_shape, y_shape, call_type=True):
     # Build a random dot product and its NumPy reference result.
     x_val = np.random.random(x_shape)
     y_val = np.random.random(y_shape)
     x = ad.variable(x_val, name='X%s' % str(x_shape))
     y = ad.variable(y_val, name='Y%s' % str(y_shape))
     # call_type toggles the functional form versus the method form.
     if call_type:
         z = ad.dot(x, y)
     else:
         z = x.dot(y)
     expect = np.dot(x_val, y_val)
     return z, [x, y], expect
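This helper backs the `ad.dot` tests: it draws random operands of the requested shapes, builds the graph through either `ad.dot(x, y)` or the method form `x.dot(y)`, and returns `np.dot` on the raw values as the reference, so one fixture can exercise both call styles across arbitrary shape pairs.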
Code Example #7
 def test_backward(self):
     # x is captured by the loop body, so its gradient accumulates over iterations.
     x = ad.variable([[1, 1], [1, 0]])
     y = ad.while_loop(
         cond=lambda inputs: ad.less(inputs[0], ad.constant(64)),
         body=lambda inputs: [inputs[0] * 2, ad.dot(inputs[1], x)],
         loop_vars=[ad.variable(1),
                    ad.variable([[1, 0], [0, 1]])],
         output_index=1,
     )
     self.numeric_gradient_check(y, {}, [x])
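Here `x` feeds the loop body from outside rather than as a loop variable, so the numeric gradient check exercises the case where a single tensor enters every iteration's `ad.dot` and its gradients must be summed across the whole unrolled loop.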
Code Example #8
File: conv.py  Project: turiya/toy-auto-diff
 def call(self, inputs, **kwargs):
     # Pad only the two spatial axes; batch and channel axes stay untouched.
     padded = ad.pad(inputs, ((0,), (self.pad_width[0],),
                              (self.pad_width[1],), (0,)))
     batch_size = ad.shape(inputs)[0]
     # Extract patches per sample, then convolve via a single matmul.
     reshaped = ad.map_fn(lambda i: self.call_batch(padded, i),
                          ad.arange(batch_size))
     y = ad.dot(reshaped, self.w)
     if self.use_bias:
         y += self.b
     if self.activation is not None:
         y = self.activation(y)
     return y
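Padding covers only the two spatial axes; `ad.map_fn` then runs `self.call_batch` once per sample (not shown in this excerpt, but presumably an im2col-style patch extraction), so the convolution itself collapses into the same dot-bias-activation tail as the dense layer in Code Example #2.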
Code Example #9
File: recurrent.py  Project: turiya/toy-auto-diff
 def call(self, inputs, **kwargs):
     # Zero-valued (batch, units) tensor used to seed both cell state and output.
     initial_val = ad.dot(
         ad.zeros_like(inputs)[:, 0, :],
         ad.zeros_like(self.wx[:, :self.units]))
     # Run `step` once per timestep; output_index=-1 keeps the stacked outputs.
     outputs = ad.while_loop(
         lambda body_inputs: ad.less(body_inputs[0],
                                     ad.shape(inputs)[1]),
         lambda x: self.step(inputs, x),
         [ad.variable(0.0), initial_val, initial_val],
         output_index=-1,
     )
     if self.return_sequences:
         # Stacked along the loop axis, so transpose back to batch-major order.
         return outputs.transpose(axes=[1, 0, 2])
     return outputs[-1]
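The `ad.dot` of two zero tensors is just a shape trick: it yields a `(batch, units)` block of zeros to seed the state without hard-coding the batch size. Since `while_loop` stacks one output per timestep along the loop axis, `return_sequences` needs a transpose back to batch-major order, and the final state is simply `outputs[-1]`.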
Code Example #10
def gen_linear_model(config: dict, verbose=False):
    """Generate a linear model.

    :param config: Configuration.
    :param verbose: Print loss and gradients if it is True.
    :return: Model, loss, placeholders and variables.
    """
    x = ad.placeholder(shape=(None, config['input_len']), name='X')
    y = ad.placeholder(shape=(None, ), name='Y')

    w = ad.variable(
        initializer=ad.inits.random_normal(),
        shape=config['input_len'],
        name='W',
    )
    b = ad.variable(0.0, name='b')

    y_pred = ad.dot(x, w) + b
    loss = ad.square(y - y_pred).mean()

    if verbose:
        print('Loss:', loss)

    return y_pred, loss, [x, y], [w, b]
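This is the genuinely linear counterpart to Code Example #5: a single weight vector and a scalar bias, `y_pred = x · w + b`, trained with mean squared error.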