Example #1
def on_epoch_start(self, epoch_num):
    # Move to a fresh, deterministic seed at the start of every epoch.
    self.seed += 1
    env.seed(self.seed)
    # Time each block of 100 epochs.
    if epoch_num % 100 == 0:
        if epoch_num > 0:
            duration = time.time() - self.start_time
            print(f"100 epoch duration = {duration}")
        self.start_time = time.time()
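
All of these snippets revolve around an env module that is never shown. Below is a minimal stand-in consistent with how the examples use it; this is an assumption, not the library's actual code: env.seed is taken to delegate to NumPy's global RNG, which the np.random calls in the tests below rely on.

import numpy as np

class _Env:
    # Assumed behavior: env.seed(n) seeds NumPy's global RNG.
    def seed(self, value):
        np.random.seed(value)

env = _Env()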
Example #2
def test_l2_regularization_coursera_test_case():
    env.seed(1)
    y_assess = np.array([[1, 1, 0, 1, 0]])
    # The bare np.random.randn calls draw and discard the bias shapes
    # (b1, b2, b3) so the RNG stream stays aligned with the original
    # Coursera fixture, which draws weights and biases alternately.
    w1 = np.random.randn(2, 3)
    np.random.randn(2, 1)
    w2 = np.random.randn(3, 2)
    np.random.randn(3, 1)
    w3 = np.random.randn(1, 3)
    np.random.randn(1, 1)

    norm = reg.L2NormRegularization(0.1)
    # 0.1 is the regularization strength; y_assess.shape[-1] = 5 is m.
    norm.compute(y_assess.shape[-1], w1, w2, w3)

    np.testing.assert_allclose(norm.output, 0.183984340402)
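
The expected value is the standard L2 penalty, (lambda / (2 m)) * sum of the squared weight entries over all layers, with lambda = 0.1 and m = 5 examples. A standalone NumPy sketch that reproduces the fixture's number under the env.seed assumption above (l2_penalty is a hypothetical helper, not the library API):

import numpy as np

def l2_penalty(lambd, m, *weights):
    # (lambda / (2 * m)) * sum over layers of the squared Frobenius norm
    return (lambd / (2 * m)) * sum(np.sum(np.square(w)) for w in weights)

np.random.seed(1)
w1 = np.random.randn(2, 3); np.random.randn(2, 1)
w2 = np.random.randn(3, 2); np.random.randn(3, 1)
w3 = np.random.randn(1, 3); np.random.randn(1, 1)
np.testing.assert_allclose(l2_penalty(0.1, 5, w1, w2, w3), 0.183984340402)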
Example #3
def test_mini_batch_training():
    env.seed(1)
    g = Graph(constant(8))
    x = np.arange(5)
    y = np.arange(5)
    feed = DummyFeedMethod()
    optimizer = DummyOptimizer(10)
    listener = DummyListener()
    train = MiniBatchTraining(optimizer, listener, batch_size=2)
    train.train(g, feed, x, y, 2)

    # Dry run
    assert_pair(feed.arg_pairs[0], x, y)
    # First epoch
    assert_pair(feed.arg_pairs[1], np.array([2, 1]), np.array([2, 1]))
    assert_pair(feed.arg_pairs[2], np.array([4, 0]), np.array([4, 0]))
    assert_pair(feed.arg_pairs[3], np.array([3]), np.array([3]))

    # Second epoch
    assert_pair(feed.arg_pairs[4], np.array([0, 2]), np.array([0, 2]))
    assert_pair(feed.arg_pairs[5], np.array([4, 3]), np.array([4, 3]))
    assert_pair(feed.arg_pairs[6], np.array([1]), np.array([1]))

    # 6 gradient descent steps: the dummy cost starts at 10 and is
    # decremented once per step, ending at 4.
    assert optimizer.cost == 4

    assert len(listener.calls) == 12
    assert listener.calls[0] == "on_start"
    assert listener.calls[1] == "on_epoch_start(0)"
    assert listener.calls[2] == "on_iteration(0, 0, 9)"
    assert listener.calls[3] == "on_iteration(0, 1, 8)"
    assert listener.calls[4] == "on_iteration(0, 2, 7)"
    assert listener.calls[5] == "on_epoch_end(0, 8.0)"
    assert listener.calls[6] == "on_epoch_start(1)"
    assert listener.calls[7] == "on_iteration(1, 3, 6)"
    assert listener.calls[8] == "on_iteration(1, 4, 5)"
    assert listener.calls[9] == "on_iteration(1, 5, 4)"
    assert listener.calls[10] == "on_epoch_end(1, 5.0)"
    assert listener.calls[11] == "on_end"
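
The expected call sequence pins down the batching scheme: one dry-run call on the full arrays, then, per epoch, a fresh random permutation of the sample indices sliced into batches of batch_size, with a smaller remainder batch at the end. A standalone sketch of one epoch of that scheme (mini_batches is illustrative, not the library's code):

import numpy as np

def mini_batches(x, y, batch_size):
    # Shuffle the indices once per epoch, then slice them into batches.
    perm = np.random.permutation(len(x))
    for start in range(0, len(perm), batch_size):
        idx = perm[start:start + batch_size]
        yield x[idx], y[idx]

x = y = np.arange(5)
for bx, by in mini_batches(x, y, batch_size=2):
    print(bx, by)  # e.g. [2 1] [2 1] / [4 0] [4 0] / [3] [3]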
Example #4
def on_epoch_start(self, epoch_num):
    env.seed(1)
Example #5
def on_start(self):
    self.costs = []
    self.start_time = time.time()
    env.seed(3)
Example #6
    def fully_connected_layer(self, features, cnt_features, cnt_neurons,
                              layer_num):
        weights = f.var("W" + str(layer_num + 1), self.weight_initializer,
                        shape=(cnt_neurons, cnt_features))
        biases = f.var("b" + str(layer_num + 1), init.ZeroInitializer(),
                       shape=(cnt_neurons, 1))
        return f.fully_connected(features, weights, biases,
                                 first_layer=(layer_num == 0))

    def train(self, x_train, y_train, *,
              num_iterations=15_000,
              learning_rate=0.01,
              print_cost=True):

        env.seed(3)
        self.cost_graph.placeholders = {self.X: x_train, self.Y: y_train}
        self.cost_graph.initialize_variables()
        optimizer = gd.GradientDescentOptimizer(learning_rate)
        optimizer.prepare_and_check(self.cost_graph)
        costs = []
        for i in range(num_iterations + 1):
            optimizer.run()

            # Record (and optionally print) the cost every 1,000 iterations.
            if i % 1000 == 0:
                costs.append(optimizer.cost)
                if print_cost:
                    print(f"Cost after iteration {i}: {optimizer.cost}")

        return costs
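
One detail worth keeping from this example: the loop runs num_iterations + 1 times so that the final iteration, itself a multiple of 1,000, also gets recorded and printed. A quick standalone check:

num_iterations = 15_000
logged = [i for i in range(num_iterations + 1) if i % 1000 == 0]
assert logged[0] == 0 and logged[-1] == 15_000
assert len(logged) == 16  # costs at iterations 0, 1000, ..., 15000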
Example #7
    def fully_connected_layer(self, features, cnt_features, cnt_neurons,
                              layer_number):
        weights = f.var("W" + str(layer_number + 1),
                        XavierInitializer(),
                        shape=(cnt_neurons, cnt_features))
        biases = f.var("b" + str(layer_number + 1),
                       ZeroInitializer(),
                       shape=(cnt_neurons, 1))
        return chains.core.node_factory.fully_connected(
            features, weights, biases, first_layer=(layer_number == 0))

    def train(self,
              x_train,
              y_train,
              *,
              num_iterations=2_500,
              learning_rate=0.0075,
              print_cost=False):
        env.seed(1)
        self.cost_graph.placeholders = {self.X: x_train, self.Y: y_train}
        self.cost_graph.initialize_variables()
        optimizer = GradientDescentOptimizer(learning_rate)
        optimizer.prepare_and_check(self.cost_graph)
        costs = []
        for i in range(num_iterations):
            optimizer.run()

            # Record (and optionally print) the cost every ITERATION_UNIT steps.
            if i % ITERATION_UNIT == 0:
                costs.append(optimizer.cost)
                if print_cost:
                    print(f"Cost after iteration {i}: {optimizer.cost}")

        return costs
Example #8
        weight_matrices.append(w)
        bias_matrices.append(b)
        return weight_matrices, bias_matrices

    @staticmethod
    def layer(features, w, b, layer_num):
        return f.fully_connected(features, w, b, first_layer=(layer_num == 1))

    def train(self,
              x_train,
              y_train,
              *,
              num_iterations=30_000,
              learning_rate=0.3,
              print_cost=True):
        env.seed(3)
        self.cost_graph.placeholders = {self.X: x_train, self.Y: y_train}
        self.cost_graph.initialize_variables()
        optimizer = gd.GradientDescentOptimizer(learning_rate)
        optimizer.prepare_and_check(self.cost_graph)
        costs = []
        for i in range(num_iterations):
            # Re-seeding before every step replays the same random stream, so
            # stochastic operations in the graph draw identically each iteration.
            env.seed(1)
            optimizer.run()

            if i % ITERATION_UNIT == 0:
                costs.append(optimizer.cost)

            if print_cost and i % 10000 == 0:
                print(f"Cost after iteration {i}: {optimizer.cost}")

        return costs
Example #9
def on_epoch_start(self, epoch):
    self.seed += 1
    env.seed(self.seed)
Example #10
def __init__(self):
    env.seed(3)
    self.costs = []
    self.seed = 10
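
Taken together, the listener fragments in Examples #1, #9 and #10 assemble naturally into a single class. The sketch below shows that shape only; it is not the library's actual listener, and _Env repeats the assumption from Example #1:

import numpy as np

class _Env:
    def seed(self, value):
        np.random.seed(value)

env = _Env()

class SeedingListener:
    def __init__(self):
        env.seed(3)
        self.costs = []
        self.seed = 10

    def on_epoch_start(self, epoch):
        # A fresh, deterministic seed at the start of every epoch.
        self.seed += 1
        env.seed(self.seed)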