コード例 #1
0
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from layers import SoftmaxWithLoss
    from common import softmax
    import twolayernet as network
except ImportError:
    print('Library Module Can Not Found')

# 1. load training/test data
# 1. load training/test data: a single 3-class sample and its one-hot label
_x = np.array([2.6, 3.9, 5.6])
_t = np.array([0, 0, 1])

# 2. hyperparameter (none needed for this example)

# 3. initialize layer
softmax_loss_layer = SoftmaxWithLoss()

# Test: one forward pass (cross-entropy loss) and one backward pass (gradient)
ce_loss = softmax_loss_layer.forward(_x, _t)
grad = softmax_loss_layer.backward(1)
print(ce_loss, grad)

# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


def forward_propagation(x):
    """Forward pass: convert raw scores *x* to probabilities via softmax."""
    return softmax(x)


# Monkey-patch the network module's forward pass with the local softmax-only
# version, then evaluate the loss on the sample above.
# NOTE(review): the attribute name 'forward_progation' looks misspelled
# ('propagation') — verify it matches the name twolayernet actually reads;
# if the module spells it correctly, this assignment silently has no effect.
network.forward_progation = forward_propagation
loss = network.loss(_x, _t)
print(loss)
コード例 #2
0
    # 4-1. fetch mini-batch: sample `szbatch` random indices from the train set
    batch_mask = np.random.choice(sztrain, szbatch)
    train_x_batch = train_x[batch_mask]                 # 100 x 784
    train_t_batch = train_t[batch_mask]                 # 100 x 10

    # 4-2. gradient (numerical differentiation — slow, hence the stopwatch)
    stime = time.time()                 # stopwatch: start
    gradient = network.numerical_gradient_net(train_x_batch, train_t_batch)
    elapsed = time.time() - stime       # stopwatch: end

    # 4-3. update parameters (plain SGD step)
    for key in network.params:
        network.params[key] -= ratelearning * gradient[key]

    # 4-4. train loss on the current mini-batch
    loss = network.loss(train_x_batch, train_t_batch)
    train_losses.append(loss)

    # 4-5. accuracy once per epoch
    # BUG FIX: original tested `idx / szepoch == 0`, which (true division)
    # holds only at idx == 0, so accuracy was recorded a single time.
    # Modulo records it at every epoch boundary as intended.
    if idx % szepoch == 0:
        train_accuracy = network.accuracy(train_x, train_t)
        train_accuracies.append(train_accuracy)

        test_accuracy = network.accuracy(test_x, test_t)
        test_accuracies.append(test_accuracy)

    print(f'#{idx}: loss:{loss}, elapsed time: {elapsed}s')

# 5. serialization — capture the current local time, presumably to timestamp
# the saved model/parameter file below — TODO confirm against the lines that
# follow this chunk.
now = datetime.datetime.now()