Code Example #1
import numpy as np
from layered.network import Network, Layer, Matrices
from layered.activation import Identity  # module paths assumed from the package layout


# Meant as a parametrized pytest fixture: register it with
# @pytest.fixture(params=[...]) so request.param supplies the hidden activation.
def network_and_weights(request):
    np.random.seed(0)
    layers = [Layer(5, Identity)] + [Layer(5, request.param) for _ in range(3)]
    network = Network(layers)
    weights = Matrices(network.shapes)
    weights.flat = np.random.normal(0, 0.01, len(weights.flat))
    return network, weights
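For context, here is a hypothetical test (not from the project) that could consume the fixture above. It assumes the function is registered with @pytest.fixture(params=[...]) so request.param supplies the activation, that network.shapes holds the (rows, columns) shape of each weight matrix, and that weights.flat concatenates all of their entries.

import numpy as np

def test_initial_weights(network_and_weights):
    # Hypothetical check: the flat weight vector covers every matrix entry.
    network, weights = network_and_weights
    assert len(weights.flat) == sum(np.prod(shape) for shape in network.shapes)
    # Drawn from N(0, 0.01), so the values should be centred near zero.
    assert abs(np.mean(weights.flat)) < 0.01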
Code Example #2
import numpy as np
from layered.network import Matrices

# Fill a Matrices container of the given shapes with small Gaussian noise.
def random_matrices(shapes):
    np.random.seed(0)
    matrix = Matrices(shapes)
    matrix.flat = np.random.normal(0, 0.1, len(matrix.flat))
    return matrix
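A small usage sketch (hypothetical, not from the project) of the helper above: because the seed is fixed inside the function, two calls with the same shapes return identical values. The example shapes are placeholders and assume Matrices accepts a list of (rows, columns) tuples.

import numpy as np

shapes = [(6, 5), (6, 5)]  # placeholder shapes for illustration
first = random_matrices(shapes)
second = random_matrices(shapes)
# The fixed seed makes the helper deterministic.
assert np.allclose(first.flat, second.flat)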
Code Example #3
import numpy as np

# Network, Layer, and the activation classes are assumed to come from the
# layered package layout used elsewhere in these examples.
from layered.network import Network, Layer
from layered.activation import Identity, Relu, Softmax

num_inputs = 28 * 28  # MNIST images are 28x28 pixels, flattened into 784 inputs
num_outputs = 10

network = Network([
    Layer(num_inputs, Identity),
    Layer(700, Relu),
    Layer(500, Relu),
    Layer(300, Relu),
    Layer(num_outputs, Softmax),
])

from layered.network import Matrices

# Scale of the random Gaussian weight initialization.
weight_scale = 0.01

weights = Matrices(network.shapes)
weights.flat = np.random.normal(0, weight_scale, len(weights.flat))

from layered.cost import SquaredError
from layered.gradient import Backprop
from layered.optimization import GradientDecent

backprop = Backprop(network, cost=SquaredError())
descent = GradientDecent()

from layered.dataset import Mnist

# Train with plain stochastic gradient descent, one example at a time.
dataset = Mnist()
for example in dataset.training:
    gradient = backprop(weights, example)
    weights = descent(weights, gradient, learning_rate=0.1)
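As a conceptual aside, the gradient-descent step in the loop above boils down to subtracting a scaled gradient from every weight. The NumPy sketch below illustrates that update rule only; it is not the library's GradientDecent implementation.

import numpy as np

def descent_step(weights, gradient, learning_rate=0.1):
    # Move each weight against the gradient of the cost to reduce it.
    return weights - learning_rate * gradient

w = np.array([[0.5, -0.2], [0.1, 0.3]])
g = np.array([[0.04, -0.01], [0.02, 0.00]])
print(descent_step(w, g))  # [[0.496, -0.199], [0.098, 0.3]]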
Code Example #4
File: main.py Project: Dencrash/layered (excerpt; the keyword arguments below apparently conclude the definition of the problem object used later in the function)
        weight_scale=0.01,
        weight_decay=1e-3,
        evaluate_every=5000,
        dataset=Mnist(),
        cost=Squared())

    # Define model and initialize weights
    network = Network([
        Layer(len(problem.dataset.training[0].data), Linear),
        Layer(700, Relu),
        Layer(500, Relu),
        Layer(300, Relu),
        Layer(len(problem.dataset.training[0].target), Sigmoid)
    ])
    weights = Matrices(network.shapes)
    weights.flat = np.random.normal(0, problem.weight_scale, len(weights.flat))

    # Classes needed during training
    backprop = ParallelBackprop(network, problem.cost)
    momentum = Momentum()
    decent = GradientDecent()
    decay = WeightDecay()
    plot = Plot()

    # Train the model
    repeats = repeated(problem.dataset.training, problem.training_rounds)
    batches = batched(repeats, problem.batch_size)
    for index, batch in enumerate(batches):
        gradient = backprop(weights, batch)
        gradient = momentum(gradient, problem.momentum)
        weights = decent(weights, gradient, problem.learning_rate)
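As a conceptual sketch (not the project's Momentum or WeightDecay classes), the two remaining update ideas used in this script can be written in plain NumPy: momentum blends the previous update direction into the current gradient, and weight decay shrinks the weights slightly towards zero on every step. The helper names below are hypothetical.

import numpy as np

def momentum_step(velocity, gradient, rate=0.9):
    # Keep a running update direction: part previous velocity, part new gradient.
    return rate * velocity + gradient

def weight_decay_step(weights, amount=1e-3):
    # Pull the weights a little towards zero to regularise the model.
    return (1 - amount) * weights

velocity = np.zeros((2, 2))
gradient = np.array([[0.10, -0.20], [0.05, 0.00]])
velocity = momentum_step(velocity, gradient)  # first step equals the gradient
weights = np.ones((2, 2))
weights = weight_decay_step(weights)          # every entry becomes 0.999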