Code Example #1
def Model_Linear_Sigmoid_2_HingeLoss():
    name = '2_Sigmoid_HingeLoss'
    model = Network()
    model.add(Linear('fc1', 784, 441, 0.01))
    model.add(Sigmoid('a1'))
    model.add(Linear('fc2', 441, 196, 0.01))
    model.add(Sigmoid('a2'))
    model.add(Linear('fc3', 196, 10, 0.01))
    loss = HingeLoss(name='loss')
    return name, model, loss
Code Example #2
File: tests.py  Project: stjordanis/claudioflow
    def test_backward(self):
        layer = Sigmoid()
        x = np.random.rand(2)
        y = layer.forward(x)
        deriv_grad = layer.backward(np.ones(1))

        numerical_grad_matrix = numerical_gradient.calc(layer.forward, x)

        # since the sigmoid is applied elementwise, the numerical gradient is a
        # Jacobian matrix that is zero everywhere except on the diagonal
        num_grad = np.diagonal(numerical_grad_matrix)

        numerical_gradient.assert_are_similar(deriv_grad, num_grad)
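Because the sigmoid is applied elementwise, the Jacobian ∂y_i/∂x_j is zero off the diagonal, which is why the test only compares against the diagonal. The same identity can be checked standalone, assuming nothing beyond NumPy (the helper below is illustrative, not part of the project):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.random.rand(2)
eps = 1e-6

# analytic derivative: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
analytic = sigmoid(x) * (1 - sigmoid(x))

# central-difference estimate of the diagonal Jacobian entries
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-6)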
Code Example #3
def network_setup(model_file_path=None):
    freq_count = 4000
    count_bins = 88 * 20
    dataset = MapsDB('../db',
                     freq_count=freq_count,
                     count_bins=count_bins,
                     batch_size=128,
                     start_time=0.5,
                     duration=0.5)
    model = Network()
    model.add(Linear('fc1', dataset.get_vec_input_width(), 2048, 0.001))
    model.add(Sigmoid('sigmoid1'))
    model.add(Linear('fc2', 2048, dataset.get_label_width(), 0.001))
    model.add(Softmax('softmax2'))

    loss = CrossEntropyLoss(name='xent')
    # loss = EuclideanLoss(name='r2')

    optim = SGDOptimizer(learning_rate=0.00001, weight_decay=0.005, momentum=0.9)
    # optim = AdagradOptimizer(learning_rate=0.001, eps=1e-6)

    input_placeholder = T.fmatrix('input')
    label_placeholder = T.fmatrix('label')
    label_active_size_placeholder = T.ivector('label_active_size')

    if model_file_path:
        model.loads(model_file_path)
    else:
        dataset.load_cache()

    model.compile(input_placeholder, label_placeholder, label_active_size_placeholder, loss, optim)
    return model, dataset, freq_count, count_bins
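A hedged usage sketch: the function either builds a fresh model (and loads the dataset cache) or restores one from disk, depending on model_file_path. The file name below is hypothetical:

model, dataset, freq_count, count_bins = network_setup()
# model, dataset, freq_count, count_bins = network_setup('model.snapshot')  # hypothetical saved-model path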
Code Example #4
def two_layer_sigmoid():
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.001))
    model.add(Sigmoid('sg1'))
    model.add(Linear('fc2', 256, 10, 0.001))
    model.add(Sigmoid('sg2'))
    config = {
        'learning_rate': 0.01,
        'weight_decay': 0.005,
        'momentum': 0.9,
        'batch_size': 100,
        'max_epoch': 20,
        'disp_freq': 50,
        'test_epoch': 5
    }
    return model, config
Code Example #5
File: tests.py  Project: stjordanis/claudioflow
    def test_LinearSigmoid(self):
        model = Seq()
        model.add(Linear(2, 1, initialize='ones'))
        model.add(Sigmoid())
        data = np.array([2., 3.])
        out = model.forward(data)
        # assuming the 'ones' initialization covers the bias as well:
        # sigmoid(2 + 3 + 1) = 0.9975, which rounds to 1.0
        self.assertEqual(round(out, 2), 1.)
Code Example #6
def build_model(config):
    model = Network()
    layer_num = 0
    for layer in config['use_layer']:
        if layer['type'] == "Linear":
            in_num = layer['in_num']
            out_num = layer['out_num']
            if "init_std" in layer.keys():
                model.add(
                    Linear(layer['type'] + str(layer_num),
                           in_num,
                           out_num,
                           init_std=layer['init_std']))
            else:
                model.add(
                    Linear(layer['type'] + str(layer_num), in_num, out_num))
            layer_num += 1
        elif layer['type'] == 'Relu':
            model.add(Relu(layer['type'] + str(layer_num)))
            layer_num += 1
        elif layer['type'] == 'Sigmoid':
            model.add(Sigmoid(layer['type'] + str(layer_num)))
            layer_num += 1
        else:
            assert 0
    loss_name = config['use_loss']
    if loss_name == 'EuclideanLoss':
        loss = EuclideanLoss(loss_name)
    elif loss_name == 'SoftmaxCrossEntropyLoss':
        loss = SoftmaxCrossEntropyLoss(loss_name)
    else:
        assert 0
    return model, loss
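The expected config layout can be read off the parser: 'use_layer' is a list of layer dicts (with 'init_std' optional for Linear layers) and 'use_loss' names the loss class. A hypothetical config this function would accept:

config = {
    'use_layer': [
        {'type': 'Linear', 'in_num': 784, 'out_num': 256, 'init_std': 0.01},
        {'type': 'Relu'},
        {'type': 'Linear', 'in_num': 256, 'out_num': 10},
    ],
    'use_loss': 'SoftmaxCrossEntropyLoss',
}
model, loss = build_model(config)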
Code Example #7
def getNetwork():
	'''
	Obtain the network structure from the specified file.
	'''
	file_name = "models/structure.json"
	if len(sys.argv)>1:
		file_name = sys.argv[1]
	with open(file_name, "r") as f:
		s = f.read()

	networks = json.loads(s)
	for network in networks:
		config = network['config']
		dis_model = network['model']
		model = Network()
		for layer in dis_model:
			if layer['type'] == 'Linear':
				model.add(Linear(layer['name'], layer['in_num'], layer['out_num'], layer['std']))
			if layer['type'] == 'Relu':
				model.add(Relu(layer['name']))
			if layer['type'] == 'Sigmoid':
				model.add(Sigmoid(layer['name']))
			if layer['type'] == 'Softmax':
				model.add(Softmax(layer['name']))
		loss = EuclideanLoss('loss')
		if 'loss' in config:
			if config['loss'] == 'CrossEntropyLoss':
				loss = CrossEntropyLoss('loss')
		yield network['name'], model, config, loss
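The loop implies the layout of models/structure.json: a list of network entries, each with 'name', 'config', and a 'model' list of layer dicts. A sketch with illustrative names and sizes (not taken from the project):

networks = [
    {
        'name': 'mlp_sigmoid',
        'config': {'loss': 'CrossEntropyLoss'},
        'model': [
            {'type': 'Linear', 'name': 'fc1', 'in_num': 784, 'out_num': 256, 'std': 0.01},
            {'type': 'Sigmoid', 'name': 'act1'},
            {'type': 'Linear', 'name': 'fc2', 'in_num': 256, 'out_num': 10, 'std': 0.01},
        ],
    },
]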
Code Example #8
def Model_Linear_Sigmoid_1_SoftmaxCrossEntropyLoss():
    name = '1_Sigmoid_SoftmaxCrossEntropyLoss'
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.01))
    model.add(Sigmoid('a1'))
    model.add(Linear('fc2', 256, 10, 0.01))
    loss = SoftmaxCrossEntropyLoss(name='loss')
    return name, model, loss
Code Example #9
def Model_Linear_Sigmoid_1_HingeLoss():
    name = '1_Sigmoid_HingeLoss'
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.01))
    model.add(Sigmoid('a1'))
    model.add(Linear('fc2', 256, 10, 0.01))
    loss = HingeLoss(name='loss')
    return name, model, loss
Code Example #10
File: tests.py  Project: iliailmer/numpy_learn
def testNetwork():  # noqa D103
    net = Network([Linear(10, 64), ReLU(), Linear(64, 2), Sigmoid()])
    x = np.random.randn(32, 10)
    y = np.random.randn(32, 2)
    mse = MSE()
    optim = SGD(0.001, 0.001)
    pred = net(x)
    _ = mse(pred, y)
    _ = net.backward(mse.grad)
    optim.step(net)
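The last five lines form one complete optimization step: forward, loss, backward, update. Assuming the same Network/MSE/SGD API (and that mse(pred, y) caches its input gradient in mse.grad, as the test implies), a minimal training loop just repeats them:

for step in range(100):
    pred = net(x)            # forward pass
    loss = mse(pred, y)      # compute loss; also populates mse.grad
    net.backward(mse.grad)   # backpropagate through the layers
    optim.step(net)          # apply the SGD update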
Code Example #11
    def test_Perceptron(self):
        train_set, test_set = gen_data()

        model = Seq([
            Linear(2, 5, initialize='random'),
            Sigmoid(),
            Linear(5, 1, initialize='random'),
            Sigmoid(),
        ])

        OnlineTrainer().train(
            model,
            train_set=train_set,
            loss=SquaredLoss(),
            # optimizer=SGD(learning_rate=0.1),
            optimizer=MomentumSGD(learning_rate=0.1, momentum=0.9),
            # optimizer=AdaGrad(learning_rate=0.9),
            # optimizer=RMSProp(learning_rate=0.1, decay_rate=0.9),
            epochs=200,
            save_progress=False)

        # model.learn_minibatch(
        # input_data=train_data,
        # target_data=train_targets,
        # loss=SquaredLoss(),
        # batch_size=5,
        # # optimizer=SGD(learning_rate=0.1),
        # # optimizer=MomentumSGD(learning_rate=0.1, momentum=0.9),
        # optimizer=AdaGrad(learning_rate=0.9),
        # # optimizer=RMSProp(learning_rate=0.1, decay_rate=0.9),
        #
        # epochs=100,
        # save_progress=True)

        model.save_to_file('perceptron.pkl')

        scatter_test_data(test_set, model)

        # model.plot_errors_history()
        # model.plot_loss_gradient_history()
        plt.show()
Code Example #12
    def addSigmoidLayer(self, **kwargs):
        """
        Add sigmoid classification layer.
        """

        input_layer = self.input_layer if not self.all_layers \
            else self.all_layers[-1]
        new_layer = Sigmoid(input_layer, **kwargs)

        self.all_layers += (new_layer, )

        self.n_layers = len(self.all_layers)
Code Example #13
File: tests.py  Project: stjordanis/claudioflow
    def test_TwoLinearSigmoidLayers(self):
        x = np.random.rand(5)

        real_model = Seq([
            Linear(5, 3, initialize='ones'),
            Sigmoid(),
            Linear(3, 5, initialize='ones'),
            Sigmoid()
        ])
        y = real_model.forward(x)
        real_grad = real_model.backward(np.ones(5))

        num_model = Seq([
            Linear(5, 3, initialize='ones'),
            Sigmoid(),
            Linear(3, 5, initialize='ones'),
            Sigmoid()
        ])
        num_grad = numerical_gradient.calc(num_model.forward, x)

        num_grad = np.sum(num_grad, axis=1)
        numerical_gradient.assert_are_similar(real_grad, num_grad)
Code Example #14
    def __init__(self, input_size: int, hidden_size: int, output_size: int):
        W1 = 0.01 * np.random.randn(input_size, hidden_size)
        b1 = np.zeros(hidden_size)
        W2 = 0.01 * np.random.randn(hidden_size, output_size)
        b2 = np.zeros(output_size)

        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Code Example #15
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        w1 = np.random.randn(I, H) * 0.01
        b1 = np.zeros(H)  #np.random.randn(H)
        w2 = np.random.randn(H, O) * 0.01
        b2 = np.zeros(O)  #np.random.randn(O)

        self.layers = [Affine(w1, b1), Sigmoid(), Affine(w2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        self.params, self.grads = [], []
        for l in self.layers:
            self.params += l.params
            self.grads += l.grads  # gradients are aggregated only here -> each layer must update its gradient arrays in place (never rebind them)
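The comment matters because self.grads holds references to each layer's gradient arrays, collected once at construction time. If a layer later rebinds its gradient to a new array instead of writing into the existing one, the aggregated list silently goes stale. A minimal NumPy sketch of the difference (names are illustrative):

import numpy as np

layer_grad = np.zeros(3)
all_grads = [layer_grad]             # aggregated once, like self.grads

layer_grad[...] = [1., 2., 3.]       # in-place write: the shared reference stays valid
print(all_grads[0])                  # [1. 2. 3.]

layer_grad = np.array([9., 9., 9.])  # rebinding: the list still points at the old array
print(all_grads[0])                  # still [1. 2. 3.]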
Code Example #16
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.zeros(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.zeros(O)

        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]

        self.loss_layer = SoftmaxWithLoss()

        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Code Example #17
def create_model(hidden_layer, nb_nodes):
    if hidden_layer <= 0:
        raise ValueError("Model needs at least 1 hidden layer")

    elif hidden_layer > 10:
        raise ValueError("Maximum number of hidden layers is 10")

    model = Model()
    input_length = 5
    for i in range(hidden_layer):
        model.append_layer(
            Dense(node_count=nb_nodes[i], input_length=input_length))
        model.append_layer(Sigmoid())
        input_length = nb_nodes[i]

    return model
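A hypothetical call: nb_nodes supplies one width per hidden layer, and the hard-coded input_length of 5 means the first Dense layer expects 5 input features.

model = create_model(hidden_layer=2, nb_nodes=[16, 8])
# layers: Dense(16, input_length=5), Sigmoid, Dense(8, input_length=16), Sigmoid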
Code Example #18
File: ln1.py  Project: skisa31/learning_deeplearning
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # Initialize the weights and biases
        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.random.randn(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.random.randn(O)

        # Create the layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        # Collect all the weights into lists
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Code Example #19
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # initialize weights and biases
        W1 = 0.01 * cp.random.randn(I, H)
        b1 = cp.zeros(H)
        W2 = 0.01 * cp.random.randn(H, O)
        b2 = cp.zeros(O)

        # create the layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        # combine all weights and grads into lists
        self.params, self.grads = [], []

        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Code Example #20
File: run_mlp.py  Project: wmhst7/UndergradProjects
def build_model_from_string(def_str):
    model = Network()
    def_str = def_str.strip()
    layer_strs = def_str.split(';')
    for layer_str in layer_strs:
        tokens = layer_str.split(',')
        if (len(tokens) <= 1):
            raise Exception(
                "Invalid token: {} in layer definition".format(layer_str))
        type = tokens[0].strip()
        name = tokens[1].strip()
        if (type == "linear"):
            model.add(
                Linear(name, int(tokens[2]), int(tokens[3]), float(tokens[4])))
        elif (type == "sigmoid"):
            model.add(Sigmoid(name))
        elif (type == "relu"):
            model.add(Relu(name))
        else:
            raise NotImplementedError("Unsupported layer type {}".format(type))
    print("=" * 50 + "\nModel Summary:\n{}\n".format(model) + "=" * 50 + "\n")
    return model
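The parser implies a 'type, name[, in, out, std]' token format with layers separated by semicolons. A hypothetical definition string:

def_str = "linear, fc1, 784, 256, 0.01; sigmoid, act1; linear, fc2, 256, 10, 0.01"
model = build_model_from_string(def_str)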
Code Example #21
    def __init__(self,
                 sizes,
                 batch_size,
                 epoch_num,
                 learning_rate,
                 use_trained_params=False,
                 filename=None):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.batch_size = batch_size
        self.epoch_num = epoch_num
        self.learning_rate = learning_rate

        if use_trained_params:
            path = os.path.dirname(os.path.abspath(__file__))
            loaded_params = np.load(os.path.join(path, filename))
            self.W1 = loaded_params['W1']
            self.b1 = loaded_params['b1']
            self.W2 = loaded_params['W2']
            self.b2 = loaded_params['b2']
        else:
            np.random.seed(12)
            self.W1 = np.sqrt(1 / sizes[0]) * np.random.randn(
                sizes[0], sizes[1])  #(784,50)
            self.b1 = np.sqrt(1 / sizes[0]) * np.random.randn(sizes[1])
            self.W2 = np.sqrt(1 / sizes[1]) * np.random.randn(
                sizes[1], sizes[2])  #(50,10)
            self.b2 = np.sqrt(1 / sizes[1]) * np.random.randn(sizes[2])

        # layers of network
        self.layers = {}
        self.layers['FullyConnected1'] = FullyConnected(self.W1, self.b1)
        self.layers['Activation'] = Sigmoid()
        self.layers['FullyConnected2'] = FullyConnected(self.W2, self.b2)

        self.lastLayer = SoftmaxLoss()
Code Example #22
from network import Network
from utils import LOG_INFO
from layers import Relu, Sigmoid, Linear
from loss import EuclideanLoss
from solve_net import train_net, test_net
from load_data import load_mnist_2d
import numpy as np

train_data, test_data, train_label, test_label = load_mnist_2d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Linear('fc1', 784, 100, 0.01))
model.add(Sigmoid('Sigmoid1'))
model.add(Linear('fc2', 100, 10, 0.01))

loss = EuclideanLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between progress displays within an epoch.

config = {
    'learning_rate': 0.08,
    'weight_decay': 0.001,
    'momentum': 0.9,
    'batch_size': 80,
    'max_epoch': 100,
Code Example #23
kernel_weights_0 = np.array([[5, -5],
                             [5, -5]])

bias_weights_1 = np.array([-5])
kernel_weights_1 = np.array([[5],
                             [5]])

saved_bias_0 = saved_weights(bias_weights_0)
saved_kernel_0 = saved_weights(kernel_weights_0)

saved_bias_1 = saved_weights(bias_weights_1)
saved_kernel_1 = saved_weights(kernel_weights_1)

model = Sequential()
model.add(Dense(2, 2, kernel_initializer=saved_kernel_0, bias_initializer=saved_bias_0, alpha=50.0))
model.add(Sigmoid())
model.add(Dense(1, 2, kernel_initializer=saved_kernel_1, bias_initializer=saved_bias_1, alpha=50.0))
model.add(Sigmoid())

X = np.array([[0, 0],
              [0, 1],
              [1, 0],
              [1, 1]])
y = np.array([[1],
              [0],
              [0],
              [1]])

print("Prediction")
p = model.predict(X)
print(p)
Code Example #24
from network import Network
from utils import LOG_INFO
from layers import Relu, Sigmoid, Linear
from loss import EuclideanLoss
from solve_net import train_net, test_net
from load_data import load_mnist_2d


train_data, test_data, train_label, test_label = load_mnist_2d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Linear('fc1', 784, 10, 0.01))
model.add(Sigmoid('fc2'))
#model.add(Linear('fc3', 10, 10, 0.01))
#model.add(Sigmoid('fc4'))

loss = EuclideanLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between progress displays within an epoch.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0.0,
    'momentum': 0.9,
    'batch_size': 100,
Code Example #25
File: tests.py  Project: stjordanis/claudioflow
    def test_forward(self):
        layer = Sigmoid()
        x = np.array([2., 3., 4.])
        y = layer.forward(x)
Code Example #26
import numpy as np

### GLOBALS ###
BATCH_SIZE = 50
EPOCHS = 1000
LEARNING_RATE = 0.001

if __name__ == '__main__':

    rawData, rawLabels = extract_raw()
    trainingData, labels = raw_2_numpy(rawData, rawLabels)
    total = trainingData.shape[0]
    trainingData, labels = data_batches(trainingData, labels, BATCH_SIZE)

    layer1 = Dense(trainingData.shape[2], 16)
    activation1 = Sigmoid()
    layer2 = Dense(16, 10)
    activation2 = SoftMax()
    cost = CostMeanSquared()

    for epoch in range(EPOCHS):
        print('Epoch: ' + str(epoch + 1) + '/' + str(EPOCHS))
        print('')
        correct = 0
        for batch in range(total // BATCH_SIZE):

            ### STOCHASTIC GRADIENT DESCENT ###

            layer1.forward(trainingData[batch])
            activation1.forward(layer1.outputs)
            layer2.forward(activation1.outputs)
Code Example #27
File: tests.py  Project: iliailmer/numpy_learn
def testSigmoid():  # noqa D103
    s = Sigmoid()
    x = np.random.randn(32, 10)  # batch size by in_features
    output = s(x)
    assert output.shape == (32, 10)
Code Example #28
from math import sqrt  # sqrt is used below; the import is assumed from the original file

width1 = 2048  # width1 must be defined for the lines below; 2048 is the only value visible in this excerpt
#width2 = 256
width2 = 1024
#width2 = 2048

init_std_dev = 1 / sqrt(784)
init_std_dev_l1 = 1 / sqrt(width1)
init_std_dev_l2 = 1 / sqrt(width2)

model = Network()

if usedNN == '1 layer':
    model.add(Linear('fc1', 784, width1, init_std_dev))
    if usedActivation == 'sigmoid':
        model.add(Sigmoid('act1'))
    else:
        model.add(Relu('act1'))
    model.add(Linear('fc2', width1, 10, init_std_dev))
if usedNN == '2 layers':
    model.add(Linear('fc1', 784, width1, init_std_dev))
    if usedActivation == 'sigmoid':
        model.add(Sigmoid('act1'))
    else:
        model.add(Relu('act1'))
    model.add(Linear('fc2', width1, width2, init_std_dev))
    if usedActivation == 'sigmoid':
        model.add(Sigmoid('act2'))
    else:
        model.add(Relu('act2'))
    model.add(Linear('fc3', width2, 10, init_std_dev))
Code Example #29
File: ex04.py  Project: prospros001/deep-learning
# Sigmoid Layer Test
import os
import sys
from pathlib import Path

import numpy as np

try:
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from layers import Sigmoid
except ImportError:
    print('Library Module Not Found')

# Test 1 (Vector)
layer = Sigmoid()

x = np.array([0.1, -0.2, 0.3, -0.4, 0.5])
print(x)

y = layer.forward(x)
print(y)
print(layer.out)

dout = np.array([-0.1, -0.2, -0.3, 0.4, -0.5])
dout = layer.backward(dout)
print(dout)

print('=========================================')

# Test 2 (Matrix)
x = np.array([
Code Example #30
from utils import LOG_INFO
from layers import Relu, Sigmoid, Linear, Leaky_Relu, ELU
from loss import EuclideanLoss
from solve_net import train_net, test_net
from load_data import load_mnist_2d

train_data, test_data, train_label, test_label = load_mnist_2d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Linear('fc1', 784, 1000, 0.01))
#model.add(ELU('elu1', 0.5))
#model.add(Leaky_Relu('lrelu1', 0.1))
#model.add(Relu('relu1'))
model.add(Sigmoid('sigmoid1'))
model.add(Linear('fc2', 1000, 100, 0.01))
#model.add(Relu('relu2'))
model.add(Sigmoid('sigmoid2'))
#model.add(Leaky_Relu('lrelu2', 0.1))
model.add(Linear('fc3', 100, 10, 0.01))
#model.add(Sigmoid('sigmoid2'))

loss = EuclideanLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between progress displays within an epoch.