def test_many_vector_expressions(self):
    """Forward values and backward gradients for several symbolic models.

    Each table row pairs a model built from Var/Sigmoid nodes with its
    expected forward output and the expected (dJ/da, dJ/db) gradients for
    an upstream gradient of ones.
    """
    a = Var('a')
    b = Var('b')
    av = np.random.rand(3)
    bv = np.random.rand(3)
    sigm = layers.Sigmoid().forward
    dJdy = np.ones(3)

    # Pre-compute the sigmoid outputs reused by the expected gradients.
    s_ab = sigm(av + bv)          # sigmoid of (a + b)
    s_aba = sigm(av + bv + av)    # sigmoid of (a + b + a)

    models = [
        (a + b, av + bv, (1, 1)),
        (a * a + b, av ** 2 + bv, (2 * av, 1)),
        (a + b + a, av + bv + av, (2, 1)),
        (Sigmoid(a + b), s_ab,
         (s_ab * (1 - s_ab), s_ab * (1 - s_ab))),
        (Sigmoid(a + b + a), s_aba,
         (2 * s_aba * (1 - s_aba), s_aba * (1 - s_aba))),
    ]
    for model, expected_y, (expected_da, expected_db) in models:
        y = model.forward_variables({'a': av, 'b': bv})
        assert_array_equal(y, expected_y)
        grad = model.backward_variables(dJdy, debug=True)
        assert_array_almost_equal(
            grad['a'], expected_da,
            err_msg="wrong gradient in model: %s" % model)
        assert_array_almost_equal(
            grad['b'], expected_db,
            err_msg="wrong gradient in model: %s" % model)
def __init__(self, input_size, mid_size, out_size, sig=True):
    """Build a two-affine-layer network ending in a SmLo (softmax-loss) head.

    With sig=True the hidden activation is Sigmoid; otherwise ReLU followed
    by Dropout. The weight-initialization magnitude also depends on the
    activation choice (1 for sigmoid, 2 for ReLU).
    """
    mag = 1 if sig else 2
    # Standard deviations scale with 1/sqrt(fan-in) of each layer.
    std_in = mag / np.sqrt(input_size)
    std_mid = mag / np.sqrt(mid_size)
    self.weights = {
        'W1': np.random.normal(0, std_in, (input_size, mid_size)),
        'b1': np.random.normal(0, std_in, (mid_size,)),
        'W2': np.random.normal(0, std_mid, (mid_size, out_size)),
        'b2': np.random.normal(0, std_mid, (out_size,)),
    }
    self.layers = OrderedDict()
    self.layers['Affine1'] = layers.Affine(self.weights['W1'], self.weights['b1'])
    if sig:
        self.layers['Sig'] = layers.Sigmoid()
    else:
        self.layers['ReLU'] = layers.ReLU()
        self.layers['Dropout'] = layers.Dropout()
    self.layers['Affine2'] = layers.Affine(self.weights['W2'], self.weights['b2'])
    self.last_layer = layers.SmLo()
def test_proper_equation_sum(self):
    """Backward through Sigmoid(a + a): gradient w.r.t. 'a' is 2*s*(1-s)."""
    var_a = Var('a')
    var_b = Var('b')
    a_val = np.array([2.3, 3., 3])
    b_val = np.array([3., 5., 4])
    sigmoid = layers.Sigmoid().forward

    model = Sigmoid(var_a + var_a)
    out = model.forward_variables({'a': a_val, 'b': b_val})
    expected = sigmoid(a_val + a_val)
    assert_array_equal(out, expected)

    # Upstream gradient of ones; the repeated variable doubles the gradient.
    grads = model.backward_variables(np.ones(3))
    assert_array_almost_equal(grads['a'], 2 * expected * (1 - expected))
def test_proper_equation(self):
    """Forward and backward for the model Sigmoid(a*a) + b*b*b + a.

    Expected forward: y = sigm(a^2) + b^3 + a.
    Expected gradients (upstream gradient of ones):
        dy/da = 2a * sigm(a^2) * (1 - sigm(a^2)) + 1
        dy/db = 3 * b^2
    """
    var_a = Var('a')
    var_b = Var('b')
    a = np.array([2.3, 3., 3])
    b = np.array([3., 5., 4])
    input_dict = {'a': a, 'b': b}
    sigm = layers.Sigmoid().forward
    model = Sigmoid(var_a * var_a) + var_b * var_b * var_b + var_a
    # Fix: removed a stray debug print(model) left over from development;
    # tests should not write to stdout.
    y = model.forward_variables(input_dict)
    assert_array_equal(y, sigm(a**2) + (b**3 + a))
    grad = model.backward_variables(np.ones(3))
    assert_array_almost_equal(grad['a'], 2 * a * sigm(a**2) * (1 - sigm(a**2)) + 1)
    assert_array_almost_equal(grad['b'], 3 * b * b)
def test_sigmoid(self):
    """Sigmoid at zero: activation is 0.5 and derivative is 0.25."""
    layer = layers.Sigmoid(3, 3)
    self.assertEqual(layer.a(0), 0.5)
    self.assertEqual(layer.der(0), 0.25)
def test_sigmoid():
    """Smoke-test that Sigmoid can be constructed with a 'sigma' keyword.

    Fix: removed a leftover pdb.set_trace() debugger breakpoint — a test
    must run unattended. The original also asserted nothing; a minimal
    construction check is added so the test has a pass/fail outcome.
    """
    sig = ly.Sigmoid(**{'sigma': 2})
    # NOTE(review): no further API is visible here; presumably forward/der
    # checks were intended — TODO extend once ly.Sigmoid's API is confirmed.
    assert sig is not None
def __init__(self, nb_feature, nb_hidden, nb_class):
    """Two-layer classifier: Dense -> Sigmoid -> Dense -> softmax cross-entropy.

    nb_feature: input dimensionality; nb_hidden: hidden width;
    nb_class: number of output classes.
    """
    # Hidden projection followed by a sigmoid non-linearity.
    self.dense1 = layers.Dense(N=nb_feature, H=nb_hidden)
    self.sigmoid = layers.Sigmoid()
    # Output projection and the combined softmax + cross-entropy head.
    self.dense2 = layers.Dense(N=nb_hidden, H=nb_class)
    self.softmaxce = layers.SoftmaxCE()
# Flatten each sample's (x, y) point pairs into one feature row per sample.
# NOTE(review): `data_group`, `output`, `train_x`, `train_y`, `config` and
# `nn` are defined earlier in the script (not visible in this chunk).
for input_data in data_group["input"]:
    x_data = []
    for input_value in input_data:
        x_data.append(input_value[0])  # first coordinate of the point
        x_data.append(input_value[1])  # second coordinate of the point
    train_x.append(x_data)
    train_y.append(output)  # same target for every sample of this group

train_x = np.array(train_x)
train_y = np.array(train_y)

# Build the network: FC(50) -> Sigmoid -> FC(10) -> Sigmoid -> FC(5).
net = []
# NOTE(review): the seed input's width is len(train_x) — the number of
# samples, not the number of features; train_x.shape[1] looks intended.
# Confirm against layers.FC's expected input shape.
inputs = np.random.randn(config['batch_size'], len(train_x))
net += [layers.FC(inputs, 50, "fc1")]
net += [layers.Sigmoid(net[-1], "sg1")]
net += [layers.FC(net[-1], 10, "fc2")]
net += [layers.Sigmoid(net[-1], "sg2")]
net += [layers.FC(net[-1], 5, "fc3")]
loss = layers.MeanSquareError()
nn.train(train_x, train_y, net, loss, config)

# Interactive evaluation loop: read a JSON file of points, reshape to a
# single row of 100 values, and run it through the trained network.
# NOTE(review): loop has no exit condition other than killing the process.
while True:
    pointsFilename = input("Enter file name:")
    with open(pointsFilename) as f:
        points = json.load(f)
    points = np.array(points).reshape(1, 100)
    nn.evaluate(net, np.array(points))
    print('DONE')
def __init__(self, *args):
    """Store the given inputs and attach a fresh Sigmoid layer."""
    self.inputs = args
    self.layer = layers.Sigmoid()
# Finish the first feed-forward net: three Dropout(Tanh) blocks then a
# linear output layer. NOTE(review): `ff1` is created earlier in the file
# (outside this chunk).
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Linear(28, 8)

# Second net: same tanh topology, different hyper-parameters.
ff2 = FeedForward(learn_rate=0.07, momentum=0.2, weight_decay=0.23)
ff2 += layers.Tanh(6, 23)
ff2 += layers.Dropout(layers.Tanh(23, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Linear(28, 8)

# Third net: sigmoid hidden layers instead of tanh.
ff3 = FeedForward(learn_rate=0.04, momentum=0.6, weight_decay=0.4)
ff3 += layers.Tanh(6, 23)
ff3 += layers.Dropout(layers.Sigmoid(23, 28), percentage=0.3)
ff3 += layers.Dropout(layers.Sigmoid(28, 28), percentage=0.3)
ff3 += layers.Dropout(layers.Sigmoid(28, 28), percentage=0.3)
ff3 += layers.Dropout(layers.Sigmoid(28, 28), percentage=0.3)
ff3 += layers.Linear(28, 8)

# NOTE(review): ff2 and ff3 are constructed but never used — only ff1 is
# placed in the ensemble. Ensemble(ff1, ff2, ff3) may have been intended;
# confirm against the Ensemble API before changing.
ensemble = Ensemble(ff1)

# Sample inputs (6 features each) for a quick manual check.
test = (
    [10, 12, 0, 0, 3, 17],
    [5, 8, 0, 0, 5, 21],
    [10, 0, 15, 0, 6, 11],
)
error = []
v_error = []