def prepare(self):
    if self.output_dim is None:
        self.output_dim = self.input_dim // self.num_pieces
    if self.linear_transform:
        self.transformer = Dense(
            self.output_dim * self.num_pieces).initialize(self.input_dim)
        self.register(self.transformer)
class DenseTest(unittest.TestCase):

    def setUp(self):
        """Configures and sets up variables for each test case.

        N (int): Number of inputs
        D (int): Input dimension, i.e. the total number of pixels in each input image
        H (int): Output/hidden unit dimension
        """
        np.random.seed(314)
        self.img_height = 28
        self.img_width = 28
        self.N = 10
        self.D = self.img_height * self.img_width
        self.H = 10
        self.layer = Dense()

    def tearDown(self):
        """Tear down after each test case."""
        pass

    def test_forward_prop(self):
        x = np.linspace(-1, 1, num=self.N * self.D).reshape(self.N, self.img_height, self.img_width)
        w = np.linspace(-0.5, 0.5, num=self.D * self.H).reshape(self.D, self.H)
        b = np.linspace(-0.5, 0.5, num=self.H)
        output = self.layer.forward_prop(x, w, b)
        # The layer should flatten each image and compute x @ w + b.
        expected_output = np.dot(x.reshape(self.N, self.D), w) + b
        np.testing.assert_array_almost_equal(expected_output, output, decimal=7)

    def test_backprop(self):
        x = np.random.randn(self.N, self.img_height, self.img_width)
        w = np.random.randn(self.D, self.H)
        b = np.random.randn(self.H)
        grad_output = np.random.randn(self.N, self.H)

        # Numerical gradient w.r.t. inputs
        num_grad_x = eval_numerical_gradient_array(
            f=lambda x: self.layer.forward_prop(x, w, b), x=x, df=grad_output)

        # Numerical gradient w.r.t. weights
        num_grad_w = eval_numerical_gradient_array(
            f=lambda w: self.layer.forward_prop(x, w, b), x=w, df=grad_output)

        # Numerical gradient w.r.t. biases
        num_grad_b = eval_numerical_gradient_array(
            f=lambda b: self.layer.forward_prop(x, w, b), x=b, df=grad_output)

        # Compute gradients using the backprop algorithm
        grad_x, grad_w, grad_b = self.layer.backprop(grad_output)
        np.testing.assert_array_almost_equal(num_grad_x, grad_x, decimal=7)
        np.testing.assert_array_almost_equal(num_grad_w, grad_w, decimal=7)
        np.testing.assert_array_almost_equal(num_grad_b, grad_b, decimal=7)
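The tests above rely on an `eval_numerical_gradient_array` helper that is not shown in this snippet. A minimal centered-difference sketch of such a helper, matching the signature used in the calls above (the implementation details are an assumption):

import numpy as np

def eval_numerical_gradient_array(f, x, df, h=1e-5):
    # Numerical gradient of sum(f(x) * df) w.r.t. x via centered differences:
    # grad[ix] ~ sum((f(x + h*e_ix) - f(x - h*e_ix)) * df) / (2h)
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        old_value = x[ix]
        x[ix] = old_value + h
        pos = f(x).copy()   # forward pass with x[ix] nudged up
        x[ix] = old_value - h
        neg = f(x).copy()   # forward pass with x[ix] nudged down
        x[ix] = old_value   # restore the original entry
        grad[ix] = np.sum((pos - neg) * df) / (2 * h)
        it.iternext()
    return grad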
def __init__(self, input_dim, sizes):
    """Builds a stack of Dense layers: ReLU on the hidden layers,
    sigmoid on the output layer."""
    self.layers = []
    # Note: insert() mutates the caller's list.
    sizes.insert(0, input_dim)
    # Hidden layers
    for i in range(1, len(sizes) - 1):
        layer = Dense(sizes[i], sizes[i - 1], Relu())
        self.layers.append(layer)
    # Output layer
    layer = Dense(sizes[-1], sizes[-2], Sigmoid())
    self.layers.append(layer)
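For illustration, a hypothetical instantiation of the constructor above (the enclosing class name `MLP` is an assumption, as the class itself is not shown):

# 784 -> 128 (ReLU) -> 64 (ReLU) -> 1 (Sigmoid)
net = MLP(input_dim=784, sizes=[128, 64, 1])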
def test_dense(num_layers):
    input = torch.rand(10, 3, 32, 32)
    model_name = "dense-" + str(num_layers)
    model = Dense(
        config={
            "name": model_name,
            "num_layers": num_layers,
        },
        num_classes=100,
    )
    output = model(input)
    assert model.name() == model_name
    assert list(output.size()) == [10, 100]
def __init__(self, input_shape):
    print("Creating Network, got {} as input shape".format(input_shape))
    self.network = []  # Initially the network is empty

    # There are n_features in the training data, so n_features input nodes.
    # First hidden layer has 12 neurons and n_features inputs
    self.network.append(Dense(12, input_shape))
    # Second hidden layer has 6 neurons and 12 inputs
    self.network.append(Dense(6, 12))
    # Output layer has 2 neurons and 6 inputs
    self.network.append(Dense(2, 6))

    # Previous outputs to use as inputs to the next layer in the network
    self.previous_layers_outputs = []
    print('Done, Network Created...')
def __init__(self, shape, ActivFun):
    """
    Input:
        shape: a tuple whose length is the number of layers and whose
            elements give the number of nodes/neurons in the corresponding layer
        ActivFun: a list whose elements are either the name of an activation
            function or an activation function object itself

    Desc:
        Builds the list of all hidden layers plus the output layer; the
        elements of this list are objects of the Layer class.
    """
    self.shape = np.array(shape)
    self.size = self.shape.size

    if len(ActivFun) == 1:
        # One activation shared by every layer
        self.ActivFun = [ActivFun[0] for k in range(self.size - 1)]
    elif len(ActivFun) == 2:
        # First activation for the hidden layers, second for the output layer
        self.ActivFun = [ActivFun[0] for k in range(self.size - 2)] + [ActivFun[-1]]
    else:
        # Otherwise one activation must be given per layer
        assert len(ActivFun) == self.size - 1
        self.ActivFun = ActivFun

    self.layers = [
        Dense((shape[k - 1], shape[k]), self.ActivFun[k - 1])
        for k in range(1, self.size - 1)
    ] + [OutputLayer((shape[-2], shape[-1]), self.ActivFun[-1])]
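A hypothetical instantiation, assuming the enclosing class is called `Network` and that string names are accepted for activations (both assumptions for illustration):

# Four layers of 784, 128, 64 and 10 neurons; with two entries in ActivFun,
# the first is used for all hidden layers and the second for the output layer.
net = Network(shape=(784, 128, 64, 10), ActivFun=["relu", "softmax"])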
class Maxout(NeuralLayer):
    """
    Maxout activation unit.
    - http://arxiv.org/pdf/1302.4389.pdf
    """

    def __init__(self, output_dim=None, num_pieces=4, init=None, linear_transform=True):
        """
        :param num_pieces: pieces of sub maps
        """
        super(Maxout, self).__init__("maxout")
        self.num_pieces = num_pieces
        self.output_dim = output_dim
        self.linear_transform = linear_transform
        self.init = init

    def prepare(self):
        if self.output_dim is None:
            self.output_dim = self.input_dim // self.num_pieces
        if self.linear_transform:
            self.transformer = Dense(self.output_dim * self.num_pieces).initialize(self.input_dim)
            self.register(self.transformer)

    def compute_tensor(self, x):
        if self.linear_transform:
            x = self.transformer.compute_tensor(x)
        # x ~ batch, time, size / batch, size
        new_shape = [x.shape[i] for i in range(x.ndim - 1)] + [self.output_dim, self.num_pieces]
        # new_shape ~ batch, time, out_dim, pieces / batch, out_dim, pieces
        output = T.max(x.reshape(new_shape, ndim=x.ndim + 1), axis=x.ndim)
        return output
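The core of `compute_tensor` is a reshape followed by a max over the last axis. A standalone NumPy sketch of that operation (all names here are local to the sketch, not part of the class above):

import numpy as np

batch, out_dim, pieces = 2, 3, 4
# Output of the linear transform: `pieces` candidate activations per unit.
x = np.random.randn(batch, out_dim * pieces)
# Group the candidates, then keep the maximum of each group.
grouped = x.reshape(batch, out_dim, pieces)
y = grouped.max(axis=-1)
assert y.shape == (batch, out_dim)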
class DenseTest(unittest.TestCase):
    # When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test.

    def setUp(self):
        self.N = 2  # number of inputs
        self.D = 3  # input dimension
        self.H = 4  # output/hidden dimension
        self.layer = Dense()  # defines the layer we are testing

    def tearDown(self):
        pass

    def test_one_plus_one(self):
        self.assertEqual(1 + 1, 2)

    def test_one_plus_two(self):
        self.assertEqual(1 + 2, 3)

    def test_forward(self):
        # `numpy.linspace(start, stop, num=50, ...)` returns an array of evenly
        # spaced numbers over a specified interval. `num` is the number of
        # samples to generate; it defaults to 50 and must be non-negative.
        x = np.linspace(-1, 1, num=self.N * self.D).reshape(self.N, self.D)
        w = np.linspace(-0.5, 0.5, num=self.D * self.H).reshape(self.D, self.H)
        b = np.linspace(-0.5, 0.5, num=self.H)
        output = self.layer.forward(x, w, b)
        expected_output = np.dot(x, w) + b
        np.testing.assert_array_almost_equal(expected_output, output, decimal=9)
        # TODO: why these particular linspace ranges for x, w, and b?
def Dense(x, args, name):
    """
    Create a fully connected layer.

    :param x: the placeholder for the input tensor
    :param args: the arguments for the fc layer (num_in, num_out)
    :param name: a label for the operation
    :return: a fully connected layer tensor
    """
    from dense import Dense
    return Dense(x, args, name)
from dense import Dense
from activations import Tanh
from losses import mse, mse_prime
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# XOR truth table as column vectors
X = np.reshape([[0, 0], [0, 1], [1, 0], [1, 1]], (4, 2, 1))
Y = np.reshape([[0], [1], [1], [0]], (4, 1, 1))

epochs = 10000
learning_rate = 0.1

network = [Dense(2, 3), Tanh(), Dense(3, 1), Tanh()]

# train
for e in range(epochs):
    error = 0
    for x, y in zip(X, Y):
        # forward
        output = x
        for layer in network:
            output = layer.forward(output)

        # error
        error += mse(y, output)

        # backward
        grad = mse_prime(y, output)
        for layer in reversed(network):
            grad = layer.backward(grad, learning_rate)
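The matplotlib and Axes3D imports above suggest a decision-surface plot follows the training loop. A hedged sketch of such a visualization, reusing the `forward` interface from the loop (the `predict` helper is introduced here for illustration, not part of the original):

def predict(network, x):
    output = x
    for layer in network:
        output = layer.forward(output)
    return output

# Sample the unit square and record the network's output at each point.
points = []
for xi in np.linspace(0, 1, 20):
    for yi in np.linspace(0, 1, 20):
        z = predict(network, np.array([[xi], [yi]]))
        points.append([xi, yi, z[0, 0]])

points = np.array(points)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=points[:, 2], cmap="winter")
plt.show()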
def __call__(self, x):
    for size in self.sizes[:-1]:
        x = Dense(size)(x)
        x = nn.relu(x)
    return Dense(self.sizes[-1])(x)
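The call style `Dense(size)(x)` and `nn.relu` suggest Flax linen with an `@nn.compact` method. A self-contained sketch of how this `__call__` might sit inside a module (the class name `MLP` and the use of `nn.Dense` in place of the custom `Dense` are assumptions):

from typing import Sequence

import jax
from jax import numpy as jnp
from flax import linen as nn

class MLP(nn.Module):
    sizes: Sequence[int]

    @nn.compact
    def __call__(self, x):
        for size in self.sizes[:-1]:
            x = nn.Dense(size)(x)
            x = nn.relu(x)
        return nn.Dense(self.sizes[-1])(x)

model = MLP(sizes=[16, 4])
params = model.init(jax.random.PRNGKey(0), jnp.ones((1, 8)))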
x_train = x_train.astype('float32')
x_train /= 255

# encode output, which is a number in range [0, 9], into a vector of size 10
# e.g. number 3 will become [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
y_train = np_utils.to_categorical(y_train)
y_train = y_train.reshape(y_train.shape[0], 10, 1)

# same for test data: 10000 samples
x_test = x_test.reshape(x_test.shape[0], 28 * 28, 1)
x_test = x_test.astype('float32')
x_test /= 255
y_test = np_utils.to_categorical(y_test)
y_test = y_test.reshape(y_test.shape[0], 10, 1)

# neural network
network = [Dense(28 * 28, 40), Tanh(), Dense(40, 10), Tanh()]

epochs = 100
learning_rate = 0.1

# train
for e in range(epochs):
    error = 0
    # train on 1000 samples, since we're not training on GPU...
    for x, y in zip(x_train[:1000], y_train[:1000]):
        # forward
        output = x
        for layer in network:
            output = layer.forward(output)

        # error
        error += mse(y, output)
    y = np_utils.to_categorical(y)
    y = y.reshape(len(y), 2, 1)
    return x, y

# load MNIST from server, limit to 100 images per class since we're not training on GPU
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, y_train = preprocess_data(x_train, y_train, 100)
x_test, y_test = preprocess_data(x_test, y_test, 100)

# neural network
network = [
    Convolutional((1, 28, 28), 3, 5),
    Sigmoid(),
    Reshape((5, 26, 26), (5 * 26 * 26, 1)),
    Dense(5 * 26 * 26, 100),
    Sigmoid(),
    Dense(100, 2),
    Sigmoid()
]

epochs = 20
learning_rate = 0.1

# train
for e in range(epochs):
    error = 0
    for x, y in zip(x_train, y_train):
        # forward
        output = x
        for layer in network:
            output = layer.forward(output)
def setup(self):
    self.dense1 = Dense(features=2)
    self.dense2 = Dense(features=1)
    # shapes aren't yet known, so variables aren't materialized
    print(self.dense2.variables)
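A hedged sketch of how those variables get materialized, assuming Flax linen and a `__call__` that chains `dense1` and `dense2` (the module name `MLP` and the input shape are assumptions):

import jax
from jax import numpy as jnp

# Initializing with a concrete input lets Flax infer the missing shapes
# and materialize the kernel/bias variables for both layers.
variables = MLP().init(jax.random.PRNGKey(0), jnp.ones((1, 4)))
print(variables['params'])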
    height_shift_range=0.16,
    width_shift_range=0.16,
    img_row_axis=1,
    img_col_axis=2,
    img_channel_axis=0,
    horizontal_flip=True,
    vertical_flip=False)

model.add_layer(ConvolutionalLayer(num_filters=32))
model.add_layer(Relu())
model.add_layer(
    ConvolutionalLayer(input_shape=[32, 32, 32], num_filters=32, filter_dims=[32, 3, 3]))
model.add_layer(Relu())
model.add_layer(MaxPool())
model.add_layer(Flatten())
model.add_layer(Dense(input_shape=8192, neurons=650))
model.add_layer(Relu())
# model.add_layer(Dense(input_shape=1000, neurons=650))
# model.add_layer(Relu())
model.add_layer(Dense(input_shape=650, neurons=10))
model.add_layer(Softmax())

model.train()
def __init__(self, name=None):
    super().__init__(name=name)
    self.dense_1 = Dense(in_features=3, out_features=3)
    self.dense_2 = Dense(in_features=3, out_features=2)
import numpy as np
import sys
sys.path.append("./core/")
from dense import Dense
from sigmoid import Sigmoid
from softmax import Softmax
from data_loader import get_images_and_labels, get_test_images_and_labels
import random

if __name__ == '__main__':
    dense = Dense(10, 784)
    dense.load_model("./model/w.npy", "./model/b.npy")
    sigmoid = Sigmoid()
    loss = Softmax()

    img, labels = get_images_and_labels()
    test_imgs, test_label = get_test_images_and_labels()

    train_label = np.zeros([10, 1])
    train_label[labels[0]] = 1
    inputx = (img[0] - 128) / 256.0

    batch_size = 1
    stop_accuracy_rate = 0.9
    image_number = 60000

    for k in range(3000):
        index_list = [i for i in range(image_number)]
        random.shuffle(index_list)
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import jax
from jax import numpy as jnp, random, lax, jit
from flax import linen as nn
from dense import Dense

# Require JAX omnistaging mode.
jax.config.enable_omnistaging()

X = jnp.ones((1, 10))
Y = jnp.ones((5,))

model = Dense(features=5)

@jit
def predict(params):
    return model.apply({'params': params}, X)

@jit
def loss_fn(params):
    return jnp.mean(jnp.abs(Y - predict(params)))

@jit
def init_params(rng):
    mlp_variables = model.init({'params': rng}, X)
    return mlp_variables['params']
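With `loss_fn` and `init_params` in place, a minimal gradient-descent step could look like the sketch below; `train_step` and the learning rate are assumptions added for illustration:

@jit
def train_step(params, lr=0.1):
    # One step of plain gradient descent on loss_fn.
    grads = jax.grad(loss_fn)(params)
    return jax.tree_util.tree_map(lambda p, g: p - lr * g, params, grads)

params = init_params(random.PRNGKey(0))
for _ in range(100):
    params = train_step(params)
print(loss_fn(params))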
X += 2 * rng.uniform(size=X.shape)
X = StandardScaler().fit_transform(X)

# Splitting Dataset
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=.4, random_state=42)

# Reshaping labels array into required shape -> (1, n_datapoints) for binary prediction
y_train = y_train.reshape(1, -1)
y_test = y_test.reshape(1, -1)

# Initializing Model
model = Sequential()

layer = Dense(input_shape=(None, 2), units=4)
output_shape = layer.output_shape
model.add(layer)

layer = Dense(input_shape=output_shape, units=6)
output_shape = layer.output_shape
model.add(layer)

layer = Dense(input_shape=output_shape, units=6)
output_shape = layer.output_shape
model.add(layer)

layer = Dense(input_shape=output_shape, units=4)
output_shape = layer.output_shape
model.add(layer)
import numpy as np
import sys
sys.path.append("./core/")
from dense import Dense
from sigmoid import Sigmoid
from softmax import Softmax
from data_loader import get_images_and_labels, get_test_images_and_labels

if __name__ == '__main__':
    dense = Dense(10, 784)
    dense.load_model("./model/w.npy", "./model/b.npy")
    dense1 = Dense(10, 100)
    sigmoid = Sigmoid()
    loss = Softmax()

    img, labels = get_images_and_labels()
    test_imgs, test_label = get_test_images_and_labels()

    train_label = np.zeros([10, 1])
    train_label[labels[0]] = 1
    inputx = (img[0] - 128) / 256.0

    count = 0
    for i in range(10000):
        inputx = (test_imgs[i] - 128) / 256.0
        inputx = inputx.reshape((784, 1))
        dense.forward(inputx)
        sigmoid.forward(dense.end)
        loss.forward(sigmoid.end)
        print('Test Accuracy:', num_correct / i)

    def calculate_loss(self, out, label):
        return Model.losses[self.loss](out, label)

    def calculate_loss_prime(self, out, target):
        return Model.losses_prime[self.loss](out, target)


if __name__ == '__main__':
    import mnist
    # from keras.datasets import fashion_mnist
    from conv import Conv, ScipyConv, DepthWiseConv
    from pool import Pool
    from dense import Dense

    train_images = mnist.train_images()[:1001]
    train_labels = mnist.train_labels()[:1001]
    print(len(train_images))

    model = Model(loss="cross_entropy")
    model.add_layer(DepthWiseConv(1, 8))
    model.add_layer(Pool())
    model.add_layer(Dense(1352, 10))
    model.train(train_images, train_labels, epochs=3, step=0.001)

    test_images = mnist.test_images()
    test_labels = mnist.test_labels()
    model.test(test_images, test_labels)