def __call__(self, x):
    # hidden layers: Dense followed by ReLU for every size but the last
    for size in self.sizes[:-1]:
        x = Dense(size)(x)
        x = nn.relu(x)
    # output layer: Dense with no activation
    return Dense(self.sizes[-1])(x)
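# For context, a minimal sketch of the module such a __call__ could belong to,
# assuming Flax linen and its built-in nn.Dense (the MLP name and `sizes` field
# are assumptions; inline submodule creation requires @nn.compact):
from typing import Sequence

import jax
from jax import numpy as jnp
from flax import linen as nn

class MLP(nn.Module):
    sizes: Sequence[int]  # hidden sizes plus the final output size

    @nn.compact
    def __call__(self, x):
        for size in self.sizes[:-1]:
            x = nn.Dense(size)(x)
            x = nn.relu(x)
        return nn.Dense(self.sizes[-1])(x)

# variables materialize from the input shape at init time
variables = MLP(sizes=(16, 16, 1)).init(jax.random.PRNGKey(0), jnp.ones((1, 4)))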
    y = np_utils.to_categorical(y)
    y = y.reshape(len(y), 2, 1)
    return x, y

# load MNIST from server, limit to 100 images per class since we're not training on GPU
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, y_train = preprocess_data(x_train, y_train, 100)
x_test, y_test = preprocess_data(x_test, y_test, 100)

# neural network
network = [
    Convolutional((1, 28, 28), 3, 5),
    Sigmoid(),
    Reshape((5, 26, 26), (5 * 26 * 26, 1)),
    Dense(5 * 26 * 26, 100),
    Sigmoid(),
    Dense(100, 2),
    Sigmoid()
]

epochs = 20
learning_rate = 0.1

# train
for e in range(epochs):
    error = 0
    for x, y in zip(x_train, y_train):
        # forward
        output = x
        for layer in network:
            output = layer.forward(output)
x_train = x_train.astype('float32')
x_train /= 255
# encode output which is a number in range [0,9] into a vector of size 10
# e.g. number 3 will become [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
y_train = np_utils.to_categorical(y_train)
y_train = y_train.reshape(y_train.shape[0], 10, 1)

# same for test data: 10000 samples
x_test = x_test.reshape(x_test.shape[0], 28 * 28, 1)
x_test = x_test.astype('float32')
x_test /= 255
y_test = np_utils.to_categorical(y_test)
y_test = y_test.reshape(y_test.shape[0], 10, 1)

# neural network
network = [
    Dense(28 * 28, 40),
    Tanh(),
    Dense(40, 10),
    Tanh()
]

epochs = 100
learning_rate = 0.1

# train
for e in range(epochs):
    error = 0
    # train on 1000 samples, since we're not training on GPU...
    for x, y in zip(x_train[:1000], y_train[:1000]):
        # forward
        output = x
        for layer in network:
            output = layer.forward(output)

        # error
        error += mse(y, output)

        # backward (mirrors the XOR example below: each layer is assumed to
        # update its parameters and return the gradient for the previous layer)
        grad = mse_prime(y, output)
        for layer in reversed(network):
            grad = layer.backward(grad, learning_rate)
def setup(self):
    self.dense1 = Dense(features=2)
    self.dense2 = Dense(features=1)
    # shapes aren't yet known, so variables aren't materialized
    print(self.dense2.variables)
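# A minimal sketch of how the variables become concrete once init is called
# with a real input (assumes this setup lives in a flax.linen Module; the MLP
# name and the input shape are assumptions):
import jax
from jax import numpy as jnp
from flax import linen as nn

class MLP(nn.Module):
    def setup(self):
        self.dense1 = nn.Dense(features=2)
        self.dense2 = nn.Dense(features=1)

    def __call__(self, x):
        return self.dense2(self.dense1(x))

variables = MLP().init(jax.random.PRNGKey(0), jnp.ones((1, 3)))
# variables['params'] now holds materialized kernels and biases for both layers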
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

from dense import Dense
from activations import Tanh
from losses import mse, mse_prime

X = np.reshape([[0, 0], [0, 1], [1, 0], [1, 1]], (4, 2, 1))
Y = np.reshape([[0], [1], [1], [0]], (4, 1, 1))

epochs = 10000
learning_rate = 0.1

network = [
    Dense(2, 3),
    Tanh(),
    Dense(3, 1),
    Tanh()
]

# train
for e in range(epochs):
    error = 0
    for x, y in zip(X, Y):
        # forward
        output = x
        for layer in network:
            output = layer.forward(output)

        # error
        error += mse(y, output)

        # backward (assumed signature: each layer updates its parameters and
        # returns the gradient for the previous layer)
        grad = mse_prime(y, output)
        for layer in reversed(network):
            grad = layer.backward(grad, learning_rate)
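# The matplotlib/Axes3D imports above suggest a decision-surface plot; a sketch
# of one way to draw it (the predict helper is assumed, not from the original):
def predict(network, x):
    output = x
    for layer in network:
        output = layer.forward(output)
    return output

points = []
for px in np.linspace(0, 1, 20):
    for py in np.linspace(0, 1, 20):
        z = predict(network, np.reshape([[px], [py]], (2, 1)))
        points.append([px, py, z[0, 0]])

points = np.array(points)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=points[:, 2], cmap='winter')
plt.show()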
def __init__(self, name=None):
    super().__init__(name=name)
    self.dense_1 = Dense(in_features=3, out_features=3)
    self.dense_2 = Dense(in_features=3, out_features=2)
def prepare(self):
    # default: split the input evenly across the pieces
    if self.output_dim is None:
        self.output_dim = self.input_dim // self.num_pieces
    if self.linear_transform:
        self.transformer = Dense(self.output_dim * self.num_pieces).init(self.input_dim)
        self.register(self.transformer)
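# The num_pieces/output_dim bookkeeping above matches a maxout-style layer; a
# minimal NumPy sketch of the maxout reduction itself (an illustration, not the
# original library's code):
import numpy as np

def maxout(x, num_pieces):
    # x: (batch, output_dim * num_pieces) -> (batch, output_dim),
    # keeping the maximum over each group of num_pieces features
    batch, features = x.shape
    return x.reshape(batch, features // num_pieces, num_pieces).max(axis=-1)

y = maxout(np.random.randn(8, 12), num_pieces=3)  # y.shape == (8, 4)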
X += 2 * rng.uniform(size=X.shape)
X = StandardScaler().fit_transform(X)

# Splitting dataset
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=.4, random_state=42)

# Reshaping label arrays into the required shape -> (1, n_datapoints) for binary prediction
y_train = y_train.reshape(1, -1)
y_test = y_test.reshape(1, -1)

# Initializing model
model = Sequential()

layer = Dense(input_shape=(None, 2), units=4)
output_shape = layer.output_shape
model.add(layer)

layer = Dense(input_shape=output_shape, units=6)
output_shape = layer.output_shape
model.add(layer)

layer = Dense(input_shape=output_shape, units=6)
output_shape = layer.output_shape
model.add(layer)

layer = Dense(input_shape=output_shape, units=4)
output_shape = layer.output_shape
model.add(layer)
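# The repeated add-then-chain pattern above can be collapsed into a loop
# (a sketch; it assumes Dense and Sequential behave exactly as used above):
shape = (None, 2)
for units in (4, 6, 6, 4):
    layer = Dense(input_shape=shape, units=units)
    shape = layer.output_shape
    model.add(layer)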
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import jax
from jax import numpy as jnp, random, lax, jit
from flax import linen as nn

from dense import Dense

# Require JAX omnistaging mode.
jax.config.enable_omnistaging()

X = jnp.ones((1, 10))
Y = jnp.ones((5,))

model = Dense(features=5)

@jit
def predict(params):
    return model.apply({'params': params}, X)

@jit
def loss_fn(params):
    return jnp.mean(jnp.abs(Y - predict(params)))

@jit
def init_params(rng):
    mlp_variables = model.init({'params': rng}, X)
    # assumed completion: return only the params collection
    return mlp_variables['params']
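# A minimal gradient-descent step built on these functions (a sketch; the
# train_step name, learning rate, and loop count are assumptions):
@jit
def train_step(params, lr=0.1):
    # differentiate the scalar loss with respect to the parameter pytree
    grads = jax.grad(loss_fn)(params)
    return jax.tree_util.tree_map(lambda p, g: p - lr * g, params, grads)

params = init_params(random.PRNGKey(0))
for _ in range(100):
    params = train_step(params)
print('final loss:', loss_fn(params))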
        print('Test Accuracy:', num_correct / i)

    def calculate_loss(self, out, label):
        return Model.losses[self.loss](out, label)

    def calculate_loss_prime(self, out, target):
        return Model.losses_prime[self.loss](out, target)


if __name__ == '__main__':
    import mnist
    # from keras.datasets import fashion_mnist
    from conv import Conv, ScipyConv, DepthWiseConv
    from pool import Pool
    from dense import Dense

    train_images = mnist.train_images()[:1001]
    train_labels = mnist.train_labels()[:1001]
    print(len(train_images))

    model = Model(loss="cross_entropy")
    model.add_layer(DepthWiseConv(1, 8))
    model.add_layer(Pool())
    model.add_layer(Dense(1352, 10))
    model.train(train_images, train_labels, epochs=3, step=0.001)

    test_images = mnist.test_images()
    test_labels = mnist.test_labels()
    model.test(test_images, test_labels)
    height_shift_range=0.16,
    width_shift_range=0.16,
    img_row_axis=1,
    img_col_axis=2,
    img_channel_axis=0,
    horizontal_flip=True,
    vertical_flip=False)

model.add_layer(ConvolutionalLayer(num_filters=32))
model.add_layer(Relu())
model.add_layer(
    ConvolutionalLayer(input_shape=[32, 32, 32],
                       num_filters=32,
                       filter_dims=[32, 3, 3]))
model.add_layer(Relu())
model.add_layer(MaxPool())
model.add_layer(Flatten())
model.add_layer(Dense(input_shape=8192, neurons=650))
model.add_layer(Relu())
# model.add_layer(Dense(input_shape=1000, neurons=650))
# model.add_layer(Relu())
model.add_layer(Dense(input_shape=650, neurons=10))
model.add_layer(Softmax())
model.train()
import numpy as np
import sys
sys.path.append("./core/")
from dense import Dense
from sigmoid import Sigmoid
from softmax import Softmax
from data_loader import get_images_and_labels, get_test_images_and_labels
import random

if __name__ == '__main__':
    dense = Dense(10, 784)
    dense.load_model("./model/w.npy", "./model/b.npy")
    sigmoid = Sigmoid()
    loss = Softmax()

    img, labels = get_images_and_labels()
    test_imgs, test_label = get_test_images_and_labels()

    train_label = np.zeros([10, 1])
    train_label[labels[0]] = 1
    inputx = (img[0] - 128) / 256.0

    batch_size = 1
    stop_accuracy_rate = 0.9
    image_number = 60000
    for k in range(3000):
        index_list = list(range(image_number))
        random.shuffle(index_list)
def setUp(self):
    self.N = 2  # number of inputs
    self.D = 3  # input dimension
    self.H = 4  # output/hidden dimension
    self.layer = Dense()  # defines the layer we are testing
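# A sketch of a shape check that could follow this setUp (hypothetical: it
# assumes the layer exposes forward(x) and already holds weights of shape (D, H)):
def test_forward_shape(self):
    x = np.random.randn(self.N, self.D)  # N inputs of dimension D
    out = self.layer.forward(x)
    self.assertEqual(out.shape, (self.N, self.H))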
import numpy as np
import sys
sys.path.append("./core/")
from dense import Dense
from sigmoid import Sigmoid
from softmax import Softmax
from data_loader import get_images_and_labels, get_test_images_and_labels

if __name__ == '__main__':
    dense = Dense(10, 784)
    dense.load_model("./model/w.npy", "./model/b.npy")
    dense1 = Dense(10, 100)
    sigmoid = Sigmoid()
    loss = Softmax()

    img, labels = get_images_and_labels()
    test_imgs, test_label = get_test_images_and_labels()

    train_label = np.zeros([10, 1])
    train_label[labels[0]] = 1
    inputx = (img[0] - 128) / 256.0

    count = 0
    for i in range(10000):
        inputx = (test_imgs[i] - 128) / 256.0
        inputx = inputx.reshape((784, 1))
        dense.forward(inputx)
        sigmoid.forward(dense.end)
        loss.forward(sigmoid.end)
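        # assumed continuation: count a hit when the Softmax output (stored in
        # .end, like Dense and Sigmoid) picks the labeled class
        if np.argmax(loss.end) == test_label[i]:
            count += 1
    print('accuracy:', count / 10000.0)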