def own_complex_fit(epochs=10):
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu',
                                           kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu',
                                           kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu',
                                          kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexDense(10, activation='cast_to_real',
                                          kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    # model.summary()
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    weights = model.get_weights()
    with tf.GradientTape() as tape:
        loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    history = model.fit(train_images, train_labels, epochs=epochs,
                        validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logs = {
        'weights_at_init': weights,
        'loss': loss,
        'gradients': gradients,
        'weights_at_end': model.get_weights()
    }
    return history, logs
def own_fit(train_images, train_labels, test_images, test_labels,
            init1='glorot_uniform', init2='glorot_uniform', epochs=10):
    tf.random.set_seed(1)
    model = tf.keras.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28)),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1),
        layers.ComplexDense(10, dtype=np.float32, kernel_initializer=init2)
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=epochs, shuffle=False)
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    return history
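# A minimal driver sketch for own_fit above. The Fashion-MNIST loading and the
# 0-1 normalization here are assumptions (they mirror the Monte Carlo script at
# the end of this section); own_fit itself only needs (28, 28) images plus
# integer labels.
(train_images, train_labels), (test_images, test_labels) = \
    tf.keras.datasets.fashion_mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
history = own_fit(train_images, train_labels, test_images, test_labels, epochs=10)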
def cifar10_test():
    dtype_1 = np.complex64
    (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0
    train_images = train_images.astype(dtype_1)
    test_images = test_images.astype(dtype_1)
    train_labels = train_labels.astype(dtype_1)
    test_labels = test_labels.astype(dtype_1)

    model = models.Sequential()
    model.add(layers.ComplexInput(input_shape=(32, 32, 3), dtype=dtype_1))  # Never forget this!!!
    model.add(layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexMaxPooling2D((2, 2)))  # TODO: This is changing the dtype!
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexAvgPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexFlatten())
    model.add(layers.ComplexDense(64, activation='cart_relu'))
    model.add(layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.summary()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=2,
                        validation_data=(test_images, test_labels))
def random_dataset():
    x_train = np.complex64(tf.complex(tf.random.uniform([640, 65, 82, 1]),
                                      tf.random.uniform([640, 65, 82, 1])))
    x_test = np.complex64(tf.complex(tf.random.uniform([200, 65, 82, 1]),
                                     tf.random.uniform([200, 65, 82, 1])))
    y_train = np.uint8(np.random.randint(5, size=(640, 1)))
    y_test = np.uint8(np.random.randint(5, size=(200, 1)))

    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=(65, 82, 1)))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(8, (5, 5), activation='cart_relu'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(16, (5, 5), activation='cart_relu'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(256, activation='cart_relu'))
    model.add(complex_layers.ComplexDropout(0.1))
    model.add(complex_layers.ComplexDense(64, activation='cart_relu'))
    model.add(complex_layers.ComplexDropout(0.1))
    model.add(complex_layers.ComplexDense(5, activation='convert_to_real_with_abs'))
    # An activation that casts to real must be used at the last layer.
    # The loss function cannot minimize a complex number.
    # Compile it
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'],
                  # run_eagerly=True
                  )
    model.summary()
    # Train and evaluate
    history = model.fit(x_train, y_train, epochs=2, validation_data=(x_test, y_test))
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
def cifar10_test_model_2(train_images, train_labels, test_images, test_labels, dtype_1='complex64'):
    x = layers.complex_input(shape=(32, 32, 3), dtype=dtype_1)
    conv1 = layers.ComplexConv2D(32, (3, 3), activation='cart_relu')(x)
    pool1 = layers.ComplexMaxPooling2D((2, 2))(conv1)
    conv2 = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(pool1)
    pool2 = layers.ComplexAvgPooling2D((2, 2))(conv2)
    conv3 = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(pool2)
    flat = layers.ComplexFlatten()(conv3)
    dense1 = layers.ComplexDense(64, activation='cart_relu')(flat)
    y = layers.ComplexDense(10, activation='convert_to_real_with_abs')(dense1)
    model = models.Model(inputs=[x], outputs=[y])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.summary()
    return model.fit(train_images, train_labels, epochs=2,
                     validation_data=(test_images, test_labels), shuffle=False)
def cifar10_test_model_1(train_images, train_labels, test_images, test_labels, dtype_1='complex64'):
    model = models.Sequential()
    model.add(layers.ComplexInput(input_shape=(32, 32, 3), dtype=dtype_1))  # Never forget this!!!
    model.add(layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexMaxPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexAvgPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexFlatten())
    model.add(layers.ComplexDense(64, activation='cart_relu'))
    model.add(layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.summary()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model.fit(train_images, train_labels, epochs=2,
                     validation_data=(test_images, test_labels), shuffle=False)
def all_layers_model():
    """
    Creates a model using all possible layers to assert no layer changes the dtype to real.
    """
    input_shape = (4, 28, 28, 3)
    x = tf.cast(tf.random.normal(input_shape), tf.complex64)

    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=input_shape[1:]))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexAvgPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_sigmoid'))
    model.add(complex_layers.ComplexDropout(0.5))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2DTranspose(32, (2, 2)))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_tanh'))
    model.compile(loss=tf.keras.losses.MeanAbsoluteError(), optimizer='adam', metrics=['accuracy'])
    y = model(x)
    assert y.dtype == np.complex64
    return model
def own_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    model = tf.keras.models.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1),
        layers.ComplexDense(10, activation='softmax_real', dtype=np.float32, kernel_initializer=init2)
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    start = timeit.default_timer()
    model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=verbose, shuffle=False)
    stop = timeit.default_timer()
    return model.evaluate(ds_test, verbose=verbose), stop - start
def test_regression():
    input_shape = (4, 28, 28, 3)
    x = tf.cast(tf.random.normal(input_shape), tf.complex64)
    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=input_shape[1:]))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(units=64, activation='cart_relu'))
    model.add(complex_layers.ComplexDense(units=10, activation='linear'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    y = model(x)
    assert y.dtype == np.complex64
def get_complex_mnist_model():
    inputs = complex_layers.complex_input(shape=(28, 28, 1), dtype=np.float32)
    flat = complex_layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32)(inputs)
    dense = complex_layers.ComplexDense(128, activation='cart_relu', dtype=np.float32)(flat)
    drop = complex_layers.ComplexDropout(rate=0.5)(dense)
    out = complex_layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32)(drop)
    complex_model = tf.keras.Model(inputs, out, name="rvnn")
    complex_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    complex_intermediate_model = tf.keras.Model(inputs, drop)
    return complex_model, complex_intermediate_model
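# The second model returned above shares the same layers as the first up to the
# dropout output, which makes it handy for inspecting intermediate activations.
# A minimal usage sketch; the random batch below is a stand-in assumption for
# real MNIST data:
complex_model, intermediate_model = get_complex_mnist_model()
dummy_batch = np.random.rand(4, 28, 28, 1).astype(np.float32)
probs = complex_model(dummy_batch)          # shape (4, 10): class probabilities
features = intermediate_model(dummy_batch)  # shape (4, 128): pre-output activations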
def own_complex_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    tf.random.set_seed(24)
    model = tf.keras.models.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.complex64),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.complex64,
                            kernel_initializer=init1, use_bias=False, init_technique='zero_imag'),
        layers.ComplexDense(10, activation='cast_to_real', dtype=np.complex64,
                            kernel_initializer=init2, use_bias=False, init_technique='zero_imag'),
        tf.keras.layers.Activation('softmax')
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # ds_train = ds_train.map(cast_to_complex)
    # ds_test = ds_test.map(cast_to_complex)
    weights = model.get_weights()
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        elem, label = next(iter(ds_test))
        loss = model.compiled_loss(y_true=label, y_pred=model(elem))  # calculate loss
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {'weights': weights, 'loss': loss, 'gradients': gradients}
    start = timeit.default_timer()
    history = model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=verbose, shuffle=False)
    stop = timeit.default_timer()
    return history, stop - start, logs
def test_cifar():
    (train_images, train_labels), (test_images, test_labels) = get_dataset()
    # Create your model
    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=(32, 32, 3)))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexAvgPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu'))
    model.add(complex_layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # model.summary()
    history = model.fit(train_images, train_labels, epochs=1,
                        validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
def own_fit(epochs=10):
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu',
                                           dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu',
                                           dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu',
                                          dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexDense(10, dtype=np.float32, kernel_initializer=init))
    # model.summary()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=epochs,
                        validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    return history
def dropout():
    tf.random.set_seed(0)
    layer = complex_layers.ComplexDropout(.2, input_shape=(2,))
    data = np.arange(10).reshape(5, 2).astype(np.float32)
    data = tf.complex(data, data)
    outputs = layer(data, training=True)
    expected_out = np.array([[0. + 0.j, 1.25 + 1.25j],
                             [2.5 + 2.5j, 3.75 + 3.75j],
                             [5. + 5.j, 6.25 + 6.25j],
                             [7.5 + 7.5j, 8.75 + 8.75j],
                             [10. + 10.j, 0. + 0.j]])
    assert np.all(data == layer(data, training=False))
    assert np.all(outputs == expected_out)

    ds_train, ds_test = get_dataset()
    model = tf.keras.models.Sequential([
        complex_layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32),
        complex_layers.ComplexDense(128, activation='cart_relu', dtype=np.float32),
        complex_layers.ComplexDropout(rate=0.5),
        complex_layers.ComplexDense(10, activation='softmax_real', dtype=np.float32)
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    model.fit(ds_train, epochs=1, validation_data=ds_test, verbose=False, shuffle=False)
    model.evaluate(ds_test, verbose=False)
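# Where the expected values above come from: TensorFlow implements "inverted
# dropout", so at training time every kept unit is scaled by 1 / (1 - rate) and
# dropped units become 0. With rate = 0.2 the scale is 1.25, which is why the
# surviving entries of tf.complex(data, data) read 1.25 + 1.25j, 2.5 + 2.5j,
# and so on, while entries 0 and 9 were masked out.
rate = 0.2
scale = 1 / (1 - rate)
assert scale == 1.25  # e.g. input 3 + 3j is kept as 3.75 + 3.75j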
from cvnn.montecarlo import MonteCarlo
import tensorflow as tf
import cvnn.layers as layers
import numpy as np

fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

own_model = tf.keras.Sequential([
    layers.ComplexFlatten(input_shape=(28, 28)),
    layers.ComplexDense(128, activation='cart_relu', dtype=np.float32),
    layers.ComplexDense(10, dtype=np.float32)
], name="own_model")
own_model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

keras_model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)
], name="keras_model")
keras_model.compile(optimizer='adam',
                    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                    metrics=['accuracy'])

monte_carlo = MonteCarlo()
monte_carlo.add_model(own_model)
monte_carlo.add_model(keras_model)
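# The script above only registers the two models; the experiment still has to
# be launched. A minimal sketch, assuming MonteCarlo.run(x, y, iterations=...,
# epochs=...) as shown in the cvnn documentation (the normalization step and
# the keyword names are assumptions, not verified against a specific version):
train_images = (train_images / 255.0).astype(np.float32)
# Each iteration re-initializes and re-trains every registered model, so the
# two architectures can be compared over repeated runs rather than a single fit.
monte_carlo.run(train_images, train_labels, iterations=10, epochs=5)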