Example #1
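These snippets use TensorFlow/Keras together with what appears to be the cvnn complex-valued layers package, but their imports are omitted. A minimal import block matching the names used below might look like this (an assumption, not part of the original examples):

import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, models
import cvnn.layers as complex_layers
layers = complex_layers  # several examples refer to the same module as `layers`

Some examples also read train_images, train_labels, test_images and test_labels as module-level globals, so those arrays must be defined before calling them.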
def own_complex_fit(epochs=10):
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexDense(10, activation='cast_to_real', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    # model.summary()
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    weights = model.get_weights()
    with tf.GradientTape() as tape:
        loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation at the initial weights
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logs = {
        'weights_at_init': weights,
        'loss': loss,
        'gradients': gradients,
        'weights_at_end': model.get_weights()
    }
    return history, logs
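A minimal usage sketch (hypothetical, assuming the CIFAR-10 arrays are already defined as the module-level globals the function reads):

# Hypothetical usage; requires train_images, train_labels, test_images, test_labels globals.
history, logs = own_complex_fit(epochs=2)
print(logs['loss'])                      # loss evaluated at the initial weights
print(len(logs['gradients']))            # one gradient tensor per trainable weight
print(history.history['val_accuracy'])   # validation accuracy per epoch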
Example #2
def cifar10_test():
    dtype_1 = np.complex64
    (train_images, train_labels), (test_images,
                                   test_labels) = datasets.cifar10.load_data()
    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0
    train_images = train_images.astype(dtype_1)
    test_images = test_images.astype(dtype_1)
    train_labels = train_labels.astype(dtype_1)
    test_labels = test_labels.astype(dtype_1)

    model = models.Sequential()
    model.add(layers.ComplexInput(input_shape=(32, 32, 3),
                                  dtype=dtype_1))  # Never forget this!!!
    model.add(layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexMaxPooling2D(
        (2, 2)))  # TODO: This is changing the dtype!
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexAvgPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexFlatten())
    model.add(layers.ComplexDense(64, activation='cart_relu'))
    model.add(layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.summary()
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    history = model.fit(train_images,
                        train_labels,
                        epochs=2,
                        validation_data=(test_images, test_labels))
Example #3
def random_dataset():
    x_train = np.complex64(tf.complex(tf.random.uniform([640, 65, 82, 1]), tf.random.uniform([640, 65, 82, 1])))
    x_test = np.complex64(tf.complex(tf.random.uniform([200, 65, 82, 1]), tf.random.uniform([200, 65, 82, 1])))
    y_train = np.uint8(np.random.randint(5, size=(640, 1)))
    y_test = np.uint8(np.random.randint(5, size=(200, 1)))

    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=(65, 82, 1)))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(8, (5, 5), activation='cart_relu'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(16, (5, 5), activation='cart_relu'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(256, activation='cart_relu'))
    model.add(complex_layers.ComplexDropout(0.1))
    model.add(complex_layers.ComplexDense(64, activation='cart_relu'))
    model.add(complex_layers.ComplexDropout(0.1))
    model.add(complex_layers.ComplexDense(5, activation='convert_to_real_with_abs'))
    # An activation that casts to real must be used at the last layer.
    # The loss function cannot minimize a complex number

    # Compile it
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'],
                  # run_eagerly=True
                  )
    model.summary()
    # Train and evaluate
    history = model.fit(x_train, y_train, epochs=2, validation_data=(x_test, y_test))
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
Example #4
def cifar10_test_model_2(train_images,
                         train_labels,
                         test_images,
                         test_labels,
                         dtype_1='complex64'):
    x = layers.complex_input(shape=(32, 32, 3), dtype=dtype_1)
    conv1 = layers.ComplexConv2D(32, (3, 3), activation='cart_relu')(x)
    pool1 = layers.ComplexMaxPooling2D((2, 2))(conv1)
    conv2 = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(pool1)
    pool2 = layers.ComplexAvgPooling2D((2, 2))(conv2)
    conv3 = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(pool2)
    flat = layers.ComplexFlatten()(conv3)
    dense1 = layers.ComplexDense(64, activation='cart_relu')(flat)
    y = layers.ComplexDense(10, activation='convert_to_real_with_abs')(dense1)

    model = models.Model(inputs=[x], outputs=[y])
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    model.summary()
    return model.fit(train_images,
                     train_labels,
                     epochs=2,
                     validation_data=(test_images, test_labels),
                     shuffle=False)
Example #5
def cifar10_test_model_1(train_images,
                         train_labels,
                         test_images,
                         test_labels,
                         dtype_1='complex64'):
    model = models.Sequential()
    model.add(layers.ComplexInput(input_shape=(32, 32, 3),
                                  dtype=dtype_1))  # Never forget this!!!
    model.add(layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexMaxPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexAvgPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexFlatten())
    model.add(layers.ComplexDense(64, activation='cart_relu'))
    model.add(layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.summary()
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model.fit(train_images,
                     train_labels,
                     epochs=2,
                     validation_data=(test_images, test_labels),
                     shuffle=False)
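Examples #4 and #5 build the same architecture through the functional and Sequential APIs. A sketch of driving both with the CIFAR-10 preprocessing from cifar10_test above (hypothetical usage, same assumed imports):

# Hypothetical driver reusing the preprocessing shown in cifar10_test.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images = (train_images / 255.0).astype('complex64')
test_images = (test_images / 255.0).astype('complex64')
hist_functional = cifar10_test_model_2(train_images, train_labels, test_images, test_labels)
hist_sequential = cifar10_test_model_1(train_images, train_labels, test_images, test_labels)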
Example #6
def all_layers_model():
    """
    Creates a model using all possible layers to assert no layer changes the dtype to real.
    """
    input_shape = (4, 28, 28, 3)
    x = tf.cast(tf.random.normal(input_shape), tf.complex64)

    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(
        input_shape=input_shape[1:]))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexAvgPooling2D((2, 2)))
    model.add(
        complex_layers.ComplexConv2D(64, (3, 3), activation='cart_sigmoid'))
    model.add(complex_layers.ComplexDropout(0.5))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2DTranspose(32, (2, 2)))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_tanh'))
    model.compile(loss=tf.keras.losses.MeanAbsoluteError(),
                  optimizer='adam',
                  metrics=['accuracy'])
    y = model(x)
    assert y.dtype == np.complex64
    return model
Example #7
def own_fit(epochs=10):
    # Same architecture as own_complex_fit, but every layer is given dtype=np.float32,
    # so the Complex* layers operate on real-valued data.
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)
    model = models.Sequential()
    model.add(
        complex_layers.ComplexConv2D(32, (3, 3),
                                     activation='cart_relu',
                                     input_shape=(32, 32, 3),
                                     dtype=np.float32,
                                     kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(
        complex_layers.ComplexConv2D(64, (3, 3),
                                     activation='cart_relu',
                                     dtype=np.float32,
                                     kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(
        complex_layers.ComplexConv2D(64, (3, 3),
                                     activation='cart_relu',
                                     dtype=np.float32,
                                     kernel_initializer=init))
    model.add(complex_layers.ComplexFlatten())
    model.add(
        complex_layers.ComplexDense(64,
                                    activation='cart_relu',
                                    dtype=np.float32,
                                    kernel_initializer=init))
    model.add(
        complex_layers.ComplexDense(10,
                                    dtype=np.float32,
                                    kernel_initializer=init))
    # model.summary()
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    history = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    return history
Example #8
def test_functional_api():
    inputs = complex_layers.complex_input(shape=(128, 128, 3))
    c0 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(inputs)
    c1 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(c0)
    c2 = complex_layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c1)
    t01 = complex_layers.ComplexConv2DTranspose(5, kernel_size=2, strides=(2, 2), activation='cart_relu')(c2)
    concat01 = tf.keras.layers.concatenate([t01, c1], axis=-1)

    c3 = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(concat01)
    out = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(c3)
    model = tf.keras.Model(inputs, out)
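As excerpted, the test stops after building the model. A check in the spirit of all_layers_model above, assuming test_functional_api is modified to end with return model (a hypothetical change):

model = test_functional_api()  # assumes the function returns the model it builds
x = tf.complex(tf.random.uniform([2, 128, 128, 3]), tf.random.uniform([2, 128, 128, 3]))
assert model(x).dtype == tf.complex64  # cart_relu keeps the output complex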
Example #9
def _downsample_cvnn(inputs, units, dtype=tf.float32):
    c0 = layers.ComplexConv2D(units,
                              activation='cart_relu',
                              kernel_size=3,
                              dtype=dtype)(inputs)
    c1 = layers.ComplexConv2D(units,
                              activation='cart_relu',
                              kernel_size=3,
                              dtype=dtype)(c0)
    c2 = layers.ComplexMaxPooling2D(pool_size=(2, 2),
                                    strides=(2, 2),
                                    padding='valid',
                                    dtype=dtype)(c1)
    return c0, c1, c2
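A sketch of how this downsampling block could be wired into a small functional model, following the pattern of the other functional-API examples (hypothetical usage):

# Hypothetical usage of the downsampling block inside a small classifier.
inputs = layers.complex_input(shape=(64, 64, 1), dtype=tf.complex64)
c0, c1, c2 = _downsample_cvnn(inputs, units=16, dtype=tf.complex64)
flat = layers.ComplexFlatten()(c2)
out = layers.ComplexDense(10, activation='convert_to_real_with_abs')(flat)
model = models.Model(inputs=inputs, outputs=out)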