Beispiel #1
0
def own_complex_fit(epochs=10):
    """Train a complex-valued CIFAR-10 CNN and log weights/loss/gradients.

    Builds a seeded complex CNN (zero imaginary init, no biases), records the
    initial weights and the gradients of the untrained model on the test set,
    then trains and evaluates it.

    NOTE(review): relies on module-level ``train_images``, ``train_labels``,
    ``test_images`` and ``test_labels`` being defined elsewhere in the file.

    Args:
        epochs: number of training epochs passed to ``model.fit``.

    Returns:
        Tuple ``(history, logs)`` where ``history`` is the Keras History and
        ``logs`` is a dict with initial weights, pre-training loss, the
        corresponding gradients, and final weights.
    """
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexDense(10, activation='cast_to_real', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    initial_weights = model.get_weights()  # snapshot before any training (typo 'weigths' fixed)
    with tf.GradientTape() as tape:
        # Pre-training loss on the test set; only the forward pass is taped.
        loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    # Compute gradients after exiting the context so the tape does not
    # record the back-propagation itself.
    gradients = tape.gradient(loss, model.trainable_weights)
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logs = {
        'weights_at_init': initial_weights,
        'loss': loss,
        'gradients': gradients,
        'weights_at_end': model.get_weights()
    }
    return history, logs
def cifar10_test():
    """Load CIFAR-10, cast images to complex64, and train a complex CNN.

    Returns:
        The Keras History object from ``model.fit`` (2 epochs).
    """
    dtype_1 = np.complex64
    (train_images, train_labels), (test_images,
                                   test_labels) = datasets.cifar10.load_data()
    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0
    # Cast only the *inputs* to complex so they match ComplexInput's dtype.
    train_images = train_images.astype(dtype_1)
    test_images = test_images.astype(dtype_1)
    # BUG FIX: labels are sparse integer class indices consumed by
    # SparseCategoricalCrossentropy; casting them to complex64 breaks the
    # loss. Leave them as the integer arrays returned by load_data().

    model = models.Sequential()
    model.add(layers.ComplexInput(input_shape=(32, 32, 3),
                                  dtype=dtype_1))  # Never forget this!!!
    model.add(layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexMaxPooling2D(
        (2, 2)))  # TODO: This is changing the dtype!
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexAvgPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexFlatten())
    model.add(layers.ComplexDense(64, activation='cart_relu'))
    # Last activation must cast to real: the loss cannot minimize a complex number.
    model.add(layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.summary()
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    history = model.fit(train_images,
                        train_labels,
                        epochs=2,
                        validation_data=(test_images, test_labels))
    return history
Beispiel #3
0
def cifar10_test_model_2(train_images,
                         train_labels,
                         test_images,
                         test_labels,
                         dtype_1='complex64'):
    """Functional-API complex CNN for CIFAR-10.

    Trains for 2 epochs without shuffling and returns the Keras History.
    """
    net_input = layers.complex_input(shape=(32, 32, 3), dtype=dtype_1)
    # Re-bind `hidden` through the stack instead of naming each tensor.
    hidden = layers.ComplexConv2D(32, (3, 3), activation='cart_relu')(net_input)
    hidden = layers.ComplexMaxPooling2D((2, 2))(hidden)
    hidden = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(hidden)
    hidden = layers.ComplexAvgPooling2D((2, 2))(hidden)
    hidden = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(hidden)
    hidden = layers.ComplexFlatten()(hidden)
    hidden = layers.ComplexDense(64, activation='cart_relu')(hidden)
    net_output = layers.ComplexDense(10, activation='convert_to_real_with_abs')(hidden)

    model = models.Model(inputs=[net_input], outputs=[net_output])
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    model.summary()
    return model.fit(train_images, train_labels, epochs=2,
                     validation_data=(test_images, test_labels),
                     shuffle=False)
Beispiel #4
0
def random_dataset():
    """Train a small complex CNN on randomly generated complex data.

    Generates uniform-random complex64 inputs with integer labels in [0, 5),
    then builds, compiles, fits (2 epochs) and evaluates the model.
    """
    train_shape, test_shape = [640, 65, 82, 1], [200, 65, 82, 1]
    x_train = np.complex64(tf.complex(tf.random.uniform(train_shape),
                                      tf.random.uniform(train_shape)))
    x_test = np.complex64(tf.complex(tf.random.uniform(test_shape),
                                     tf.random.uniform(test_shape)))
    y_train = np.uint8(np.random.randint(5, size=(640, 1)))
    y_test = np.uint8(np.random.randint(5, size=(200, 1)))

    model = tf.keras.models.Sequential([
        complex_layers.ComplexInput(input_shape=(65, 82, 1)),  # Always use ComplexInput at the start
        complex_layers.ComplexConv2D(8, (5, 5), activation='cart_relu'),
        complex_layers.ComplexMaxPooling2D((2, 2)),
        complex_layers.ComplexConv2D(16, (5, 5), activation='cart_relu'),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(256, activation='cart_relu'),
        complex_layers.ComplexDropout(0.1),
        complex_layers.ComplexDense(64, activation='cart_relu'),
        complex_layers.ComplexDropout(0.1),
        # An activation that casts to real must be used at the last layer.
        # The loss function cannot minimize a complex number.
        complex_layers.ComplexDense(5, activation='convert_to_real_with_abs'),
    ])

    # Compile it
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.summary()
    # Train and evaluate
    history = model.fit(x_train, y_train, epochs=2, validation_data=(x_test, y_test))
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
Beispiel #5
0
def cifar10_test_model_1(train_images,
                         train_labels,
                         test_images,
                         test_labels,
                         dtype_1='complex64'):
    """Sequential-API complex CNN for CIFAR-10.

    Trains for 2 epochs without shuffling and returns the Keras History.
    """
    architecture = [
        layers.ComplexInput(input_shape=(32, 32, 3),
                            dtype=dtype_1),  # Never forget this!!!
        layers.ComplexConv2D(32, (3, 3), activation='cart_relu'),
        layers.ComplexMaxPooling2D((2, 2)),
        layers.ComplexConv2D(64, (3, 3), activation='cart_relu'),
        layers.ComplexAvgPooling2D((2, 2)),
        layers.ComplexConv2D(64, (3, 3), activation='cart_relu'),
        layers.ComplexFlatten(),
        layers.ComplexDense(64, activation='cart_relu'),
        layers.ComplexDense(10, activation='convert_to_real_with_abs'),
    ]
    model = models.Sequential(architecture)
    model.summary()
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model.fit(train_images, train_labels, epochs=2,
                     validation_data=(test_images, test_labels),
                     shuffle=False)
Beispiel #6
0
def get_cvnn_model(dtype=tf.float32):
    """Build and compile the complex-valued U-Net model ("u-net-cvnn").

    Four downsampling stages feed a 1024-filter bottleneck, then four
    upsampling stages merge the stored skip connections back in.

    NOTE(review): the bottleneck and output convolutions use activation
    'relu' while the sibling builders in this file use 'cart_relu' —
    presumably cvnn resolves both; confirm against the library.
    """
    tf.random.set_seed(1)
    net_in = layers.complex_input(shape=INPUT_SIZE + (3, ), dtype=dtype)

    # Contracting path: each stage returns (conv_a, conv_b, pooled);
    # conv_b is kept as the skip connection for the expanding path.
    _, skip_a, pooled_a = _downsample_cvnn(net_in, 64, dtype)
    _, skip_b, pooled_b = _downsample_cvnn(pooled_a, 128, dtype)
    _, skip_c, pooled_c = _downsample_cvnn(pooled_b, 256, dtype)
    _, skip_d, pooled_d = _downsample_cvnn(pooled_c, 512, dtype)

    # Bottleneck.
    bottleneck = layers.ComplexConv2D(1024,
                                      activation='relu',
                                      kernel_size=3,
                                      dtype=dtype)(pooled_d)
    bottleneck = layers.ComplexConv2D(1024,
                                      activation='relu',
                                      kernel_size=3,
                                      padding='valid',
                                      dtype=dtype)(bottleneck)

    # Expanding path: each stage returns (conv_a, conv_b); conv_b feeds on.
    _, up_d = _upsample_cvnn(bottleneck, skip_d, 512, 4, dtype)
    _, up_c = _upsample_cvnn(up_d, skip_c, 256, 16, dtype)
    _, up_b = _upsample_cvnn(up_c, skip_b, 128, 40, dtype)
    _, up_a = _upsample_cvnn(up_b, skip_a, 64, 88, dtype)

    net_out = layers.ComplexConv2D(4, kernel_size=1, dtype=dtype)(up_a)

    model = tf.keras.Model(inputs=net_in, outputs=net_out, name="u-net-cvnn")
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  optimizer="adam",
                  metrics=["accuracy"])
    return model
Beispiel #7
0
def all_layers_model():
    """
    Creates a model using all possible layers to assert no layer changes the dtype to real.
    """
    batch_shape = (4, 28, 28, 3)
    sample = tf.cast(tf.random.normal(batch_shape), tf.complex64)

    model = tf.keras.models.Sequential([
        # Always use ComplexInput at the start.
        complex_layers.ComplexInput(input_shape=batch_shape[1:]),
        complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'),
        complex_layers.ComplexAvgPooling2D((2, 2)),
        complex_layers.ComplexConv2D(64, (3, 3), activation='cart_sigmoid'),
        complex_layers.ComplexDropout(0.5),
        complex_layers.ComplexMaxPooling2D((2, 2)),
        complex_layers.ComplexConv2DTranspose(32, (2, 2)),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(64, activation='cart_tanh'),
    ])
    model.compile(loss=tf.keras.losses.MeanAbsoluteError(),
                  optimizer='adam',
                  metrics=['accuracy'])
    prediction = model(sample)
    # The whole point of the test: output must still be complex.
    assert prediction.dtype == np.complex64
    return model
Beispiel #8
0
def test_functional_api():
    """Smoke-test building a complex U-Net-like model via the functional API."""
    net_in = complex_layers.complex_input(shape=(128, 128, 3))
    down_a = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(net_in)
    down_b = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(down_a)
    pooled = complex_layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(down_b)
    upsampled = complex_layers.ComplexConv2DTranspose(5, kernel_size=2, strides=(2, 2), activation='cart_relu')(pooled)
    # Skip connection: merge the upsampled tensor with the pre-pool features.
    merged = tf.keras.layers.concatenate([upsampled, down_b], axis=-1)

    head = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(merged)
    net_out = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(head)
    model = tf.keras.Model(net_in, net_out)
Beispiel #9
0
def _downsample_cvnn(inputs, units, dtype=tf.float32):
    """One contracting-path U-Net stage: two 3x3 complex convolutions
    followed by a 2x2 max pool. Returns (first_conv, second_conv, pooled)."""
    first = layers.ComplexConv2D(units,
                                 activation='cart_relu',
                                 kernel_size=3,
                                 dtype=dtype)(inputs)
    second = layers.ComplexConv2D(units,
                                  activation='cart_relu',
                                  kernel_size=3,
                                  dtype=dtype)(first)
    pooled = layers.ComplexMaxPooling2D(pool_size=(2, 2),
                                        strides=(2, 2),
                                        padding='valid',
                                        dtype=dtype)(second)
    return first, second, pooled
Beispiel #10
0
def test_cifar():
    """End-to-end smoke test: train a small complex CNN on CIFAR-10 for one epoch."""
    (train_images, train_labels), (test_images, test_labels) = get_dataset()

    # Create your model
    model = tf.keras.models.Sequential([
        complex_layers.ComplexInput(input_shape=(32, 32, 3)),  # Always use ComplexInput at the start
        complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'),
        complex_layers.ComplexAvgPooling2D((2, 2)),
        complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu'),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(64, activation='cart_relu'),
        complex_layers.ComplexDense(10, activation='convert_to_real_with_abs'),
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
Beispiel #11
0
def own_fit(epochs=10):
    """Train the real-dtype (float32) counterpart of the complex CIFAR-10 model.

    NOTE(review): relies on module-level ``train_images``, ``train_labels``,
    ``test_images`` and ``test_labels`` being defined elsewhere in the file.

    Args:
        epochs: number of training epochs passed to ``model.fit``.

    Returns:
        The Keras History object from ``model.fit``.
    """
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)
    real = np.float32  # force every layer to a real dtype
    model = models.Sequential([
        complex_layers.ComplexConv2D(32, (3, 3),
                                     activation='cart_relu',
                                     input_shape=(32, 32, 3),
                                     dtype=real,
                                     kernel_initializer=init),
        complex_layers.ComplexMaxPooling2D((2, 2), dtype=real),
        complex_layers.ComplexConv2D(64, (3, 3),
                                     activation='cart_relu',
                                     dtype=real,
                                     kernel_initializer=init),
        complex_layers.ComplexMaxPooling2D((2, 2), dtype=real),
        complex_layers.ComplexConv2D(64, (3, 3),
                                     activation='cart_relu',
                                     dtype=real,
                                     kernel_initializer=init),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(64,
                                    activation='cart_relu',
                                    dtype=real,
                                    kernel_initializer=init),
        complex_layers.ComplexDense(10,
                                    dtype=real,
                                    kernel_initializer=init),
    ])
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    history = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        validation_data=(test_images, test_labels))
    model.evaluate(test_images, test_labels, verbose=2)
    return history
Beispiel #12
0
def _upsample_cvnn(in1, in2, units, crop, dtype=tf.float32):
    """One expanding-path U-Net stage: upsample ``in1`` with a transpose
    convolution, crop ``in2`` to match, concatenate, then apply two 3x3
    complex convolutions. Returns (first_conv, second_conv)."""
    upsampled = layers.ComplexConv2DTranspose(units,
                                              kernel_size=2,
                                              strides=(2, 2),
                                              activation='relu',
                                              dtype=dtype)(in1)
    # Trim the skip connection so spatial dims line up for concatenation.
    cropped = tf.keras.layers.Cropping2D(cropping=(crop, crop))(in2)

    merged = tf.keras.layers.concatenate([upsampled, cropped], axis=-1)

    first = layers.ComplexConv2D(units,
                                 activation='relu',
                                 kernel_size=3,
                                 dtype=dtype)(merged)
    second = layers.ComplexConv2D(units,
                                  activation='relu',
                                  kernel_size=3,
                                  dtype=dtype)(first)
    return first, second
Beispiel #13
0
 def test_complex_conv2d(self):
     """Smoke test: ComplexConv2D accepts a complex64 image batch."""
     conv2d = layers.ComplexConv2D(10, kernel_size=(3, 3))
     # Random complex image batch of shape (1, 10, 10, 3).
     cimg = np.random.randn(1, 10, 10,
                            3) + 1j * np.random.randn(1, 10, 10, 3)
     _ = conv2d(cimg.astype('complex64'))