def test_layer_output_batched(self):
    """A batch of 3 points against 2 fuzzy kernels produces the expected memberships."""
    out_dim = 2
    in_dim = 2
    n_samples = 3
    session = K.get_session()

    layer = FuzzyLayer(out_dim)
    layer.build(input_shape=(n_samples, in_dim))

    # Replace the layer's trained centers/sigmas with placeholders so that
    # exact values can be fed in at run time.
    x_ph = K.placeholder(shape=(n_samples, in_dim))
    c_ph = K.placeholder(shape=(in_dim, out_dim))
    a_ph = K.placeholder(shape=(in_dim, out_dim))
    layer.c = c_ph
    layer.a = a_ph
    output = layer.call(x_ph)

    vals = session.run(output, feed_dict={
        x_ph: [[1, 1], [0, 0], [0.5, 0.5]],
        c_ph: [[1, 0], [1, 0]],
        a_ph: [[1 / 10, 1 / 10], [1 / 10, 1 / 10]],
    })

    self.assertEqual(len(vals), n_samples)
    self.assertEqual(len(vals[0]), out_dim)
    # Sample 0 sits on kernel 0's center, sample 1 on kernel 1's center,
    # sample 2 is equidistant from both and far (in sigma units) from each.
    self.assertAlmostEqual(vals[0][0], 1, 7)
    self.assertAlmostEqual(vals[0][1], 0, 7)
    self.assertAlmostEqual(vals[1][0], 0, 7)
    self.assertAlmostEqual(vals[1][1], 1, 7)
    self.assertAlmostEqual(vals[2][0], 0.000003726653172, 7)
    self.assertAlmostEqual(vals[2][1], 0.000003726653172, 7)
def test_layer_output_batched2(self):
    """A batch of 2 points with asymmetric centers/sigmas yields the expected memberships."""
    out_dim = 2
    in_dim = 2
    n_samples = 2
    session = K.get_session()

    layer = FuzzyLayer(out_dim)
    layer.build(input_shape=(n_samples, in_dim))

    # Swap the layer's weights for placeholders so concrete centers and
    # sigmas can be supplied via feed_dict.
    x_ph = K.placeholder(shape=(n_samples, in_dim))
    c_ph = K.placeholder(shape=(in_dim, out_dim))
    a_ph = K.placeholder(shape=(in_dim, out_dim))
    layer.c = c_ph
    layer.a = a_ph
    output = layer.call(x_ph)

    vals = session.run(output, feed_dict={
        x_ph: [[0.5, 0.8], [0.8, 0.5]],
        c_ph: [[1, 0.2], [0.8, 0]],
        a_ph: [[1 / 2, 1 / 4], [1, 1 / 8]],
    })

    self.assertEqual(len(vals), n_samples)
    self.assertEqual(len(vals[0]), out_dim)
    self.assertAlmostEqual(vals[0][0], 0.7788007831, 7)
    self.assertAlmostEqual(vals[0][1], 0.00002491600973, 7)
    self.assertAlmostEqual(vals[1][0], 0.9394130628, 7)
    self.assertAlmostEqual(vals[1][1], 0.004339483271, 7)
def test_layer_output_batched_and_context(self):
    """An extra 'context' axis is preserved: output shape is (batch, context, odim)."""
    in_dim = 1
    n_context = 2
    n_samples = 3
    out_dim = 4
    session = K.get_session()

    layer = FuzzyLayer(out_dim)
    layer.build(input_shape=(n_samples, n_context, in_dim))

    # Feed explicit centers/sigmas through placeholders rather than the
    # layer's own trained weights.
    x_ph = K.placeholder(shape=(n_samples, n_context, in_dim))
    c_ph = K.placeholder(shape=(in_dim, out_dim))
    a_ph = K.placeholder(shape=(in_dim, out_dim))
    layer.c = c_ph
    layer.a = a_ph
    output = layer.call(x_ph)

    vals = session.run(output, feed_dict={
        x_ph: [[[0.5], [0.8]], [[0.8], [0.6]], [[0.6], [0.4]]],
        c_ph: [[1, 0.8, 0.6, 0.4]],
        a_ph: [[1, 1, 1, 1]],
    })

    # Only the output rank/shape is asserted here, not the membership values.
    self.assertEqual(len(vals), n_samples)
    self.assertEqual(len(vals[0]), n_context)
    self.assertEqual(len(vals[0][0]), out_dim)
def test_layer_output_single2(self):
    """A single point equidistant from both centers gets equal membership in each kernel."""
    out_dim = 2
    in_dim = 2
    n_samples = 1
    session = K.get_session()

    layer = FuzzyLayer(out_dim)
    layer.build(input_shape=(n_samples, in_dim))

    # Placeholders stand in for the layer's centers (c) and sigmas (a).
    x_ph = K.placeholder(shape=(n_samples, in_dim))
    c_ph = K.placeholder(shape=(in_dim, out_dim))
    a_ph = K.placeholder(shape=(in_dim, out_dim))
    layer.c = c_ph
    layer.a = a_ph
    output = layer.call(x_ph)

    vals = session.run(output, feed_dict={
        x_ph: [[0.5, 0.5]],
        c_ph: [[1, 0], [1, 0]],
        a_ph: [[1 / 2, 1 / 2], [1 / 2, 1 / 2]],
    })

    self.assertEqual(len(vals), n_samples)
    self.assertEqual(len(vals[0]), out_dim)
    self.assertAlmostEqual(vals[0][0], 0.6065306597, 7)
    self.assertAlmostEqual(vals[0][1], 0.6065306597, 7)
# Example: regress the angle of points on the unit circle through a
# FuzzyLayer -> DefuzzyLayer pipeline.
# NOTE(review): assumes x, y, x_test, y_test are lists and m is the math
# module, all defined before this chunk — confirm.
r = 1
# 3000 shuffled training angles over ~[0, 1.9*pi); 25 evenly spaced test angles.
vals = np.linspace(0, 1.9 * m.pi, num=3000)
np.random.shuffle(vals)
for i in vals:
    x.append([r * m.cos(i), r * m.sin(i)])
    y.append(i)
for i in np.linspace(0, 1.9 * m.pi, num=25):
    x_test.append([r * m.cos(i), r * m.sin(i)])
    y_test.append(i)
x_train = np.array(x)
y_train = np.array(y)
f_layer = FuzzyLayer(20, input_dim=2)
model = Sequential()
model.add(f_layer)
#model.add(Dense(20, activation='sigmoid'))
model.add(DefuzzyLayer(1))
model.compile(loss='logcosh', optimizer='rmsprop', metrics=['mae'])
model.fit(x_train, y_train, epochs=500, verbose=0, batch_size=100)
y_pred = model.predict(np.array(x_test))
# Inspect the learned fuzzy centers/sigmas.
weights = f_layer.get_weights()
print(weights)
plt.ion()
# Build windowed samples (2*slice values each) with one-hot labels over 3 classes.
# NOTE(review): assumes x, y, X, Y, slice, rnd and train_test_split are
# defined before this chunk — confirm.
for i in range(slice, len(x) - slice):
    # Keep every positive sample; subsample the rest at roughly 1%.
    if y[i] > 0 or np.random.random() > 0.99:
        X.append([a for a in x[(i - slice):(i + slice)]])
        tmpy = np.zeros(3)
        tmpy[int(round(y[i]))] = 1
        Y.append(tmpy)
print("Total samples:", len(X))
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
fuzzy_kernels = 40
# Seed the fuzzy centers from randomly chosen training samples
# (transposed to the (input_dim, kernels) layout).
indices = rnd.sample(range(len(x_train)), fuzzy_kernels)
f_layer = FuzzyLayer(fuzzy_kernels,
                     initial_centers=np.transpose(
                         np.array([x_train[i] for i in indices])),
                     input_dim=2 * slice)
model = Sequential()
model.add(f_layer)
model.add(Dense(25, activation='softmax'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='mean_squared_error', optimizer='rmsprop',
              metrics=['binary_accuracy'])
# NOTE(review): this fit(...) call continues past this chunk (trailing comma).
model.fit(np.array(x_train), np.array(y_train), epochs=200, verbose=0,
import numpy as np

# Classify which quadrant a 2-D point lies in, as a one-hot over 4 classes.
x_train = np.random.uniform(-1, 1, size=(1000, 2))
y_train = np.array([
    ([1, 0, 0, 0] if a[0] < 0 and a[1] < 0 else
     [0, 1, 0, 0] if a[0] < 0 and a[1] > 0 else
     [0, 0, 1, 0] if a[0] > 0 and a[1] < 0 else
     [0, 0, 0, 1]) for a in x_train
])
model = Sequential()
# 16 fuzzy kernels with hand-picked initial centers/sigmas;
# centers/sigmas are laid out as (input_dim=2, kernels=16).
model.add(
    FuzzyLayer(
        16,
        input_dim=2,
        initial_centers=[[15, 0, 15, 0, 1, 1, 1, 1, 15, 0, 15, 0, 1, 1, 1, 1],
                         [0, 15, 15, 0, 1, 1, 1, 1, 15, 0, 15, 0, 1, 1, 1, 1]],
        initial_sigmas=[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
model.add(DefuzzyLayer(4))
#model.add(Dense(4, activation='sigmoid'))
model.compile(loss='logcosh', optimizer='rmsprop', metrics=['mae', 'acc'])
model.fit(x_train, y_train, epochs=100, verbose=0, batch_size=10)
# %%
# Sanity checks: (1, 1) is quadrant index 3, (-1, -1) is quadrant index 0.
assert np.argmax(model.predict(np.array([[1, 1]]))) == 3
# %%
assert np.argmax(model.predict(np.array([[-1, -1]]))) == 0
# %%
# Windowed sequence samples where each scalar is wrapped in its own list
# (shape (2*slice, 1)) to feed the downstream LSTM; one-hot labels over 3 classes.
# NOTE(review): assumes x, y, X, Y, rnd and train_test_split are defined
# before this chunk — confirm.
slice = 5  # NOTE(review): shadows the builtin 'slice'
for i in range(slice, len(x) - slice):
    # Keep every positive sample; subsample the rest at roughly 3%.
    if y[i] > 0 or np.random.random() > 0.97:
        X.append([[a] for a in x[(i - slice):(i + slice)]])
        tmpy = np.zeros(3)
        tmpy[int(round(y[i]))] = 1
        Y.append(tmpy)
print("Total samples:",len(X))
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
fuzzy_kernels = 3
# Seed the fuzzy centers from randomly chosen training samples;
# here initial_centers is given as a callable rather than a plain array.
indices = rnd.sample(range(len(x_train)), fuzzy_kernels)
f_layer = FuzzyLayer(fuzzy_kernels,
                     initial_centers=lambda x: np.transpose(
                         np.array([x_train[i] for i in indices])),
                     input_shape = (2 * slice, 1))
model = Sequential()
model.add(f_layer)
model.add(LSTM(10))
model.add(Dense(3, activation='softmax'))
model.compile(loss='mean_squared_error', optimizer='rmsprop',
              metrics=['categorical_crossentropy'])
model.fit(np.array(x_train), np.array(y_train), epochs=100, verbose=1, batch_size=1)
from fuzzy_layer import FuzzyLayer
import tensorflow as tf
from tensorflow.python.client import device_lib

print(device_lib.list_local_devices())
#%%
# Autoencoder with a fuzzy bottleneck: 784 -> 32 -> 2 -> 10 fuzzy kernels -> 2 -> 32 -> 784.
fuzzy_kernels = 10
fuzzy_inputs = 2
# Materialize concrete initial tensors for the fuzzy centers and sigmas,
# laid out as (input_dim, kernels).
centroids_init_values = tf.random_uniform_initializer(0, 1)(shape=(fuzzy_inputs, fuzzy_kernels), dtype="float32")
sigma_init_values = tf.constant_initializer(1e-1)(shape=(fuzzy_inputs, fuzzy_kernels), dtype="float32")
# NOTE(review): Input, Dense, Model presumably come from keras imports
# outside this chunk — confirm.
input_img = Input(shape=(784,))
encoded = Dense(32, activation='sigmoid')(input_img)
encoded = Dense(fuzzy_inputs, activation='sigmoid')(encoded)
f_layer = FuzzyLayer(fuzzy_kernels, initial_centers=centroids_init_values, initial_sigmas=sigma_init_values)
encoded = f_layer(encoded)
decoded = Dense(fuzzy_inputs, activation='sigmoid')(encoded)
decoded = Dense(32, activation='sigmoid')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)
autoencoder = Model(input_img, decoded)
encoder = Model(input_img, encoded)
# Capture handles to the last three layers so a standalone decoder can be
# assembled from them (presumably completed past this chunk).
encoded_input = Input(shape=(fuzzy_kernels,))
decoder_layer3 = autoencoder.layers[-3]
decoder_layer2 = autoencoder.layers[-2]
decoder_layer1 = autoencoder.layers[-1]
sys.path.insert(0, '../layers')
import keras
from fuzzy_layer import FuzzyLayer
from defuzzy_layer import DefuzzyLayer
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD

# Generate dummy data
import numpy as np
x_train = np.random.normal(5, 1, size=(100, 2))
y_train = np.random.normal(5, 15, size=(100, 4))

# Dense -> Fuzzy -> Dense -> Defuzzy pipeline trained on the random data.
model = Sequential()
model.add(Dense(2, activation='sigmoid'))
model.add(FuzzyLayer(8))
model.add(Dense(8, activation='sigmoid'))
model.add(DefuzzyLayer(4))
model.compile(loss='logcosh', optimizer='rmsprop', metrics=['mae', 'acc'])
model.fit(x_train, y_train, epochs=10000, verbose=0, batch_size=100)

# Spot-check predictions at a few fixed points (printed, not asserted).
print(model.predict(np.array([[5, 5]])))
print(model.predict(np.array([[5, 15]])))
print(model.predict(np.array([[15, 5]])))
print(model.predict(np.array([[15, 15]])))
print('Done')
#%%
# Logistic-map sequence example: predict the next value from the previous two.
# NOTE(review): x_n, l, x, y, x_test and y_test are defined before this
# chunk, and the first five statements here likely belong to a training-data
# loop whose header is cut off by the chunk boundary — confirm.
x_old = x_n
x_n = l * x_n * (1 - x_n)
x_nplus = l * x_n * (1 - x_n)
x.append([[x_old], [x_n]])
y.append([x_nplus])
# Build a held-out test set from the next 100 iterations of the map.
for i in range(0, 100):
    x_old = x_n
    x_n = l * x_n * (1 - x_n)
    x_nplus = l * x_n * (1 - x_n)
    x_test.append([[x_old], [x_n]])
    y_test.append([x_nplus])
x_train = np.array(x)
y_train = np.array(y)
# Fuzzy -> LSTM -> Defuzzy on (2, 1)-shaped two-step windows.
model = Sequential()
model.add(FuzzyLayer(40, input_shape=(2, 1)))
model.add(LSTM(20))
model.add(DefuzzyLayer(1))
model.compile(loss='logcosh', optimizer='rmsprop', metrics=['mae'])
model.fit(x_train, y_train, epochs=1000, verbose=0, batch_size=1)
score = model.evaluate(np.array(x_test), np.array(y_test), verbose=True)
print(score)
#%%
#%% import sys sys.path.insert(0, '../layers') from ast import Assert from fuzzy_layer import FuzzyLayer from keras.models import Sequential import numpy as np import tensorflow as tf fuzzy_layer = FuzzyLayer(output_dim=4, input_dim=2) x = tf.random.uniform((500, 2)) y = fuzzy_layer(x) # %% assert fuzzy_layer.weights == [fuzzy_layer.c, fuzzy_layer.a] # %% print("weights:", len(fuzzy_layer.weights)) print("non-trainable weights:", len(fuzzy_layer.non_trainable_weights)) print("trainable_weights:", fuzzy_layer.trainable_weights) # %% model = Sequential() model.add(fuzzy_layer) model.compile(loss='mse', optimizer='rmsprop', metrics=['mae', 'acc']) model.fit(x, np.array( [([1,0,0,0] if a[0]<0.5 and a[1]<0.5 else [0,1,0,0] if a[0]<0.5 and a[1]>0.5 else [0,0,1,0] if a[0]>0.5 and a[1]<0.5 else [0,0,0,1]) for a in x]), epochs=1000, verbose=0,
from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from fuzzy_layer import FuzzyLayer
from tensorflow.python.client import device_lib
from keras.utils import to_categorical

print(device_lib.list_local_devices())
#%%
# MNIST classifier: 784 -> 256 -> 2-D bottleneck -> 100 fuzzy kernels -> softmax(10).
input_img = Input(shape=(784, ))
model = Dense(256)(input_img)
model = Dense(2)(model)
f_layer = FuzzyLayer(100)
model = f_layer(model)
model = Dense(10, activation='softmax')(model)
mnist_classifier = Model(input_img, model)
#%%
mnist_classifier.compile(optimizer='adagrad', loss='categorical_crossentropy', metrics=['mae', 'acc'])
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale pixels to [0, 1] and flatten each 28x28 image to a 784-vector.
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# NOTE(review): y_test is presumably one-hot encoded past this chunk — confirm.
y_train = to_categorical(y_train, 10)
# Breast-cancer classification with fuzzy kernels seeded from training samples.
# NOTE(review): datasets, rnd and train_test_split are imported outside this
# chunk — confirm.
data = datasets.load_breast_cancer()
# One-hot encode the binary target.
Y = []
for y in data.target:
    tmp = np.zeros(2)
    tmp[y] = 1
    Y.append(tmp)
x_train, x_test, y_train, y_test = train_test_split(data.data, Y, test_size=0.1)
K = 25  # number of fuzzy kernels; NOTE(review): would shadow a 'K' backend alias if one is imported
indices = rnd.sample(range(len(x_train)), K)
# initial_centers is given as a callable returning the (input_dim, kernels)
# matrix of sampled training rows.
f_layer = FuzzyLayer(K,
                     initial_centers=lambda x: np.transpose(
                         np.array([x_train[i] for i in indices])),
                     input_dim=30)
model = Sequential()
model.add(f_layer)
model.add(Dense(15, activation='softmax'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='mean_squared_error', optimizer='rmsprop',
              metrics=['binary_accuracy'])
# NOTE(review): this fit(...) call continues past this chunk (trailing comma).
model.fit(np.array(x_train), np.array(y_train), epochs=10000, verbose=1,
# Convolutional VAE on MNIST with a FuzzyLayer at the decoder input.
# NOTE(review): keras, layers, Sampling and VAE are defined outside this
# chunk — confirm.
latent_dim = 2

# Encoder: 28x28x1 -> two strided convs -> dense(16) -> (z_mean, z_log_var, z).
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
# %%
# Decoder: latent vector -> 30 fuzzy memberships -> transposed-conv stack -> 28x28x1 image.
latent_inputs = keras.Input(shape=(latent_dim,))
x = FuzzyLayer(30)(latent_inputs)
x = layers.Dense(7 * 7 * 64, activation="relu")(x)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
# %%
# Train on the combined train+test digits, scaled to [0, 1] with a channel axis.
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
mnist_digits = np.concatenate([x_train, x_test], axis=0)
mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255
vae = VAE(encoder, decoder)
vae.compile(optimizer=keras.optimizers.Adam())