Example no. 1
0
    def __init__(self):

        conv1 = conv2d()
        Max1 = MaxPooling(conv1)

        conv2 = conv2d(Max1)
        Max2 = MaxPooling(conv2)

        conv3 = conv2d(Max2)
        Max3 = MaxPooling(conv3)

        Dropout1 = Dropout(Max3)

        flat1 = Flatten(Dropout1)

        Dense1 = Dense(flat1)
        Dense2 = Dense(Dense1)
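# A minimal runnable sketch of the same stack in Keras (not part of the original
# example); the filter counts, kernel sizes, input shape and layer widths below
# are assumptions chosen only for illustration.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

sketch = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Dropout(0.5),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax'),
])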
Example no. 2
0
def TTV_Split(i, Memory, X, y, params, X_fill):
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.1,
                                                        random_state=i)
    X_train, X_val, y_train, y_val = train_test_split(X_train,
                                                      y_train,
                                                      test_size=0.11,
                                                      random_state=i)
    return (Dense.Train_Steps(params['epochs'],
                              params['N'],
                              X_train,
                              X_test,
                              X_val,
                              y_train,
                              y_test,
                              y_val,
                              i,
                              X_fill=X_fill,
                              Memory=Memory))
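# Illustrative check (not part of the original): the two nested splits above give
# roughly 80% train / 10% validation / 10% test, because 0.11 of the remaining 90%
# is about 10% of the full set. The dummy arrays below are assumptions.
import numpy as np
from sklearn.model_selection import train_test_split

_X, _y = np.arange(1000).reshape(-1, 1), np.arange(1000)
_X_tr, _X_te, _y_tr, _y_te = train_test_split(_X, _y, test_size=0.1, random_state=0)
_X_tr, _X_va, _y_tr, _y_va = train_test_split(_X_tr, _y_tr, test_size=0.11, random_state=0)
print(len(_X_tr), len(_X_va), len(_X_te))  # 801 99 100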
Example no. 3
0

if __name__ == "__main__":

    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(flatten=True)

    plt.figure(figsize=[6, 6])
    for i in range(4):
        plt.subplot(2, 2, i + 1)
        plt.title("Label: %i" % y_train[i])
        plt.imshow(X_train[i].reshape([28, 28]), cmap='gray')

    input_shape = 0
    network = []

    network.append(Dense.Dense(X_train.shape[1], 100))
    network.append(ReLU.ReLU())
    network.append(Dense.Dense(100, 200))
    network.append(ReLU.ReLU())
    network.append(Dense.Dense(200, 10))

    train_log = []
    val_log = []

    for epoch in range(25):

        for x_batch, y_batch in iterate_minibatches(X_train, y_train, batchsize=32, shuffle=True):
            train(network, x_batch, y_batch)

        train_log.append(np.mean(predict(network, X_train) == y_train))
        val_log.append(np.mean(predict(network, X_val) == y_val))
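        # Optional progress print per epoch (not in the original listing)
        print("Epoch", epoch,
              "- train acc:", train_log[-1],
              "- val acc:", val_log[-1])

    # After training, the two logs can be plotted with the matplotlib instance
    # already imported for the label figure above (illustrative addition).
    plt.figure(figsize=[6, 6])
    plt.plot(train_log, label='train accuracy')
    plt.plot(val_log, label='val accuracy')
    plt.xlabel('epoch')
    plt.legend()
    plt.grid()
    plt.show()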
Example no. 4
0
from keras.layers import Input, Dense
from keras.models import Model

# this is the size of our encoded representations
encoding_dim = 32  # 32 floats -> compression of factor 24.5, assuming the input is 784 floats

# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
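# The listing stops before training; a minimal continuation, assuming x_train and
# x_test are the usual MNIST arrays flattened to 784 floats and scaled to [0, 1]:
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))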
Example no. 5
0
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_train = x_train.astype('float32')
x_train /= 255
# encode output which is a number in range [0,9] into a vector of size 10
# e.g. number 3 will become [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
y_train = utils.to_categorical(y_train)

# same for test data : 10000 samples
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_test = x_test.astype('float32')
x_test /= 255
y_test = utils.to_categorical(y_test)

neural = Network.Network()
neural.add(Conv2D.ConvLayer((28, 28, 1), 64))
neural.add(Activation.Activation(tanh, tanh_prime))
neural.add(Conv2D.ConvLayer((26, 26, 64), 16))
neural.add(Activation.Activation(tanh, tanh_prime))
neural.add(Flatten.Flatten())
neural.add(Dense.Dense(24 * 24 * 16, 100))
neural.add(Activation.Activation(tanh, tanh_prime))
neural.add(Dense.Dense(100, 10))
neural.add(Activation.Activation(tanh, tanh_prime))

neural.use(loss.mse, loss.mse_prime)
neural.fit(x_train[0:4000], y_train[0:4000], 15, 0.1)

print(neural.predict(x_test[0:5]))

print(y_test[0:5])
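# Illustrative only: the two printouts above can be compared via argmax, since
# y_test is one-hot encoded. The reshape assumes predict returns exactly ten
# scores per sample.
import numpy as np

preds = np.argmax(np.array(neural.predict(x_test[0:5])).reshape(5, 10), axis=1)
truth = np.argmax(y_test[0:5], axis=1)
print("predicted classes:", preds, "expected classes:", truth)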
Example no. 6
0
# load DenseNet121 (ImageNet weights, without the classification top) as the feature extractor
dense_121 = DenseNet121(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

for layer in dense_121.layers:
  layer.trainable = False



# useful for getting number of classes
folders = glob('Datasets/Train/*')


# Number of layers - add more if you want
x = Flatten()(dense_121.output)

prediction = Dense(len(folders), activation='softmax')(x)

# create a model object
model = Model(inputs=dense_121.input, outputs=prediction)

# view the structure of the model
model.summary()

# Compiling the model
model.compile(
  loss='categorical_crossentropy',
  optimizer='adam',
  metrics=['accuracy']
)

from keras.preprocessing.image import ImageDataGenerator
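# The ImageDataGenerator import above is unused in the listing; a typical
# continuation (directory layout, image size, batch size and epoch count are
# assumptions) would be:
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
training_set = train_datagen.flow_from_directory('Datasets/Train',
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')
# recent Keras versions accept the generator directly in fit();
# older versions use model.fit_generator instead
r = model.fit(training_set, epochs=5)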
Example no. 7
0

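# The loop below assumes its counters and stopping variable already exist; a
# plausible initialization (the starting values are assumptions) would be:
accuracy = 0
flag = 0
test = 0
neurons = 32
kernel = 32
epochs = 1
batch_size = 128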
while int(accuracy) < 90:
    if flag == 1:
        # clear_session() returns None; it only resets the Keras graph state
        keras.backend.clear_session()
        neurons = neurons + 10
        epochs = epochs + 1
        kernel = kernel + 1
        test = test + 1
    print("***TRIAL*****:", test, "------------")
    model = Sequential()
    model.add(Conv2D(kernel, (3, 3), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(neurons, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer="Adam", loss='categorical_crossentropy', metrics=['accuracy'])
    model_predict = model.fit(train_x, train_y, batch_size=batch_size,
                              verbose=1, epochs=epochs,
                              validation_data=(test_x, test_y), shuffle=True)
    scores = model.evaluate(test_x, test_y, verbose=False)
    print("Test loss:", scores[0] * 100)
    print("**ACCURACY OF THE MODEL IS :", scores[1] * 100)
    accuracy = scores[1] * 100
    print("----------------------")
    print()
    print()
    flag = 1
print("Total number of epochs:", epochs)
Example no. 8
0
X = tokenizer.texts_to_sequences(df['Statement'].values)
X = pad_sequences(X)


# In[ ]:


embed_dim = 128
lstm_out = 196

model = Sequential()
model.add(Embedding(max_fatures, embed_dim,input_length = X.shape[1]))
model.add(Dropout(0.5))
model.add(LSTM(128,dropout=0.4, recurrent_dropout=0.4,return_sequences=True))
model.add(LSTM(64,dropout=0.5, recurrent_dropout=0.5,return_sequences=False))
model.add(Dense(2,activation='sigmoid',kernel_initializer='glorot_normal'))
model.compile(loss = 'categorical_crossentropy', optimizer='Nadam',metrics = ['accuracy'])
print(model.summary())


# In[ ]:


Y = pd.get_dummies(df['Label']).values
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.20, random_state = 42)
print(X_train.shape,Y_train.shape)
print(X_test.shape,Y_test.shape)


# In[ ]:
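# The listing stops before training; a plausible continuation (batch size and
# epoch count are assumptions) would be:
history = model.fit(X_train, Y_train,
                    epochs=5,
                    batch_size=32,
                    validation_data=(X_test, Y_test))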
Example no. 9
0
#                    activation=Linear())],
#     loss=MeanSquaredError(),
#     seed=20190501
# )

# nn = NeuralNetwork(
#     layers=[Dense(neurons=13,
#                    activation=Sigmoid()),
#             Dense(neurons=1,
#                    activation=Linear())],
#     loss=MeanSquaredError(),
#     seed=20190501
# )

dl = NeuralNetwork(layers=[
    Dense(neurons=13, activation=Sigmoid()),
    Dense(neurons=13, activation=Sigmoid()),
    Dense(neurons=1, activation=Linear())
],
                   loss=MeanSquaredError(),
                   seed=20190501)

boston = load_boston()
data = boston.data
target = boston.target
features = boston.feature_names
s = StandardScaler()
data = s.fit_transform(data)

X_train, X_test, y_train, y_test = train_test_split(data,
                                                    target,
Example no. 10
0
    temp_img=image.img_to_array(temp_img)

    test_img.append(temp_img)

test_img=np.array(test_img) 
test_img=preprocess_input(test_img)

from keras.models import Model

def vgg16_model(img_rows, img_cols, channel=1, num_classes=None):

    model = VGG16(weights='imagenet', include_top=True)

    model.layers.pop()

    model.outputs = [model.layers[-1].output]

    model.layers[-1].outbound_nodes = []

    x = Dense(num_classes, activation='softmax')(model.output)

    model = Model(model.input, x)

    # To set the first 8 layers to non-trainable (weights will not be updated)
    for layer in model.layers[:8]:
        layer.trainable = False

    # Learning rate is changed to 0.001
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    return model

train_y=np.asarray(train['label'])

le = LabelEncoder()

train_y = le.fit_transform(train_y)
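# vgg16_model is only defined above; a hypothetical call and training run
# (train_img is assumed to be the preprocessed training array built the same way
# as test_img, and the image size, batch size and epoch count are assumptions):
from keras.utils import to_categorical

model = vgg16_model(224, 224, channel=3, num_classes=len(le.classes_))
model.fit(train_img, to_categorical(train_y),
          batch_size=16, epochs=10, validation_split=0.2)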
Example no. 11
0
# flattening the layers to conform to MLP input
train_x = features_train.reshape(49000, 25088)

# converting target variable to array
train_y = np.asarray(train['label'])

# performing one-hot encoding for the target variable
train_y = pd.get_dummies(train_y)
train_y = np.array(train_y)

# creating training and validation set
from sklearn.model_selection import train_test_split

X_train, X_valid, Y_train, Y_valid = train_test_split(train_x, train_y, test_size=0.3, random_state=42)

# creating an MLP model
from keras.layers import Dense, Dropout, Activation

model = Sequential()

model.add(Dense(1000, input_dim=25088, activation='relu', kernel_initializer='uniform'))
# Dropout layers must be added to the model; constructing them on their own has no effect
model.add(Dropout(0.3))

model.add(Dense(500, activation='sigmoid'))
model.add(Dropout(0.4))

model.add(Dense(150, activation='sigmoid'))
model.add(Dropout(0.2))

model.add(Dense(units=10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])

# fitting the model
model.fit(X_train, Y_train, epochs=20, batch_size=128, validation_data=(X_valid, Y_valid))
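# Optional check (not in the original listing): accuracy on the validation split.
val_loss, val_acc = model.evaluate(X_valid, Y_valid, verbose=0)
print("validation accuracy:", val_acc)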
Example no. 12
0
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout regularization layer
model.add(Dropout(0.25))

# Third convolutional layer
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
# Fourth convolutional layer
model.add(Conv2D(64, (3, 3), activation='relu'))
# Second subsampling (pooling) layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout regularization layer
model.add(Dropout(0.25))
# Layer that flattens the 2D representation into a vector
model.add(Flatten())
# Fully connected layer for classification
model.add(Dense(512, activation='relu'))
# Dropout regularization layer
model.add(Dropout(0.5))
# Fully connected output layer
model.add(Dense(nb_classes, activation='softmax'))

# Set the optimization parameters
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# Train the model
model.fit(X_train, Y_train,
          batch_size=batch_size,
          epochs=nb_epoch,
          validation_split=0.1)
Example no. 13
0
    def __init__(self):
        """ Init

            Set all the layers that need to be tracked in the process of
            gradient descent (pooling and dropout layers, for example, don't need
            to be stored)
        """

        super(PiModel, self).__init__()
        self._conv1a = Conv2D.Conv2D(filters=128, kernel_size=[3, 3],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv1b = Conv2D.Conv2D(filters=128, kernel_size=[3, 3],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv1c = Conv2D.Conv2D(filters=128, kernel_size=[3, 3],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._pool1 = tf.keras.layers.MaxPool2D(
            pool_size=2, strides=2, padding="same")
        self._dropout1 = tf.keras.layers.Dropout(0.5)

        self._conv2a = Conv2D.Conv2D(filters=256, kernel_size=[3, 3],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv2b = Conv2D.Conv2D(filters=256, kernel_size=[3, 3],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv2c = Conv2D.Conv2D(filters=256, kernel_size=[3, 3],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._pool2 = tf.keras.layers.MaxPool2D(
            pool_size=2, strides=2, padding="same")
        self._dropout2 = tf.keras.layers.Dropout(0.5)

        self._conv3a_sup = Conv2D.Conv2D(filters=512, kernel_size=[3, 3],
                                                        padding="valid", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv3b_sup = Conv2D.Conv2D(filters=256, kernel_size=[1, 1],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv3c_sup = Conv2D.Conv2D(filters=128, kernel_size=[1, 1],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
                                                        
        self._conv3a_unsup = Conv2D.Conv2D(filters=512, kernel_size=[3, 3],
                                                        padding="valid", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv3b_unsup = Conv2D.Conv2D(filters=256, kernel_size=[1, 1],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)
        self._conv3c_unsup = Conv2D.Conv2D(filters=128, kernel_size=[1, 1],
                                                        padding="same", activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                                                        kernel_initializer=tf.keras.initializers.he_uniform(),
                                                        bias_initializer=tf.keras.initializers.constant(
                                                            0.1),
                                                        weight_norm=True, mean_only_batch_norm=True)

        self._dense_sup = Dense.Dense(units=2, activation=tf.nn.softmax,
                                                     kernel_initializer=tf.keras.initializers.he_uniform(),
                                                     bias_initializer=tf.keras.initializers.constant(
                                                         0.1),
                                                     weight_norm=True, mean_only_batch_norm=True)
        self._dense_unsup = Dense.Dense(units=2, activation=tf.nn.softmax,
                                                     kernel_initializer=tf.keras.initializers.he_uniform(),
                                                     bias_initializer=tf.keras.initializers.constant(
                                                         0.1),
                                                     weight_norm=True, mean_only_batch_norm=True)
Example no. 14
0
train_x = list(training[:,0])
train_y = list(training[:,1])
print("Training data created")

"""# 4. Build machine learning model

After creating training data, build a deep neaural network that has 3 layers. The following code uses Keras' sequential API. 

The model is trained for 200 epochs, achieving 100% accuracy on the model. 

"""

# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons
# equal to number of intents to predict output intent with softmax
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

# fitting and saving the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5')

print("model created")