def get_cnn(input_data, num_labels):
    """Build and compile a plain three-conv CNN classifier.

    :param input_data: sample array whose ``.shape`` defines the input layer
    :param num_labels: number of output classes
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    # (filters, kernel_size) for each conv stage, in order.
    conv_specs = [(16, 2), (64, 3), (128, 3)]
    for index, (filters, ksize) in enumerate(conv_specs):
        if index == 0:
            model.add(Conv2D(filters, kernel_size=ksize, activation='relu',
                             input_shape=input_data.shape))
        else:
            model.add(Conv2D(filters, kernel_size=ksize, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    model.compile(optimizer=Adamax(learning_rate=LEARNING_RATE),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def get_stacked_cnn_lstm(input_data, num_labels):
    """Time-distributed CNN feature extractor feeding an LSTM classifier.

    :param input_data: sample array whose ``.shape`` defines the input layer
    :param num_labels: number of output classes
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    model.add(TimeDistributed(Conv2D(16, kernel_size=3, activation='relu',
                                     input_shape=input_data.shape)))
    model.add(TimeDistributed(Conv2D(64, kernel_size=5, activation='relu')))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(units=2048))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    # Bug fix: the configured Adamax optimizer above was previously discarded
    # and the model compiled with the string 'sgd'. Use the configured
    # optimizer, consistent with the other model builders.
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    return model
def create_model():
    """Train a small dense classifier on the concatenated gray+color dataset.

    Loads feature/label ``.npy`` files, label-encodes the classes, trains a
    4-layer MLP with best-accuracy checkpointing to ``sdr_model.h5``, then
    prints held-out accuracy and a sample of true vs. predicted labels.
    """
    checkpoint = ModelCheckpoint('sdr_model.h5', monitor='accuracy',
                                 verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    gray_data = np.load("npy_data/gray_dataset.npy")
    color_data = np.load("npy_data/color_dataset.npy")
    label = np.load("npy_data/label.npy")

    dataset = pre_processing.npy_dataset_concatenate(gray_data, color_data)

    # Map string labels to integer class ids for the sparse loss below.
    le = preprocessing.LabelEncoder()
    label = le.fit_transform(label)
    x_train, x_test, y_train, y_test = train_test_split(
        dataset, label, test_size=0.20, shuffle=True)

    model = Sequential()
    # First layer is a linear projection (activation=None) over the
    # 14-dimensional feature vector.
    model.add(Dense(14, input_dim=14, activation=None))
    model.add(Dense(128, activation='tanh'))
    model.add(Dense(256, activation='sigmoid'))
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=150, verbose=0, batch_size=20,
              shuffle=True, callbacks=callbacks_list)

    # Bug fix: Sequential.predict_classes() was removed from Keras
    # (TF >= 2.6); derive class ids from the softmax probabilities instead.
    pred_y_test = np.argmax(model.predict(x_test), axis=1)
    acc_model = accuracy_score(y_test, pred_y_test)
    print("Prediction Acc model:", acc_model)
    print("Org. Labels:", y_test[:30])
    print("Pred Labels:", (pred_y_test[:30]))
    print("\n\n")
def create_model(embeddings_file: str, input_size, input_length, hidden_size):
    """Create a simple regression model with a single embedding layer.

    Embedding vectors are summed over the sequence axis and passed to a
    92-unit sigmoid output layer.

    :param embeddings_file: embeddings file to load (``None`` to skip)
    :param input_size: size of input layer
    :param input_length: length of the integer input sequences
    :param hidden_size: size of embeddings
    :return: Keras model
    """
    model = Sequential()
    embedding = Embedding(input_size, hidden_size,
                          input_length=input_length, name='embedding')
    model.add(embedding)
    # Sum the embedding vectors along the sequence dimension.
    model.add(keras.layers.Lambda(lambda t: keras.backend.sum(t, axis=1)))
    model.add(Dense(92, activation="sigmoid"))
    if embeddings_file is not None:
        pretrained = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([pretrained])
    return model
def create_model(embeddings_file: str, input_size, hidden_size, output_size):
    """Create a simple regression model with a single embedding layer.

    A single-token embedding is flattened and mapped to a sigmoid output.

    :param embeddings_file: embeddings file to load (``None`` to skip)
    :param input_size: size of input layer
    :param hidden_size: size of embeddings
    :param output_size: size of output layer
    :return: Keras model
    """
    model = Sequential()
    model.add(Embedding(input_size, hidden_size, input_length=1,
                        name='embedding'))
    # Optionally seed the embedding with pretrained weights.
    if embeddings_file is not None:
        pretrained = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([pretrained])
    model.add(Flatten())
    model.add(Dense(output_size, activation="sigmoid"))
    return model
# Regression network: 57 -> 100 -> 40 -> 20 hidden ReLU layers, single
# ReLU output, trained with mean squared logarithmic error.
NNmodel = Sequential()
for width in (57, 100, 40, 20):
    NNmodel.add(Dense(width, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(1, kernel_initializer='normal', activation='relu'))
NNmodel.compile(loss='mean_squared_logarithmic_error',
                optimizer='adam',
                metrics=['mean_squared_logarithmic_error'])

# Align the test frame with the training columns: add the dummy column the
# test split lacks, then keep only columns whose names start with a
# training-column prefix.
df_test['weather_4'] = 0
kept_columns = [
    col for col in all_columns if col.startswith(tuple(train_columns))
]
df_test = df_test[kept_columns]
def create_model(input_shape: tuple, label_count: int):
    """Create the neural network model for character classification.

    :param input_shape: shape tuple of a single input image, e.g. (H, W, C)
        (the previous ``int`` annotation was incorrect — Conv2D's
        ``input_shape`` expects a shape tuple)
    :param label_count: number of output classes (0-9, A-z)
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(4, 4), activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    # 64 3x3 kernels
    model.add(Conv2D(64, (3, 3), activation='relu'))
    # Reduce by taking the max of each 2x2 block
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dropout to avoid overfitting
    model.add(Dropout(0.25))
    # Flatten the results to one dimension for passing into our final layer
    model.add(Flatten())
    # Hidden layers to learn with
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    # Another dropout
    model.add(Dropout(0.5))
    # Final categorization 0-9, A-z with softmax
    model.add(Dense(label_count, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
def MNIST_CNY19(classes, input_shape, weights=None):
    """LeNet-style MNIST classifier (CNY19 variant).

    :param classes: number of output classes
    :param input_shape: shape of a single input image
    :param weights: currently unused; kept for signature compatibility
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    model.add(Convolution2D(40, (5, 5), strides=(1, 1),
                            input_shape=input_shape, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(20, (5, 5), strides=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    # Tapering dense head: 320 -> 160 -> 80 -> 40.
    for units in (320, 160, 80, 40):
        model.add(Dense(units, activation='relu'))
    model.add(Dense(classes, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# Label each row 1 when the next day's close is higher than today's close.
dataset['Price_Rise'] = np.where(
    dataset['Close'].shift(-1) > dataset['Close'], 1, 0)
dataset.dropna(inplace=True)

X = dataset.iloc[:, 4:-1]
y = dataset.iloc[:, -1]

# Chronological 80/20 split — no shuffling for time-series data.
split = int(len(dataset) * 0.8)
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
y_train = y_train.to_numpy()

classifier = Sequential()
classifier.add(Dense(units=64, activation='relu', input_dim=X.shape[1]))
classifier.add(Dense(units=64, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
# NOTE(review): mean_squared_error on a sigmoid output trains, but
# binary_crossentropy is the conventional loss for this setup — kept as-is
# to preserve behavior.
classifier.compile(optimizer='adam', loss='mean_squared_error',
                   metrics=['accuracy'])
classifier.fit(X_train, y_train, batch_size=10, epochs=50)

y_pred = classifier.predict(X_test)
# Map sigmoid outputs to trading signals: >= 0.5 -> +1, < 0.5 -> -1.
y_pred = (2 * np.round(y_pred) - 1)

# Bug fix: np.NaN was removed in NumPy 2.0; np.nan is the supported spelling.
dataset['y_pred'] = np.nan
dataset.iloc[(len(dataset) - len(y_pred)):, -1:] = y_pred
trade_dataset = dataset.dropna()
def _make_layers(self):
    """Assemble the 48x48x1 -> 7-class emotion CNN (uncompiled)."""
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     input_shape=(48, 48, 1)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Two identical 128-filter conv/pool stages.
    for _ in range(2):
        model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(7, activation='softmax'))
    return model
def CIFAR_CNY19(classes, input_shape, weights=None):
    """CIFAR classifier: two conv/BN/ReLU/pool stages and a dense head.

    :param classes: number of output classes
    :param input_shape: shape of a single input image
    :param weights: currently unused; kept for signature compatibility
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    for stage, filters in enumerate((40, 20)):
        if stage == 0:
            model.add(Convolution2D(filters, (5, 5), strides=(1, 1),
                                    input_shape=input_shape))
        else:
            model.add(Convolution2D(filters, (5, 5), strides=(1, 1)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(240, activation='relu'))
    model.add(Dense(84, activation='relu'))
    model.add(Dense(classes, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_datasets as tfds from tensorflow_core.python.keras.layers import Embedding from tensorflow_core.python.keras.models import Sequential tfds.disable_progress_bar() model = Sequential() model.add(Embedding(1000, 64, input_length=10)) # the model will take as input an integer matrix of size (batch, # input_length). # the largest integer (i.e. word index) in the input should be no larger # than 999 (vocabulary size). # now model.output_shape == (None, 10, 64), where None is the batch # dimension. input_array = np.random.randint(1000, size=(32, 10)) model.compile('rmsprop', 'mse') output_array = model.predict(input_array) assert output_array.shape == (32, 10, 64) embedding_layer = layers.Embedding(1000, 5)
def get_pen_cnn(input_data, num_labels):
    """CNN with one pooling stage and a dropout-regularized dense head.

    :param input_data: sample array whose ``.shape`` defines the input layer
    :param num_labels: number of output classes
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
                     input_shape=input_data.shape))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adam(learning_rate=.0003)
    # Bug fix: compile() has no ``opt`` keyword — the Adam optimizer was
    # never applied. Pass it via ``optimizer=``.
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    return model
def get_clstm(input_data, num_labels):
    """Two-stage ConvLSTM2D classifier with batch norm after each stage.

    :param input_data: sample array whose ``.shape`` defines the input layer
    :param num_labels: number of output classes
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    # First ConvLSTM stage keeps the time axis for the second stage.
    model.add(ConvLSTM2D(filters=16,
                         kernel_size=(3, 3),
                         input_shape=input_data.shape,
                         padding='same',
                         return_sequences=True))
    model.add(BatchNormalization())
    # Second stage collapses the sequence to a single feature map.
    model.add(ConvLSTM2D(filters=64,
                         kernel_size=(5, 5),
                         padding='same',
                         return_sequences=False))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    model.compile(optimizer=Adamax(learning_rate=LEARNING_RATE),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def get_cnn_adv(input_data, num_labels):
    """CNN whose first kernel spans a quarter of the input's first axis.

    :param input_data: sample array whose ``.shape`` defines the input layer
    :param num_labels: number of output classes
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    div = 4
    model.add(Conv2D(64, kernel_size=(input_data.shape[0] // div, 1),
                     activation='relu', input_shape=input_data.shape))
    model.add(Conv2D(128, kernel_size=(div, 8), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=KeyConstants.ELR)
    # Bug fix: compile() has no ``opt`` keyword — the Adamax optimizer was
    # never applied. Pass it via ``optimizer=``.
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    return model