def create_model(embeddings_file: str, input_size, input_length, hidden_size):
    """
    Create a simple regression model with a single embedding layer whose
    outputs are summed over the sequence axis.

    :param embeddings_file: embeddings file to load (optional)
    :param input_size: size of the input vocabulary
    :param input_length: length of each input sequence
    :param hidden_size: size of the embeddings
    :return: Keras model
    """
    model = Sequential()
    model.add(
        Embedding(input_size,
                  hidden_size,
                  input_length=input_length,
                  name='embedding'))
    # Sum the embeddings over the sequence axis instead of flattening.
    model.add(keras.layers.Lambda(lambda x: keras.backend.sum(x, axis=1)))
    model.add(Dense(92, activation="sigmoid"))
    if embeddings_file is not None:
        embeddings = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([embeddings])
    return model
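# Hedged usage sketch for the model above (sizes are illustrative and no
# pretrained embeddings file is assumed):
# model = create_model(None, input_size=1000, input_length=10, hidden_size=64)
# ids = np.random.randint(1000, size=(4, 10))  # batch of 4 sequences
# model.predict(ids).shape                     # (4, 92): summed embeddings -> Dense(92)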
def sequential_nn_model(X_train, y_train):
    model = Sequential([
        Dense(100, activation='relu', input_shape=(X_train.shape[1],)),
        Dense(40, activation='relu'),
        Dense(20, activation='relu'),
        Dense(1, activation='relu')
    ])
    model.compile(optimizer='nadam',
                  loss=rmsle,
                  metrics=['mean_squared_logarithmic_error'])
    hist = model.fit(X_train, y_train, epochs=50)
    return model
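# `rmsle` above is a custom loss that is not defined in this snippet. A
# minimal sketch of a Keras-compatible RMSLE, assuming a TF 2.x backend
# (the clamp to non-negative predictions is an added assumption so the
# logarithm stays defined):
import tensorflow.keras.backend as K

def rmsle(y_true, y_pred):
    y_pred = K.maximum(y_pred, 0.0)  # guard against negative predictions
    return K.sqrt(K.mean(K.square(K.log(1. + y_pred) - K.log(1. + y_true))))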
def __init__(self,
             state_size: int,
             action_size: int,
             representation_size: int,
             max_value: int,
             hidden_neurons: int = 64,
             weight_decay: float = 1e-4,
             representation_activation: str = 'tanh'):
    self.state_size = state_size
    self.action_size = action_size
    self.value_support_size = math.ceil(math.sqrt(max_value)) + 1

    regularizer = regularizers.l2(weight_decay)
    representation_network = Sequential([
        Dense(hidden_neurons, activation='relu',
              kernel_regularizer=regularizer),
        Dense(representation_size,
              activation=representation_activation,
              kernel_regularizer=regularizer)
    ])
    value_network = Sequential([
        Dense(hidden_neurons, activation='relu',
              kernel_regularizer=regularizer),
        Dense(self.value_support_size, kernel_regularizer=regularizer)
    ])
    policy_network = Sequential([
        Dense(hidden_neurons, activation='relu',
              kernel_regularizer=regularizer),
        Dense(action_size, kernel_regularizer=regularizer)
    ])
    dynamic_network = Sequential([
        Dense(hidden_neurons, activation='relu',
              kernel_regularizer=regularizer),
        Dense(representation_size,
              activation=representation_activation,
              kernel_regularizer=regularizer)
    ])
    reward_network = Sequential([
        Dense(16, activation='relu', kernel_regularizer=regularizer),
        Dense(1, kernel_regularizer=regularizer)
    ])

    super().__init__(representation_network, value_network, policy_network,
                     dynamic_network, reward_network)
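# Worked example for the value head sizing above (illustrative number):
# with max_value = 500, value_support_size = ceil(sqrt(500)) + 1 = 23 + 1
# = 24, i.e. the value network outputs logits over 24 bins of a
# sqrt-compressed scalar, a MuZero-style categorical value support.
import math
assert math.ceil(math.sqrt(500)) + 1 == 24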
def create_model(embeddings_file: str, input_size, hidden_size, output_size):
    """
    Create a simple regression model with a single embedding layer.

    :param embeddings_file: embeddings file to load (optional)
    :param input_size: size of input layer
    :param hidden_size: size of embeddings
    :param output_size: size of output layer
    :return: Keras model
    """
    model = Sequential()
    model.add(
        Embedding(input_size, hidden_size, input_length=1, name='embedding'))
    if embeddings_file is not None:
        embeddings = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([embeddings])
    model.add(Flatten())
    model.add(Dense(output_size, activation="sigmoid"))
    return model
class KerasNeuralNetwork(PredictionModel):
    def __init__(self,
                 layers: Iterable[int],
                 funcs: Union[str, Iterable[str]],
                 batch_size=None,
                 max_cores=8):
        # Cap the number of CPU cores TensorFlow may use.
        session_conf = tf.compat.v1.ConfigProto(
            device_count={"CPU": max_cores})
        sess = tf.compat.v1.Session(config=session_conf)
        tf.compat.v1.keras.backend.set_session(sess)

        layers = list(layers)
        # The first entry is the input width; the rest are layer sizes.
        self.input_shape = (layers.pop(0),)
        if isinstance(funcs, str):
            funcs = [funcs] * len(layers)
        self.__keras_network = Sequential([
            Dense(size, activation=funcs[i]) for i, size in enumerate(layers)
        ])
        self.__keras_network.compile(
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'],
        )
        self.batch_size = batch_size

    def predict(self, data: Data) -> Label:
        # Accept both single samples (1-D) and batches (2-D).
        flatten = False
        if len(data.shape) == 1:
            data = data.reshape(1, -1)
            flatten = True
        res = self.__keras_network.predict(data)
        if flatten:
            res = res.flatten()
        return res

    def train(self, data: Data, label: Label):
        return self.__keras_network.fit(data,
                                        label,
                                        batch_size=self.batch_size,
                                        verbose=False)
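# Hypothetical usage sketch (the sizes and data here are illustrative;
# categorical_crossentropy expects one-hot labels):
# net = KerasNeuralNetwork(layers=[4, 16, 3], funcs=['relu', 'softmax'])
# X = np.random.rand(100, 4).astype('float32')
# y = tf.keras.utils.to_categorical(np.random.randint(3, size=100), 3)
# net.train(X, y)
# net.predict(X[0])   # 1-D array of 3 class probabilities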
                                                  random_state=42)
print(X_test.head(10))
# print(X_train.shape, X_val.shape, X_test.shape,
#       y_train.shape, y_val.shape, y_test.shape)

# model = Sequential([
#     Dense(100, activation='relu', input_shape=(57,)),
#     Dense(40, activation='relu'),
#     Dense(20, activation='relu'),
#     Dense(1, activation='relu')
# ])
# model.compile(optimizer='adam',
#               loss='mean_squared_logarithmic_error',
#               metrics=['mean_squared_logarithmic_error'])
# hist = model.fit(X, y, epochs=100)

NNmodel = Sequential()
NNmodel.add(Dense(57, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(100, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(40, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(20, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(1, kernel_initializer='normal', activation='relu'))
NNmodel.compile(loss='mean_squared_logarithmic_error',
                optimizer='adam',
                metrics=['mean_squared_logarithmic_error'])

df_test['weather_4'] = 0
df_test = df_test[[
def create_model(input_shape: tuple, label_count: int):
    """Creates the neural network model"""
    model = Sequential()
    model.add(Conv2D(16,
                     kernel_size=(4, 4),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    # 64 3x3 kernels
    model.add(Conv2D(64, (3, 3), activation='relu'))
    # Reduce by taking the max of each 2x2 block
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dropout to avoid overfitting
    model.add(Dropout(0.25))
    # Flatten the results to one dimension for passing into the dense layers
    model.add(Flatten())
    # A stack of hidden layers to learn with
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    # Another dropout
    model.add(Dropout(0.5))
    # Final categorization 0-9, A-z with softmax
    model.add(Dense(label_count, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
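# Illustrative call, assuming 28x28 grayscale glyphs and 62 classes
# (digits plus upper- and lower-case letters, per the softmax comment;
# both numbers are assumptions):
# model = create_model(input_shape=(28, 28, 1), label_count=62)
# model.fit(x_train, y_train_onehot, epochs=10)   # one-hot labels, shape (n, 62)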
def MNIST_CNY19(classes, input_shape, weights=None):
    model = Sequential()
    model.add(
        Convolution2D(40, (5, 5),
                      strides=(1, 1),
                      input_shape=input_shape,
                      activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(20, (5, 5), strides=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(320, activation='relu'))
    model.add(Dense(160, activation='relu'))
    model.add(Dense(80, activation='relu'))
    model.add(Dense(40, activation='relu'))
    model.add(Dense(classes, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
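# Sanity check with MNIST-shaped inputs (28x28x1 is an assumption): after
# two 5x5 convolutions and two 2x2 poolings the feature map is 4x4x20, so
# Flatten() yields exactly the 320 units of the first Dense layer.
# model = MNIST_CNY19(10, (28, 28, 1))
# model.fit(x_train[..., np.newaxis] / 255.0, y_train, epochs=1)  # integer labels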
dataset['Price_Rise'] = np.where(
    dataset['Close'].shift(-1) > dataset['Close'], 1, 0)
dataset.dropna(inplace=True)

X = dataset.iloc[:, 4:-1]
y = dataset.iloc[:, -1]
split = int(len(dataset) * 0.8)
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
y_train = y_train.to_numpy()

classifier = Sequential()
classifier.add(Dense(units=64, activation='relu', input_dim=X.shape[1]))
classifier.add(Dense(units=64, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='mean_squared_error',
                   metrics=['accuracy'])
classifier.fit(X_train, y_train, batch_size=10, epochs=50)

y_pred = classifier.predict(X_test)
# Map sigmoid outputs to a -1 / +1 signal: round(p) is 0 or 1, so
# 2 * round(p) - 1 is -1 or +1.
y_pred = 2 * np.round(y_pred) - 1
dataset['y_pred'] = np.NaN
dataset.iloc[(len(dataset) - len(y_pred)):, -1:] = y_pred
def get_clstm(input_data, num_labels):
    model = Sequential()
    model.add(ConvLSTM2D(filters=16,
                         kernel_size=(3, 3),
                         input_shape=input_data.shape,
                         padding='same',
                         return_sequences=True))
    model.add(BatchNormalization())
    model.add(ConvLSTM2D(filters=64,
                         kernel_size=(5, 5),
                         padding='same',
                         return_sequences=False))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
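# ConvLSTM2D consumes 5-D batches, so `input_data.shape` here must be
# (timesteps, rows, cols, channels) without the batch axis. Illustrative
# call (clip geometry is assumed; LEARNING_RATE is a module constant
# defined elsewhere):
# model = get_clstm(np.zeros((10, 64, 64, 1)), num_labels=5)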
class LSTMModel:
    def __init__(self, num_hidden, window, end_time, future_target_size,
                 validation_ratio, validation_freq, effective_factor,
                 mean_absolute_percentage_error=None, describe=None,
                 epoc=10, metric_sender=None):
        self.__num_hidden = num_hidden
        self.__future_target = future_target_size
        self.__window = window
        self.__epochs = epoc
        self.__mean_absolute_percentage_error = mean_absolute_percentage_error
        self.__end_time = end_time
        self.__effective_factor = effective_factor
        self.__model = Sequential([
            LSTM(num_hidden,
                 input_shape=np.zeros((window, len(effective_factor))).shape),
            Dense(self.__future_target)
        ])
        self.__model.compile(optimizer='adam', loss='mean_squared_error')
        self.__validation_ratio = validation_ratio
        self.__validation_freq = validation_freq
        self.__fit_model = ModelType.LSTM
        self.__describe = describe
        self.__metric_collector = MetricCollector(epochs=self.__epochs,
                                                  metric_sender=metric_sender)

    def train(self, input_data: MultivariateData, batch_size, steps_per_epoc):
        input_factors = input_data.generate_outer_join_factors()
        input_factors = MultivariateData.generate_filled_missing_frame(
            input_factors,
            input_data.get_gran(),
            input_data.get_custom_in_seconds(),
            fill_type=input_data.fill_type,
            fill_value=input_data.fill_value)
        merged_input = MultivariateData.generate_inner_join_frame(
            [input_data.get_target(), input_factors])
        input_target = merged_input[[TIMESTAMP, VALUE]]
        input_factors = merged_input.drop([TIMESTAMP, VALUE], axis=1)
        input_factors = input_factors.reindex(columns=self.__effective_factor)
        self.__describe = merged_input.describe().T
        train, label = input_data.get_normalized_batch(self.__window,
                                                       self.__future_target,
                                                       label=input_target,
                                                       factors=input_factors)
        batch_size = int(min(batch_size, max(1, len(train) / steps_per_epoc)))
        # Split the tail of the batch off as the validation set.
        train_multi = data.Dataset.from_tensor_slices(
            (train[:-int(len(train) * self.__validation_ratio)],
             label[:-int(len(label) * self.__validation_ratio)]))
        train_multi = train_multi.cache().shuffle(
            len(train) * 100).batch(batch_size).repeat()
        val_multi = data.Dataset.from_tensor_slices(
            (train[-int(len(train) * self.__validation_ratio):],
             label[-int(len(label) * self.__validation_ratio):]))
        val_multi = val_multi.cache().batch(batch_size).repeat()
        self.__model.fit(train_multi,
                         epochs=self.__epochs,
                         shuffle=False,
                         validation_data=val_multi,
                         validation_freq=self.__validation_freq,
                         steps_per_epoch=len(train) *
                         (1 - self.__validation_ratio) / batch_size,
                         validation_steps=len(train) *
                         self.__validation_ratio / batch_size,
                         callbacks=[self.__metric_collector])
        # Estimate a per-step MAPE on the validation slice.
        validation_result = self.__model.predict(
            train[-int(len(train) * self.__validation_ratio):])
        validation_labels = label[-int(len(label) * self.__validation_ratio):]
        mean_average_percentage_error = np.abs(
            validation_result - validation_labels) / np.abs(validation_labels)
        mean_average_percentage_error[np.isinf(
            mean_average_percentage_error)] = np.nan
        mean_average_percentage_error = np.nanmean(
            mean_average_percentage_error, axis=0)
        self.__mean_absolute_percentage_error = mean_average_percentage_error

    def get_mean_absolute_percentage_error(self):
        return list(self.__mean_absolute_percentage_error)

    def get_effective_factor(self):
        return self.__effective_factor

    def save_model(self, model_dir):
        with open(os.path.join(model_dir, 'LSTM-Meta.pkl'), "wb") as f:
            meta = {
                'mean_absolute_percentage_error':
                self.__mean_absolute_percentage_error,
                'end_time': self.__end_time,
                'future_target': self.__future_target,
                'window': self.__window,
                'effective_factor': self.__effective_factor,
                'num_hidden': self.__num_hidden,
                'describe': self.__describe
            }
            pickle.dump(meta, f)
        self.__model.save_weights(
            os.path.join(model_dir, self.__fit_model.name))

    def get_model_type(self):
        return self.__fit_model

    @staticmethod
    def load_model_meta(model_dir):
        with open(os.path.join(model_dir, 'LSTM-Meta.pkl'), "rb") as f:
            meta = pickle.load(f)
        return {
            'mean_absolute_percentage_error':
            meta['mean_absolute_percentage_error'],
            'end_time': meta['end_time'],
            'window': meta['window'],
            'effective_factor': meta['effective_factor'],
            'future_target': meta['future_target'],
            'num_hidden': meta['num_hidden'],
            'describe': meta['describe']
        }

    def inference(self, input_data: MultivariateData, window, timestamp,
                  **kwargs):
        input_factors = input_data.generate_outer_join_factors()
        if timestamp is None:
            ts = input_factors[TIMESTAMP].max()
        else:
            ts = pd.to_datetime(timestamp)
            ts = ts.tz_localize(None)
        input_factors = MultivariateData.gen_filled_missing_by_period(
            input_factors,
            input_data.get_gran(),
            input_data.get_custom_in_seconds(),
            end_time=ts,
            periods=window,
            fill_type=input_data.fill_type,
            fill_value=input_data.fill_value)
        input_factors = input_factors[self.__effective_factor]
        input_factors = input_factors.reindex(columns=self.__effective_factor)
        input_factors = input_factors.tail(window)
        # Min-max normalize each factor with the statistics saved at
        # training time; constant columns are zeroed to avoid dividing by 0.
        for column in self.__effective_factor:
            min_value = self.__describe.loc[column]['min']
            max_value = self.__describe.loc[column]['max']
            if max_value == min_value:
                input_factors[column] = 0
            else:
                input_factors[column] = (input_factors[column] -
                                         min_value) / (max_value - min_value)
        input_factors = input_factors.values
        input_factors[(input_factors < 0) | (input_factors > 1)] = 0
        predicted = self.__model.predict(np.array([input_factors]))
        predicted = predicted.reshape(self.__future_target)
        # Denormalize the predictions back to the original value range.
        predicted = predicted * (self.__describe.loc[VALUE]['max'] -
                                 self.__describe.loc[VALUE]['min']
                                 ) + self.__describe.loc[VALUE]['min']
        # Use the resolved timestamp so a None argument still works.
        target_timestamps = pd.date_range(
            start=ts,
            periods=self.__future_target,
            freq=convert_freq(input_data.get_gran(),
                              input_data.get_custom_in_seconds()))
        lower_boundary = [
            predicted[i] -
            np.abs(predicted[i]) * self.__mean_absolute_percentage_error[i]
            for i in range(0, len(predicted))
        ]
        upper_boundary = [
            predicted[i] +
            np.abs(predicted[i]) * self.__mean_absolute_percentage_error[i]
            for i in range(0, len(predicted))
        ]
        return [
            UnivariateForecastItem(
                predicted[i],
                lower_boundary[i],
                upper_boundary[i],
                (1 - self.__mean_absolute_percentage_error[i]),
                timestamp=target_timestamps[i]).to_dict()
            for i in range(0, len(predicted))
        ]

    def load_model(self, model_dir):
        self.__model.load_weights(
            os.path.join(model_dir, self.__fit_model.name))

    def get_end_time(self):
        return self.__end_time
def _make_layers(self):
    # Create the model
    model = Sequential()
    model.add(Conv2D(32,
                     kernel_size=(3, 3),
                     activation='relu',
                     input_shape=(48, 48, 1)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(7, activation='softmax'))
    return model
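# _make_layers returns an uncompiled model. A hedged sketch of how it
# might be compiled and trained (the 48x48x1 input and 7-way softmax
# suggest an FER-style emotion dataset, but that is an assumption):
# model = self._make_layers()
# model.compile(optimizer='adam', loss='categorical_crossentropy',
#               metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=64, epochs=30,
#           validation_data=(x_val, y_val))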
def get_cnn_adv(input_data, num_labels):
    model = Sequential()
    div = 4
    model.add(Conv2D(64,
                     kernel_size=(input_data.shape[0] // div, 1),
                     activation='relu',
                     input_shape=input_data.shape))
    model.add(Conv2D(128, kernel_size=(div, 8), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=KeyConstants.ELR)
    # compile() takes `optimizer=`, not `opt=`.
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def get_stacked_cnn_lstm(input_data, num_labels):
    model = Sequential()
    # input_shape belongs on the TimeDistributed wrapper, not on the
    # wrapped Conv2D, where Keras would ignore it.
    model.add(TimeDistributed(Conv2D(16, kernel_size=3, activation='relu'),
                              input_shape=input_data.shape))
    model.add(TimeDistributed(Conv2D(64, kernel_size=5, activation='relu')))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(units=2048))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    # Use the Adamax optimizer built above.
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def CIFAR_CNY19(classes, input_shape, weights=None):
    model = Sequential()
    model.add(
        Convolution2D(40, (5, 5), strides=(1, 1), input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    model.add(Convolution2D(20, (5, 5), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(240, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(84, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
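# Sanity check with CIFAR-10-shaped inputs (32x32x3 and 10 classes are
# assumptions; sparse_categorical_crossentropy takes integer labels):
# model = CIFAR_CNY19(10, (32, 32, 3))
# model.fit(x_train / 255.0, y_train, epochs=1)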
from __future__ import absolute_import, division, print_function, unicode_literals

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Embedding
from tensorflow.keras.models import Sequential
import tensorflow_datasets as tfds

tfds.disable_progress_bar()

model = Sequential()
model.add(Embedding(1000, 64, input_length=10))
# The model takes as input an integer matrix of size (batch, input_length).
# The largest integer (i.e. word index) in the input should be no larger
# than 999 (vocabulary size).
# Now model.output_shape == (None, 10, 64), where None is the batch
# dimension.

input_array = np.random.randint(1000, size=(32, 10))
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
assert output_array.shape == (32, 10, 64)

embedding_layer = layers.Embedding(1000, 5)
def get_pen_cnn(input_data, num_labels):
    model = Sequential()
    model.add(Conv2D(64,
                     kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_data.shape))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adam(learning_rate=0.0003)
    # compile() takes `optimizer=`, not `opt=`.
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def create_model():
    checkpoint = ModelCheckpoint('sdr_model.h5',
                                 monitor='accuracy',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    gray_data = np.load("npy_data/gray_dataset.npy")
    color_data = np.load("npy_data/color_dataset.npy")
    # img_pixel_dataset = np.load("npy_data/img_pixel_dataset.npy")
    label = np.load("npy_data/label.npy")

    dataset = pre_processing.npy_dataset_concatenate(gray_data, color_data)
    # corr_matrix = np.corrcoef(dataset)
    # print(corr_matrix)

    le = preprocessing.LabelEncoder()
    label = le.fit_transform(label)
    x_train, x_test, y_train, y_test = train_test_split(dataset,
                                                        label,
                                                        test_size=0.20,
                                                        shuffle=True)

    model = Sequential()
    model.add(Dense(14, input_dim=14, activation=None))
    model.add(Dense(128, activation='tanh'))
    model.add(Dense(256, activation='sigmoid'))
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              epochs=150,
              verbose=0,
              batch_size=20,
              shuffle=True,
              callbacks=callbacks_list)

    # Sequential.predict_classes was removed in recent TF releases; take
    # the argmax of the softmax outputs instead.
    pred_y_test = np.argmax(model.predict(x_test), axis=1)
    acc_model = accuracy_score(y_test, pred_y_test)
    print("Prediction Acc model:", acc_model)
    print("Org. Labels:", y_test[:30])
    print("Pred Labels:", pred_y_test[:30])
    # c_report = classification_report(y_test, pred_y_test, zero_division=0)
    # print(c_report)
    print("\n\n")
def get_cnn(input_data, num_labels):
    model = Sequential()
    model.add(Conv2D(16,
                     kernel_size=2,
                     activation='relu',
                     input_shape=input_data.shape))
    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(Conv2D(128, kernel_size=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
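# Illustrative call, assuming spectrogram-like inputs with an explicit
# channel axis (the 40x100 geometry is an assumption; LEARNING_RATE is a
# module constant defined elsewhere):
# model = get_cnn(np.zeros((40, 100, 1)), num_labels=8)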