def __init__(self, state_size: int, action_size: int, representation_size: int,
             max_value: int, hidden_neurons: int = 64, weight_decay: float = 1e-4,
             representation_activation: str = 'tanh'):
    self.state_size = state_size
    self.action_size = action_size
    self.value_support_size = math.ceil(math.sqrt(max_value)) + 1

    regularizer = regularizers.l2(weight_decay)
    representation_network = Sequential([
        Dense(hidden_neurons, activation='relu', kernel_regularizer=regularizer),
        Dense(representation_size, activation=representation_activation,
              kernel_regularizer=regularizer)
    ])
    value_network = Sequential([
        Dense(hidden_neurons, activation='relu', kernel_regularizer=regularizer),
        Dense(self.value_support_size, kernel_regularizer=regularizer)
    ])
    policy_network = Sequential([
        Dense(hidden_neurons, activation='relu', kernel_regularizer=regularizer),
        Dense(action_size, kernel_regularizer=regularizer)
    ])
    dynamic_network = Sequential([
        Dense(hidden_neurons, activation='relu', kernel_regularizer=regularizer),
        Dense(representation_size, activation=representation_activation,
              kernel_regularizer=regularizer)
    ])
    reward_network = Sequential([
        Dense(16, activation='relu', kernel_regularizer=regularizer),
        Dense(1, kernel_regularizer=regularizer)
    ])

    super().__init__(representation_network, value_network, policy_network,
                     dynamic_network, reward_network)
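# A quick check of the value-support sizing above (the max_value of 500 is a
# hypothetical example): the scalar value is encoded over a categorical support
# of ceil(sqrt(max_value)) + 1 atoms, which keeps the value head small even for
# large reward scales.
import math

assert math.ceil(math.sqrt(500)) + 1 == 24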
def create_model(embeddings_file: str, input_size, input_length, hidden_size):
    """
    Create simple regression model with a single embedding layer.

    :param embeddings_file: embeddings file to load
    :param input_size: size of input layer
    :param input_length: length of the input sequences
    :param hidden_size: size of embeddings
    :return: Keras model
    """
    model = Sequential()
    model.add(
        Embedding(input_size, hidden_size, input_length=input_length,
                  name='embedding'))
    # Sum the embeddings over the sequence axis instead of flattening.
    model.add(keras.layers.Lambda(lambda x: keras.backend.sum(x, axis=1)))
    model.add(Dense(92, activation="sigmoid"))
    if embeddings_file is not None:
        embeddings = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([embeddings])
    return model
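# Usage sketch (sizes are hypothetical): passing embeddings_file=None skips the
# weight load and builds a randomly initialised model; a real matrix must have
# shape (input_size, hidden_size) to match the embedding layer. Note the model
# is returned uncompiled, so compile() is still up to the caller.
model = create_model(None, input_size=1000, input_length=10, hidden_size=50)
model.compile(optimizer='adam', loss='binary_crossentropy')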
def CIFAR_CNY19(classes, input_shape, weights=None):
    model = Sequential()

    model.add(Convolution2D(40, (5, 5), strides=(1, 1), input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Convolution2D(20, (5, 5), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(240, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(84, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
def MNIST_CNY19(classes, input_shape, weights=None):
    model = Sequential()

    model.add(Convolution2D(40, (5, 5), strides=(1, 1),
                            input_shape=input_shape, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(20, (5, 5), strides=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(320, activation='relu'))
    model.add(Dense(160, activation='relu'))
    model.add(Dense(80, activation='relu'))
    model.add(Dense(40, activation='relu'))
    model.add(Dense(classes, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
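# Usage sketch: MNIST digits are 28x28 single-channel images, and the sparse
# categorical loss expects integer class ids rather than one-hot labels.
model = MNIST_CNY19(classes=10, input_shape=(28, 28, 1))
# model.fit(x_train, y_train, epochs=5)  # y_train as integers 0-9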
def create_model(input_shape: tuple, label_count: int):
    """Creates the neural network model"""
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(4, 4), activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    # 64 3x3 kernels
    model.add(Conv2D(64, (3, 3), activation='relu'))
    # Reduce by taking the max of each 2x2 block
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dropout to avoid overfitting
    model.add(Dropout(0.25))
    # Flatten the results to one dimension for passing into our final layer
    model.add(Flatten())
    # A hidden layer to learn with
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    # Another dropout
    model.add(Dropout(0.5))
    # Final categorization 0-9, A-z with softmax
    model.add(Dense(label_count, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
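# Usage sketch: the "0-9, A-z" comment suggests an EMNIST-style alphabet of 62
# classes over 28x28 grayscale glyphs (an assumption); with
# categorical_crossentropy the labels must be one-hot encoded, e.g. via
# keras.utils.to_categorical.
model = create_model(input_shape=(28, 28, 1), label_count=62)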
def __init__(self, num_hidden, window, end_time, future_target_size,
             validation_ratio, validation_freq, effective_factor,
             mean_absolute_percentage_error=None, describe=None, epoc=10,
             metric_sender=None):
    self.__num_hidden = num_hidden
    self.__future_target = future_target_size
    self.__window = window
    self.__epochs = epoc
    self.__mean_absolute_percentage_error = mean_absolute_percentage_error
    self.__end_time = end_time
    self.__effective_factor = effective_factor
    # Each input sample is a (window, n_features) slice of the factor series.
    self.__model = Sequential([
        LSTM(num_hidden, input_shape=(window, len(effective_factor))),
        Dense(self.__future_target)
    ])
    self.__model.compile(optimizer='adam', loss='mean_squared_error')
    self.__validation_ratio = validation_ratio
    self.__validation_freq = validation_freq
    self.__fit_model = ModelType.LSTM
    self.__describe = describe
    self.__metric_collector = MetricCollector(epochs=self.__epochs,
                                              metric_sender=metric_sender)
def get_cnn(input_data, num_labels):
    model = Sequential()
    model.add(Conv2D(16, kernel_size=2, activation='relu',
                     input_shape=input_data.shape))
    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(Conv2D(128, kernel_size=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
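# Usage sketch: Keras input_shape excludes the batch axis, so input_data is
# expected to be a single sample, e.g. one (20, 173, 1) spectrogram (the shape
# and the value of the LEARNING_RATE constant are assumptions here).
# model = get_cnn(x_train[0], num_labels=10)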
def get_cnn_adv(input_data, num_labels):
    model = Sequential()
    div = 4
    model.add(Conv2D(64, kernel_size=(input_data.shape[0] // div, 1),
                     activation='relu', input_shape=input_data.shape))
    model.add(Conv2D(128, kernel_size=(div, 8), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=KeyConstants.ELR)
    # compile() takes the optimizer via the `optimizer` keyword, not `opt`.
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def get_stacked_cnn_lstm(input_data, num_labels):
    model = Sequential()
    # input_shape belongs on the TimeDistributed wrapper, not the inner Conv2D.
    model.add(TimeDistributed(Conv2D(16, kernel_size=3, activation='relu'),
                              input_shape=input_data.shape))
    model.add(TimeDistributed(Conv2D(64, kernel_size=5, activation='relu')))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(units=2048))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    # Use the Adamax optimizer built above rather than the string 'sgd',
    # which would silently discard the configured learning rate.
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
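# Usage sketch: TimeDistributed applies the Conv2D stack frame by frame, so a
# single sample should be shaped (timesteps, height, width, channels), e.g.
# (10, 32, 32, 1), giving a training set of shape (n_samples, 10, 32, 32, 1)
# (all numbers hypothetical).
# model = get_stacked_cnn_lstm(x_train[0], num_labels=10)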
def sequential_nn_model(X_train, y_train):
    model = Sequential([
        Dense(100, activation='relu', input_shape=(X_train.shape[1], )),
        Dense(40, activation='relu'),
        Dense(20, activation='relu'),
        Dense(1, activation='relu')
    ])
    model.compile(optimizer='nadam',
                  loss=rmsle,
                  metrics=['mean_squared_logarithmic_error'])
    model.fit(X_train, y_train, epochs=50)
    return model
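# The rmsle loss above is defined elsewhere in the source; a minimal sketch of
# such a loss for non-negative targets (an assumption, not necessarily the
# original definition):
from tensorflow.keras import backend as K

def rmsle(y_true, y_pred):
    # root mean squared logarithmic error
    return K.sqrt(K.mean(K.square(K.log(y_pred + 1.0) - K.log(y_true + 1.0))))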
def create_model():
    checkpoint = ModelCheckpoint('sdr_model.h5', monitor='accuracy', verbose=1,
                                 save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    gray_data = np.load("npy_data/gray_dataset.npy")
    color_data = np.load("npy_data/color_dataset.npy")
    # img_pixel_dataset = np.load("npy_data/img_pixel_dataset.npy")
    label = np.load("npy_data/label.npy")

    dataset = pre_processing.npy_dataset_concatenate(gray_data, color_data)
    # corr_matrix = np.corrcoef(dataset)
    # print(corr_matrix)

    le = preprocessing.LabelEncoder()
    label = le.fit_transform(label)
    x_train, x_test, y_train, y_test = train_test_split(dataset, label,
                                                        test_size=0.20,
                                                        shuffle=True)

    model = Sequential()
    model.add(Dense(14, input_dim=14, activation=None))
    model.add(Dense(128, activation='tanh'))
    model.add(Dense(256, activation='sigmoid'))
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=150, verbose=0, batch_size=20,
              shuffle=True, callbacks=callbacks_list)

    # Sequential.predict_classes was removed in TF 2.6; take the argmax of the
    # softmax outputs instead.
    pred_y_test = np.argmax(model.predict(x_test), axis=1)
    acc_model = accuracy_score(y_test, pred_y_test)
    print("Prediction Acc model:", acc_model)
    print("Org. Labels:", y_test[:30])
    print("Pred Labels:", pred_y_test[:30])
    # c_report = classification_report(y_test, pred_y_test, zero_division=0)
    # print(c_report)
    print("\n\n")
def get_clstm(input_data, num_labels):
    model = Sequential()
    model.add(ConvLSTM2D(filters=16, kernel_size=(3, 3),
                         input_shape=input_data.shape, padding='same',
                         return_sequences=True))
    model.add(BatchNormalization())
    model.add(ConvLSTM2D(filters=64, kernel_size=(5, 5), padding='same',
                         return_sequences=False))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def get_pen_cnn(input_data, num_labels):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
                     input_shape=input_data.shape))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adam(learning_rate=.0003)
    # compile() takes the optimizer via the `optimizer` keyword, not `opt`.
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def create_model(embeddings_file: str, input_size, hidden_size, output_size):
    """
    Create simple regression model with a single embedding layer.

    :param embeddings_file: embeddings file to load
    :param input_size: size of input layer
    :param hidden_size: size of embeddings
    :param output_size: size of output layer
    :return: Keras model
    """
    model = Sequential()
    model.add(
        Embedding(input_size, hidden_size, input_length=1, name='embedding'))
    if embeddings_file is not None:
        embeddings = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([embeddings])
    model.add(Flatten())
    model.add(Dense(output_size, activation="sigmoid"))
    return model
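# Usage sketch (sizes are hypothetical): with input_length=1 each example is a
# single id, so the model maps an id to a sigmoid output; a loaded embedding
# matrix must be shaped (input_size, hidden_size).
model = create_model(None, input_size=1000, hidden_size=50, output_size=1)
model.compile(optimizer='adam', loss='mse')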
def __init__(self, layers: Iterable[int], funcs: Union[str, Iterable[str]],
             batch_size=None, max_cores=8):
    session_conf = tf.compat.v1.ConfigProto(device_count={"CPU": max_cores})
    sess = tf.compat.v1.Session(config=session_conf)
    tf.compat.v1.keras.backend.set_session(sess)

    layers = list(layers)
    # The first entry is the input width; store it as a 1-tuple shape.
    self.input_shape = (layers.pop(0),)
    if isinstance(funcs, str):
        funcs = [funcs] * len(layers)
    self.__keras_network = Sequential([
        Dense(size, activation=funcs[i]) for i, size in enumerate(layers)
    ])
    self.__keras_network.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )
    self.batch_size = batch_size
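# Usage sketch (the wrapper's class name is not shown above, so Network is an
# assumption): the first entry of `layers` is popped as the input width, and a
# single string in `funcs` is broadcast to every remaining layer.
# net = Network(layers=[784, 128, 10], funcs=['relu', 'softmax'], batch_size=32)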
def _make_layers(self):
    # Create the model
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     input_shape=(48, 48, 1)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(7, activation='softmax'))
    return model
                                 random_state=42)
print(X_test.head(10))
# print(X_train.shape, X_val.shape, X_test.shape, y_train.shape, y_val.shape, y_test.shape)

# model = Sequential([
#     Dense(100, activation='relu', input_shape=(57,)),
#     Dense(40, activation='relu'),
#     Dense(20, activation='relu'),
#     Dense(1, activation='relu')
# ])
# model.compile(optimizer='adam',
#               loss='mean_squared_logarithmic_error',
#               metrics=['mean_squared_logarithmic_error'])
# hist = model.fit(X, y, epochs=100)

NNmodel = Sequential()
NNmodel.add(Dense(57, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(100, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(40, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(20, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(1, kernel_initializer='normal', activation='relu'))
NNmodel.compile(loss='mean_squared_logarithmic_error',
                optimizer='adam',
                metrics=['mean_squared_logarithmic_error'])

df_test['weather_4'] = 0
df_test = df_test[[
dataset['Price_Rise'] = np.where(
    dataset['Close'].shift(-1) > dataset['Close'], 1, 0)
dataset.dropna(inplace=True)

X = dataset.iloc[:, 4:-1]
y = dataset.iloc[:, -1]
split = int(len(dataset) * 0.8)
X_train, X_test, y_train, y_test = X[:split], X[split:], y[:split], y[split:]

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
y_train = y_train.to_numpy()

classifier = Sequential()
classifier.add(Dense(units=64, activation='relu', input_dim=X.shape[1]))
classifier.add(Dense(units=64, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='mean_squared_error',
                   metrics=['accuracy'])
classifier.fit(X_train, y_train, batch_size=10, epochs=50)

y_pred = classifier.predict(X_test)
# y_pred = (y_pred > 0.5)
# y_pred = (np.round(y_pred * 2) - 1)
# Map the sigmoid output to a -1 / +1 signal.
y_pred = (2 * np.round(y_pred) - 1)
dataset['y_pred'] = np.nan  # np.NaN was removed in NumPy 2.0
dataset.iloc[(len(dataset) - len(y_pred)):, -1:] = y_pred
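# The final mapping turns sigmoid probabilities into a -1 / +1 trading signal:
# np.round thresholds at 0.5, then 2*r - 1 sends {0, 1} to {-1, +1}.
import numpy as np

p = np.array([0.2, 0.7, 0.51])
print(2 * np.round(p) - 1)  # [-1.  1.  1.]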