def build_model(self, file_saved_weights=''):
    model = Sequential()
    model.add(
        layers.Dense(
            self.parameters['dimension_layer1'],
            activation=self.parameters['activation_layer1'],
            kernel_initializer='glorot_normal',  # alternatives: lecun_uniform, some random init
            input_shape=(self.input_layer_size, )))
    model.add(
        layers.Dense(self.parameters['dimension_layer2'],
                     activation=self.parameters['activation_layer2'],
                     kernel_initializer='glorot_normal'))
    model.add(
        layers.Dense(4, activation=self.parameters['activation_layer3']))
    model.compile(optimizer=optimizers.Adam(lr=1e-2),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    if file_saved_weights != '':
        model.load_weights(file_saved_weights)
    return model
def get_gen_nn(start_dim=128 * 7 * 7,
               random_dim=128,
               lr=0.0002,
               beta_1=0.5,
               weights_path="",
               verbose=False):
    # Generator: projects a random_dim noise vector up to a 28x28x1 image.
    # Note: start_dim, lr and beta_1 are unused here; the generator is
    # trained through the combined GAN model rather than compiled directly.
    model = Sequential()
    model.add(
        Dense(128 * 7 * 7,
              input_dim=random_dim,
              kernel_initializer=RandomNormal(stddev=0.02)))
    model.add(LeakyReLU(0.2))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=5, padding='same'))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D())
    model.add(Conv2D(1, kernel_size=5, padding='same', activation='tanh'))
    if weights_path:
        try:
            model.load_weights(weights_path)
            print("weights loaded")
        except Exception:
            print("weights were not loaded")
    if verbose:
        print(model.summary())
    return model
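# A minimal usage sketch for the generator above, assuming the same Keras
# imports are in scope; the helper name is illustrative, not from the
# original source. It samples noise vectors and maps them to 28x28 images
# in [-1, 1] (tanh output), then rescales them for display.
import numpy as np

def sample_fake_images(generator, n_samples=16, random_dim=128):
    noise = np.random.normal(0, 1, size=(n_samples, random_dim))
    images = generator.predict(noise)  # shape: (n_samples, 28, 28, 1)
    return (images + 1.0) / 2.0        # rescale to [0, 1]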
def get_lstm(x_tr, y_tr, x_val, y_val, lstm_epochs, lstm_batch_size,
             episode_length):
    K.clear_session()
    model = Sequential()
    model.add(LSTM(episode_length, go_backwards=True))
    model.add(Dense(episode_length))
    model.compile(optimizer='Adam', loss='mse', metrics=['mse'])
    es = EarlyStopping(monitor='val_loss',
                       mode='min',
                       restore_best_weights=True,
                       verbose=1,
                       patience=50)
    mc = callbacks.ModelCheckpoint('best_model.h5',
                                   monitor='val_loss',
                                   mode='min',
                                   save_best_only=True,
                                   verbose=1,
                                   save_weights_only=True)
    model.fit(x_tr,
              y_tr,
              validation_data=(x_val, y_val),  # validation_data expects a tuple
              callbacks=[es, mc],
              epochs=lstm_epochs,
              batch_size=lstm_batch_size,
              verbose=1,
              shuffle=True)
    model.load_weights("best_model.h5")
    return model
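# A minimal usage sketch for get_lstm, under the assumption that inputs are
# windowed sequences of shape (samples, timesteps, features) and targets are
# vectors of length episode_length; all shapes and the random data here are
# illustrative, not from the original source.
import numpy as np

episode_length = 10
x_tr = np.random.rand(200, episode_length, 1)   # 200 training windows
y_tr = np.random.rand(200, episode_length)
x_val = np.random.rand(50, episode_length, 1)
y_val = np.random.rand(50, episode_length)
lstm = get_lstm(x_tr, y_tr, x_val, y_val,
                lstm_epochs=100, lstm_batch_size=32,
                episode_length=episode_length)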
def recognize_cnn(face, model_name, filepath='fitted_models/', ext='',
                  return_name=True):
    # Map of class index -> person id, pickled alongside the trained weights.
    people = pickle.load(open(filepath + 'ids_' + model_name + '.sav', 'rb'))
    X = face / 255
    X = X.reshape(1, 100, 100, 3)
    # The architecture must match the one used when the weights were saved.
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(100, 100, 3)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.15))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.35))
    model.add(Dense(len(people), activation='sigmoid'))
    model.load_weights(filepath + model_name + ext)
    if return_name:
        # predict_proba is deprecated for Sequential models; predict returns
        # the same per-class scores.
        predictions = model.predict(X)[0]
        return people[np.argmax(predictions)]
    else:
        return model.predict(X)
def get_model_visualiation(X_sample, true_label, modelpath):
    # NOTE: load_weights only works on a model whose layers have been built;
    # the original architecture must be re-added here (or the full model
    # restored with keras.models.load_model) before the weights can apply.
    model = Sequential()
    model.load_weights(modelpath)
    # predict_classes is deprecated; take the argmax of the class scores.
    y_predict = np.argmax(model.predict(X_sample), axis=-1)
    tsne = TSNE(n_components=2, random_state=0)
    X_2d = tsne.fit_transform(X_sample)
    return y_predict, X_2d
def get_disc_nn(input_shape=(28, 28, 1),
                lr=0.0001,
                beta_1=0.5,
                weights_path="",
                verbose=False):
    # Discriminator: a small DCGAN-style binary classifier (real vs. fake).
    model = Sequential()
    model.add(
        Conv2D(64,
               kernel_size=5,
               strides=2,
               padding='same',
               input_shape=input_shape,
               kernel_initializer=RandomNormal(stddev=0.02)))
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, kernel_size=5, strides=2, padding='same'))
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=lr, beta_1=beta_1))
    if weights_path:
        try:
            model.load_weights(weights_path)
            print("weights loaded")
        except Exception:
            print("weights were not loaded")
    if verbose:
        print(model.summary())
    return model
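# A minimal sketch of how the two networks above are typically wired into a
# combined model for training the generator; this composition follows the
# standard DCGAN recipe and is an assumption, not code from the original
# source.
from keras.models import Sequential
from keras.optimizers import Adam

def get_combined_gan(generator, discriminator, lr=0.0002, beta_1=0.5):
    # Freeze the discriminator while the generator is being updated.
    discriminator.trainable = False
    gan = Sequential([generator, discriminator])
    gan.compile(loss='binary_crossentropy',
                optimizer=Adam(lr=lr, beta_1=beta_1))
    return gan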
def allcnn(weights=None):
    model = Sequential()
    model.add(Conv2D(96, (3, 3), padding='same', input_shape=(32, 32, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), padding='same', strides=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Conv2D(192, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), padding='same', strides=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Conv2D(192, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (1, 1), padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv2D(10, (1, 1), padding='valid'))
    model.add(GlobalAveragePooling2D())
    model.add(Activation('softmax'))
    if weights:
        model.load_weights(weights)
    return model
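# allcnn returns an uncompiled model, so compilation is left to the caller.
# A minimal sketch, assuming CIFAR-10-style 32x32x3 inputs with one-hot
# labels; the optimizer settings here are illustrative, not from the
# original source.
from keras.optimizers import SGD

model = allcnn()
model.compile(optimizer=SGD(lr=0.01, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, epochs=..., batch_size=...)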
class Agent:
    def __init__(self, action_size):
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.95
        self.create_model()
        self.memory = deque(maxlen=2000)
        self.gamma = 0.95  # discount rate
        self.epsilon = 0.1  # exploration rate
        self.learning_rate = 0.1
        self.action_size = action_size

    def create_model(self):
        self.model = Sequential()
        self.model.add(Dense(100, input_shape=(32, ), activation='sigmoid'))
        self.model.add(Dense(4, activation='sigmoid'))
        self.model.load_weights("./models/model121.h5")
        # self.model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        # Epsilon-greedy: explore with probability epsilon, else exploit.
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])
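# The Agent above stores transitions but defines no replay step. A minimal
# sketch of the experience-replay update such a DQN-style agent usually
# pairs with; this follows the standard algorithm and is an assumption, not
# part of the original class (it also requires the model to be compiled).
import random
import numpy as np

def replay(agent, batch_size=32):
    batch = random.sample(agent.memory, min(batch_size, len(agent.memory)))
    for state, action, reward, next_state, done in batch:
        target = reward
        if not done:
            # Bellman update: reward plus discounted best future value.
            target += agent.gamma * np.amax(agent.model.predict(next_state)[0])
        target_f = agent.model.predict(state)
        target_f[0][action] = target
        agent.model.fit(state, target_f, epochs=1, verbose=0)
    # Decay exploration toward epsilon_min.
    if agent.epsilon > agent.epsilon_min:
        agent.epsilon *= agent.epsilon_decay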
def create_model_input():
    model_custom_input = Sequential()
    model_custom_input.add(Conv2D(32, (3, 3), input_shape=(100, 100, 4)))
    model_custom_input.add(Activation('relu'))
    model_custom_input.add(MaxPooling2D(pool_size=(2, 2)))
    model_custom_input.add(Conv2D(32, (3, 3)))
    model_custom_input.add(Activation('relu'))
    model_custom_input.add(MaxPooling2D(pool_size=(2, 2)))
    model_custom_input.add(Conv2D(64, (3, 3)))
    model_custom_input.add(Activation('relu'))
    model_custom_input.add(MaxPooling2D(pool_size=(2, 2)))
    model_custom_input.add(Flatten())
    model_custom_input.add(Dense(64))
    model_custom_input.add(Activation('relu'))
    model_custom_input.add(Dropout(0.5))
    model_custom_input.add(Dense(1))
    model_custom_input.add(Activation('sigmoid'))
    model_custom_input.load_weights("model.h5")
    model_custom_input.compile(loss='binary_crossentropy',
                               optimizer='rmsprop',
                               metrics=['accuracy'])
    return model_custom_input
class ToxicModel:
    def __init__(self, input_shape, output_shape):
        self.model = Sequential()
        self.model.add(Dense(1024, input_shape=(input_shape, )))
        self.model.add(Dropout(.8))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(.5))
        self.model.add(Dense(256, activation='relu'))
        self.model.add(Dense(256, activation='relu'))
        self.model.add(Dropout(.5))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dropout(.2))
        self.model.add(Dense(output_shape, activation='softmax'))
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

    def train(self, X, y, callback_dirs=[None, None]):
        self.model.fit(X, y, batch_size=128, epochs=10,
                       callbacks=[TensorBoard(callback_dirs[0]),
                                  ModelCheckpoint(os.path.join(
                                      callback_dirs[1],
                                      'weights{epoch:02d}-{loss:.4f}.hdf5'))])

    def predict(self, X):
        return np.round(self.model.predict(X), 1)

    def load(self, weights_path):
        self.model.load_weights(weights_path)

    def save(self, weights_path):
        self.model.save(weights_path)
def initialise_classifier(self):
    """Sets up the Keras model with user-specified parameters and an
    embedding layer. Loads weights from a cache if specified by the user.
    """
    # Input is an embedding layer using the weights from the pre-trained
    # word2vec vectoriser.
    embedding_matrix = self.vectorizer.embedding_matrix
    vocab_size, embedding_vector_length = embedding_matrix.shape
    model = Sequential()
    model.add(Embedding(input_dim=vocab_size,
                        output_dim=embedding_vector_length,
                        weights=[embedding_matrix],
                        trainable=False,
                        mask_zero=True))
    # Add either an LSTM or RNN layer with the same parameters
    if self.use_lstm:
        model.add(LSTM(self.layer_size,
                       dropout=self.dropout,
                       recurrent_dropout=0.2))
    else:
        model.add(SimpleRNN(self.layer_size,
                            dropout=self.dropout,
                            recurrent_dropout=0.2))
    # Output layer
    model.add(Dense(1, activation='sigmoid'))
    # Load pre-trained weights if specified by user
    if self.use_cache:
        weight_file = LSTM_WEIGHTS if self.use_lstm else RNN_WEIGHTS
        model.load_weights(weight_file)
    optimizer = optimizers.Adam(lr=self.learning_rate)
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def lstm_model(model_name, train=False):
    # Relies on module-level globals: n_steps, n_features, X_train, y_train.
    if train:
        # define model
        model = Sequential()
        model.add(LSTM(50, activation='relu',
                       input_shape=(n_steps, n_features)))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse', metrics=[r2_keras])
        # fit model
        model.fit(X_train, y_train[:, 0], epochs=60, verbose=0)
        # Save architecture (JSON) and weights (HDF5) separately
        model_json = model.to_json()
        with open("%s_model.json" % model_name, "w") as json_file:
            json_file.write(model_json)
        model.save_weights("%s_model.h5" % model_name)
        print("Saving %s model to disk .." % model_name)
    else:
        json_file = open('%s_model.json' % model_name, 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights('%s_model.h5' % model_name)
        print('loaded model from disk')
        model.compile(optimizer='adam', loss='mse', metrics=[r2_keras])
    return model
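# A minimal usage sketch of the save/load round trip above; the model name
# 'turbine' is illustrative, not from the original source. Training once
# writes <name>_model.json and <name>_model.h5; the same function can then
# restore the model without retraining.
model = lstm_model('turbine', train=True)   # trains and saves to disk
model = lstm_model('turbine', train=False)  # reloads architecture + weights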
def create(self, weights_path=None):
    '''
    channels_last (rows, cols, channels) <=> (word_num, embedding_dim, channels)
    :param weights_path:
    :return:
    '''
    word_num = self.config["word_num"]
    embedding_dim = self.config["embedding_dim"]
    channels = self.config["channels"]
    labels_num = self.config["labels_num"]
    filter_sizes = self.config["filter_sizes"]
    pool_sizes = self.config["pool_sizes"]
    num_filters = self.config["num_filters"]
    dropout = self.config["dropout"]
    dense_units = self.config["dense_units"]
    cnn_activation = self.config["cnn_activation"]
    dense_activation = self.config["dense_activation"]
    weight_decay = self.config["weight_decay"]

    # Text-content convolution: one Conv2D branch per filter size
    input_tensor = Input((word_num, embedding_dim, channels))
    conv_blocks = list()
    for idx, filter_size in enumerate(filter_sizes):
        conv = Conv2D(
            num_filters,
            kernel_size=(filter_size, embedding_dim),
            padding='valid',
            kernel_initializer='normal',
            kernel_regularizer=l2(weight_decay),
            # activity_regularizer=regularizers.l1(weight_decay),
            activation=cnn_activation)(input_tensor)
        maxpool = MaxPool2D(pool_size=(math.ceil(pool_sizes[idx]), 1),
                            strides=(int(pool_sizes[idx]), 1),
                            padding='valid')(conv)
        conv_blocks.append(maxpool)
    concatenated_tensor = Concatenate(axis=1)(conv_blocks)
    flatten = Flatten()(concatenated_tensor)
    output_tensor = Dropout(dropout)(flatten)
    model_content = Model(inputs=input_tensor, outputs=output_tensor)

    # Classification head
    model = Sequential()
    model.add(model_content)
    model.add(Dense(dense_units, activation=dense_activation))
    model.add(Dropout(dropout))
    model.add(Dense(labels_num, activation="softmax"))

    model_content.summary(positions=[.33, .60, .77, 1.])
    model.summary(positions=[.33, .60, .77, 1.])

    if weights_path:
        model.load_weights(weights_path)
    self.keras_model = model
    return self.keras_model
def vgg16_model(img_row, img_col, channel, num_classes):
    model = Sequential()
    # model.add(ZeroPadding2D(1, 1), input_shape = (img_row, img_col, channel))
    model.add(
        Conv2D(64, (3, 3),
               padding='same',
               input_shape=(img_row, img_col, channel),
               activation='relu'))  # 224
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 112; stride 2 is what downsamples
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 56
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 28
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 14
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 7
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    # Load weights pre-trained on ImageNet
    model.load_weights(
        'C:\\Users\\chiyuan\\.keras\\models\\vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    )

    # Drop the final FC layer and replace it with our own classifier.
    # Note: layers.pop() plus the manual bookkeeping below is a fragile hack
    # that does not work in recent tf.keras versions.
    model.layers.pop()  # remove the last layer
    model.outputs = [model.layers[-1].output]  # brackets keep outputs a list
    model.layers[-1].outbound_nodes = []  # detach the popped layer from the graph
    model.add(Dense(num_classes, activation='softmax'))

    # Freeze the weights of the first ten layers
    for layer in model.layers[:10]:
        layer.trainable = False

    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
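# A minimal usage sketch of the transfer-learning setup above, assuming
# 224x224 RGB inputs and one-hot labels for a hypothetical 5-class task;
# the data variables are placeholders, not from the original source.
model = vgg16_model(img_row=224, img_col=224, channel=3, num_classes=5)
# model.fit(x_train, y_train, validation_data=(x_val, y_val),
#           epochs=10, batch_size=32)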
def train(train_data, train_labels, test_data, test_labels, file_name,
          epochs=4, batch_size=150, train_temp=1, init=None,
          callbacks=False):
    # neural net parameters
    units = [200, 200]
    activation_function = "relu"
    kernel = "glorot_uniform"
    bias = "zeros"
    dropout = 0.2
    learn_rate = 0.001

    model = Sequential()
    # neural net init
    model.add(Dense(units=units[0],
                    activation=activation_function,
                    input_dim=total_features,
                    kernel_initializer=kernel,
                    bias_initializer=bias))
    model.add(Dropout(dropout))  # add dropout rate
    for hidden_layer_units in units[1:]:
        # add hidden layers with the units defined in train_models.py
        model.add(Dense(units=hidden_layer_units,
                        activation=activation_function,
                        kernel_initializer=kernel,
                        bias_initializer=bias))
        model.add(Dropout(dropout))
    model.add(Dense(2))  # output layer, with two neurons and no activation function

    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(
            labels=correct, logits=(predicted / train_temp))

    # loss is the fn method defined above, Adam optimizer
    model.compile(loss=fn, optimizer=Adam(lr=learn_rate),
                  metrics=["accuracy"])

    if callbacks:
        log_dir = path + "log\\dir\\DNN"
        tensorboard_callback = TensorBoard(log_dir=log_dir,
                                           histogram_freq=1,
                                           write_graph=True,
                                           write_images=True)
        early_stopping_callback = EarlyStopping(monitor='val_loss',
                                                mode='min',
                                                patience=10,
                                                verbose=2)
        model_checkpoint_callback = ModelCheckpoint(file_name,
                                                    monitor='val_accuracy',
                                                    mode='max',
                                                    verbose=2,
                                                    save_best_only=True)
        model.fit(train_data, train_labels,
                  epochs=epochs, batch_size=batch_size,
                  validation_data=(test_data, test_labels),
                  callbacks=[tensorboard_callback,
                             early_stopping_callback,
                             model_checkpoint_callback],
                  verbose=2)
    else:
        model.fit(train_data, train_labels,
                  epochs=epochs, batch_size=batch_size,
                  validation_data=(test_data, test_labels),
                  verbose=2)

    if file_name is not None:
        model.save(file_name)
    return model
class DNN(Model):
    """
    This class is the parent class for all deep neural network models.
    """
    def __init__(self, input_shape, num_classes, **params):
        """
        Constructor to initialize the deep neural network model
        :param input_shape: shape of the input data
        :param num_classes: number of classes in the data
        """
        super(DNN, self).__init__(**params)
        self.input_shape = input_shape
        self.model = Sequential()
        self.make_default_model()
        self.model.add(Dense(num_classes, activation='softmax'))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        print(self.model.summary())
        self.save_path = self.save_path or self.name + '_best_model.h5'

    def load_model(self, to_load):
        try:
            self.model.load_weights(to_load)
        except Exception:
            sys.stderr.write("Invalid saved file provided\n")
            sys.exit(-1)

    def save_model(self):
        self.model.save_weights(self.save_path)

    def evaluate(self, x_test, y_test):
        print('Accuracy =', self.model.evaluate(x_test, y_test)[1])

    def train(self, x_train, y_train, x_val=None, y_val=None):
        best_acc = 0
        for i in range(50):
            # Shuffle the data for each epoch in unison,
            # inspired from https://stackoverflow.com/a/4602224
            p = np.random.permutation(len(x_train))
            x_train = x_train[p]
            y_train = y_train[p]
            self.model.fit(x_train, y_train, batch_size=256, verbose=1,
                           epochs=1)
            loss, acc = self.model.evaluate(x_val, y_val)
            if acc > best_acc:
                best_acc = acc
        self.trained = True

    def make_default_model(self):
        """
        Make the model with default hyper parameters
        """
        raise NotImplementedError()
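# DNN is abstract: make_default_model must be supplied by a subclass. A
# minimal sketch of one such subclass, assuming flat feature vectors; the
# class name and layer sizes are illustrative, not from the original source.
from keras.layers import Dense

class SimpleDNN(DNN):
    def make_default_model(self):
        # Hidden layers only; DNN.__init__ appends the softmax output layer.
        self.model.add(Dense(128, activation='relu',
                             input_shape=self.input_shape))
        self.model.add(Dense(64, activation='relu'))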
def create_rnn(name, word_vectors, output_size, sequence_len, lstm_size,
               weights_file, learning_rate, dropout_rate):
    """
    Creates the graph of the applied neural net, where the first layer
    consists of pre-trained word vectors. It takes a sequence of word
    vectors as input and outputs a probability distribution over the next
    word.
    :param name: name of the model
    :param word_vectors: pre-trained word vectors
    :param output_size: number of neurons in the last layer
    :param sequence_len: length of the input sequence
    :param lstm_size: size of the underlying lstm cells
    :param weights_file: path of the file from which initial weights are read
    :param learning_rate: learning rate of the optimization algorithm
    :param dropout_rate: rate of applied dropout
    :return: created Keras model
    """
    vocab_size = len(word_vectors)

    # prepare pre-trained word embeddings
    word_ids = {wv.id: wv for wv in word_vectors.values()}
    embedding_matrix = np.zeros((vocab_size, EMBEDDING_SIZE),
                                dtype=np.float64)
    for i in range(vocab_size):
        embedding_matrix[i] = word_ids[i].vector

    # define model
    model = Sequential(name=name)
    model.add(
        Embedding(vocab_size,
                  EMBEDDING_SIZE,
                  weights=[embedding_matrix],
                  input_length=sequence_len,
                  trainable=False))
    model.add(LSTM(lstm_size, return_sequences=True))
    model.add(Dropout(rate=dropout_rate))
    model.add(LSTM(lstm_size))
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(output_size, activation='softmax'))

    # compile network
    optimizer = Adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    # try to load an existing weights file
    try:
        if weights_file and Path(weights_file).exists():
            logger.info('Reading weights from %s', weights_file)
            model.load_weights(filepath=weights_file, by_name=True)
            logger.info('Successfully loaded weights from %s', weights_file)
        else:
            logger.info('No stored weights found.')
    except Exception:
        logger.exception('Cannot read model weights!')
    return model
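# A minimal sketch of next-word prediction with the model above, assuming a
# seed sequence already encoded as word ids and an id_to_word lookup; both
# names are placeholders, not from the original source.
import numpy as np

def predict_next_word(model, seed_ids, id_to_word):
    # seed_ids: array of word ids with shape (1, sequence_len)
    probs = model.predict(np.asarray(seed_ids))[0]
    return id_to_word[int(np.argmax(probs))]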
class DroneModel:
    def __init__(self):
        self.model = Sequential()
        self.model.add(Dense(8, input_dim=2, activation='relu'))
        self.model.add(Dense(8, activation='relu'))
        self.model.add(Dense(8, activation='relu'))
        self.model.add(Dense(2))
        optimizer = keras.optimizers.Nadam(learning_rate=0.001)
        self.model.compile(loss='mse',
                           optimizer=optimizer,
                           metrics=[keras.metrics.MeanAbsoluteError()])

    def loadModel(self, file):
        self.model.load_weights(file)

    def train(self, dataset, epochs, batch_size):
        # 75/25 train/test split
        trainData = dataset[:3 * len(dataset) // 4]
        testData = dataset[3 * len(dataset) // 4:]
        inputData = trainData[:, 2:4]
        outputData = trainData[:, 0:2]
        self.model.fit(inputData, outputData,
                       epochs=epochs, batch_size=batch_size)
        inputData = testData[:, 2:4]
        outputData = testData[:, 0:2]
        # evaluate returns [loss, mean_absolute_error], not an accuracy
        _, mae = self.model.evaluate(inputData, outputData)
        print('Mean absolute error: %.4f' % mae)
        print('Neural network trained')

    def saveModel(self, file):
        self.model.save_weights(file)
        print('Model saved')

    def predict(self, angle, angularVelocity):
        angle = TrainingData.normalizeAngle(angle)
        angularVelocity = TrainingData.normalizeAngularVelocity(
            angularVelocity)
        prediction = self.model.predict(np.array([[angle, angularVelocity]]))
        left = TrainingData.denormalizeForce(prediction[0, 0])
        right = TrainingData.denormalizeForce(prediction[0, 1])
        # clamp negative forces to zero
        if left < 0:
            left = 0
        if right < 0:
            right = 0
        return left, right
def get_lstm():
    lstm = Sequential()
    lstm.add(
        LSTM(128, return_sequences=True, input_shape=(6, 1024), dropout=.4))
    lstm.add(LSTM(128, dropout=.2))
    lstm.add(Dense(1))
    lstm.compile(loss='mse',
                 optimizer='adam',
                 metrics=[metrics.mean_squared_error])
    lstm.load_weights('base_model.hdf5')
    return lstm
def index():
    # Needs to be a list of the last 50 pollution observations.
    # In Flask, query parameters come from flask.request, not the app object.
    pollutionData = request.args['sensorData']
    model = Sequential()
    model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(1))
    model.compile(loss='mae',
                  optimizer='adam',
                  metrics=[metrics.mae, metrics.categorical_accuracy])
    model.load_weights("model.h5", by_name=True)
    # pollutionData must be shaped (1, timesteps, features) before predict.
    predictedAQI = model.predict(pollutionData)
    # Returning most recent prediction; cast to float so jsonify can serialize it.
    return jsonify({'result': float(predictedAQI[0])})
def create_model():
    model = Sequential()
    model.add(
        ZeroPadding2D(input_shape=(224, 224, 3),
                      data_format="channels_last"))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
    model.add(ZeroPadding2D())
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(Conv2D(4096, (7, 7), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(4096, (1, 1), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation('softmax'))
    model.load_weights("weights/vgg_face_weights.h5")
    # new network from the first layer up to the layer before the output
    vgg_face_descriptor = Model(inputs=model.layers[0].input,
                                outputs=model.layers[-2].output)
    return vgg_face_descriptor
def predictRentLA(x, y):
    NN_model = Sequential()
    NN_model.add(
        Dense(100, input_dim=2, kernel_initializer='normal',
              activation='relu'))
    NN_model.add(Dense(1, kernel_initializer='normal'))
    NN_model.load_weights("rentLA_weights.hdf5")
    data = array([[x, y]])
    predictions = NN_model.predict(data[0:1])
    return predictions
def loadVggFaceModel():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Convolution2D(4096, (7, 7), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation('softmax'))

    # load weights
    model.load_weights('vgg_face_weights.h5')

    # descriptor: everything up to the layer before the softmax output
    vgg_face_descriptor = Model(inputs=model.layers[0].input,
                                outputs=model.layers[-2].output)
    return vgg_face_descriptor
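# A minimal sketch of how such a VGG-Face descriptor is commonly used for
# face verification: embed two aligned 224x224 face crops and compare them
# with cosine distance. The helper and the threshold value are assumptions
# based on the usual recipe, not code from the original source.
import numpy as np

def faces_match(descriptor, face_a, face_b, threshold=0.4):
    # face_a, face_b: preprocessed arrays of shape (1, 224, 224, 3)
    emb_a = descriptor.predict(face_a)[0]
    emb_b = descriptor.predict(face_b)[0]
    cosine_distance = 1 - np.dot(emb_a, emb_b) / (
        np.linalg.norm(emb_a) * np.linalg.norm(emb_b))
    return cosine_distance < threshold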
def build_model(self):
    model = Sequential()
    model.add(
        Dense(64, input_shape=(self.state_space, ), activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(self.action_space, activation='linear'))
    model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
    if self.load_weights:
        model.load_weights(self.weights)
    return model
def load_prediction_model(folder_name='base'):
    global prediction_model
    model = Sequential()
    model.add(
        Embedding(NUM_WORDS, EMBEDDING_DIM, input_length=SEQUENCE_SIZE - 1))
    model.add(Dropout(0.2))
    model.add(LSTM(UNITS))
    model.add(Dropout(0.4))
    model.add(Dense(units=NUM_WORDS, activation=ACTIVATION_FUNCTION))
    model.compile(optimizer=OPTIM, loss=LOSS, metrics=['accuracy'])
    model.load_weights(
        "app/songifai/models/{}/language_model.h5".format(folder_name))
    prediction_model = model
def network(self):
    model = Sequential()
    # `output_dim` is the old Keras 1 keyword; `units` (the first positional
    # argument) is the current API.
    model.add(Dense(self.first_layer, activation='relu', input_dim=11))
    model.add(Dense(self.second_layer, activation='relu'))
    model.add(Dense(self.third_layer, activation='relu'))
    model.add(Dense(4, activation='softmax'))
    opt = Adam(self.learning_rate)
    model.compile(loss='mse', optimizer=opt)
    if self.load_weights:
        model.load_weights(self.weights)
    return model
class DNN(Model):
    def __init__(self, input_shape, num_classes, **params):
        super(DNN, self).__init__(**params)
        self.input_shape = input_shape
        self.model = Sequential()
        self.make_default_model()
        self.model.add(Dense(num_classes, activation='softmax'))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        print(self.model.summary())
        self.save_path = self.save_path or self.name + '_best_model.h5'

    def load_model(self, to_load):
        try:
            self.model.load_weights(to_load)
        except Exception:
            sys.stderr.write("Invalid saved file provided")
            sys.exit(-1)

    def save_model(self):
        self.model.save_weights(self.save_path)

    def evaluate(self, x_test, y_test):
        print('Accuracy =', self.model.evaluate(x_test, y_test)[1])

    def train(self, x_train, y_train, x_val=None, y_val=None):
        best_acc = 0
        for i in range(50):
            # Shuffle the data for each epoch in unison,
            # inspired from https://stackoverflow.com/a/4602224
            p = np.random.permutation(len(x_train))
            x_train = x_train[p]
            y_train = y_train[p]
            self.model.fit(x_train, y_train, batch_size=32, epochs=1)
            loss, acc = self.model.evaluate(x_val, y_val)
            if acc > best_acc:
                best_acc = acc
        self.trained = True

    def make_default_model(self):
        raise NotImplementedError()

    def predict(self, x_test):
        if not self.trained:
            sys.stderr.write(
                "Model should be trained or loaded before doing predict\n")
            sys.exit(-1)
        return self.model.predict(x_test)  # was `data`, an undefined name
def get_model(weights_path=None):
    model = Sequential()
    model.add(
        Convolution2D(32,
                      9,
                      activation="relu",
                      input_shape=(400, 400, 3),
                      padding="same"))
    model.add(Convolution2D(16, 5, activation="relu", padding="same"))
    model.add(Convolution2D(3, 5, activation="relu", padding="same"))
    if weights_path:
        model.load_weights(weights_path)
    model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
    return model
class DNN(Model):
    def __init__(self, input_shape, num_classes, **params):
        super(DNN, self).__init__(**params)
        self.input_shape = input_shape
        self.model = Sequential()
        self.make_default_model()
        self.model.add(Dense(num_classes, activation='softmax'))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        print(self.model.summary(), file=sys.stderr)
        self.save_path = self.save_path or self.name + '_best_model.h5'

    def load_model(self, to_load):
        try:
            self.model.load_weights(to_load)
        except Exception:
            sys.stderr.write("Invalid saved file provided")
            sys.exit(-1)

    def save_model(self):
        """
        Save the model weights to `save_path` provided while creating the
        model.
        """
        self.model.save_weights(self.save_path)

    def train(self, x_train, y_train, x_val=None, y_val=None, n_epochs=50):
        best_acc = 0
        if x_val is None or y_val is None:
            x_val, y_val = x_train, y_train
        for i in range(n_epochs):
            # Shuffle the data for each epoch in unison
            p = np.random.permutation(len(x_train))
            x_train = x_train[p]
            y_train = y_train[p]
            self.model.fit(x_train, y_train, batch_size=32, epochs=1)
            loss, acc = self.model.evaluate(x_val, y_val)
            if acc > best_acc:
                best_acc = acc
        self.trained = True

    def predict_one(self, sample):
        if not self.trained:
            sys.stderr.write(
                "Model should be trained or loaded before doing predict\n")
            sys.exit(-1)
        return np.argmax(self.model.predict(np.array([sample])))

    def make_default_model(self) -> None:
        raise NotImplementedError()
def make_model():
    input_shape = (1, 1024)
    output_dim = 4
    model = Sequential()
    model.add(
        CuDNNLSTM(64,
                  input_shape=input_shape,
                  batch_size=None,
                  return_sequences=False))
    model.add(Dense(200, activation='relu'))
    model.add(Dense(output_dim, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.load_weights('../data/weights_best_model')
    return model