def get_conv_model(input_shape):
    model = Sequential()
    model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1),
                     padding='same', input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1),
                     padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', strides=(1, 1),
                     padding='same'))
    model.add(MaxPool2D((2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc', km.binary_precision(), km.binary_recall()])
    return model

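# A minimal, hedged usage sketch for get_conv_model. It assumes `keras_metrics`
# is imported as `km` and the Keras layer imports used above are in scope; the
# input shape and the random batch below are illustrative placeholders, not
# values from the original code.
import numpy as np

demo_model = get_conv_model(input_shape=(64, 64, 1))   # hypothetical shape
x_demo = np.random.rand(8, 64, 64, 1)                  # placeholder inputs
y_demo = np.random.randint(0, 2, size=(8, 1))          # placeholder labels
demo_model.fit(x_demo, y_demo, epochs=1, batch_size=4, verbose=0)
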
def fit_model_siames(train_x, train_emb_x_1, train_emb_x_2, train_y, val_x,
                     val_emb_x_1, val_emb_x_2, val_y, model_train, n_epochs,
                     optimizer, batchsize, loss_weights, verb):
    tensorboard = TensorBoard(log_dir=working_level + "/board_logs/" +
                              model_train.name + "-" + model_name +
                              "-{}".format(time()))
    checkpoint = ModelCheckpoint(
        working_level +
        "/model_checkpoints/{0}-check-{{epoch:02d}}-{{val_main_acc:.2f}}.hdf5".format(
            model_train.name + "-" + model_name),
        save_weights_only=True,
        period=int(n_epochs / 5))
    best_model_save = ModelCheckpoint(
        working_level +
        "/model_checkpoints/{0}-best.hdf5".format(model_train.name + "-" +
                                                  model_name),
        monitor='val_main_acc',
        save_weights_only=True,
        save_best_only=True,
        mode='max')
    logger = EpochLogger(display=25)
    model_train.compile(optimizer=optimizer,
                        loss='binary_crossentropy',
                        metrics=['accuracy', km.binary_recall()],
                        loss_weights=loss_weights)
    # Starts training: the siamese model takes both embedding inputs plus the
    # raw feature input, and the same targets feed both outputs.
    return model_train.fit(
        [train_emb_x_1, train_emb_x_2, train_x],
        y=[train_y, train_y],
        verbose=verb,
        validation_data=([val_emb_x_1, val_emb_x_2, val_x], [val_y, val_y]),
        epochs=n_epochs,
        batch_size=batchsize,
        callbacks=[tensorboard, checkpoint, best_model_save, logger])

def gruModel(embeddingMatrix, maxDataLength, embeddingVectorLength,
             numAttributes, numNeurons):
    model = Sequential()
    model.add(Embedding(input_dim=numAttributes,
                        output_dim=embeddingVectorLength,
                        weights=[embeddingMatrix],
                        input_length=maxDataLength,
                        trainable=False))
    model.add(GRU(numNeurons, return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid',
                    kernel_regularizer=regularizers.l2(0.001)))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy',
                           km.binary_f1_score(),
                           km.binary_precision(),
                           km.binary_recall()])
    return model

def setup_sentence_model():
    X_train = get_sentence_embeddings(headlines_train)
    X_eval = get_sentence_embeddings(headlines_eval)
    model = Sequential()
    model.add(Dense(256,
                    input_dim=512,
                    activation='relu',
                    bias_initializer='zeros',
                    kernel_regularizer=regularizers.l2(args.lambd)))
    model.add(Dropout(0.25))
    model.add(Dense(1, activation='sigmoid', bias_initializer='zeros'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', km.binary_precision(),
                           km.binary_recall()])
    # Fit the model
    model.fit(X_train,
              labels_train,
              epochs=200,
              batch_size=args.batch_size,
              callbacks=callbacks,
              validation_data=(X_eval, labels_eval))
    return model

def fit_model_level(train_x, train_emb_x, train_y, val_x, val_emb_x, val_y,
                    model_train, n_epochs, optimizer, batchsize, loss_weights):
    tensorboard = TensorBoard(log_dir="../" + train_filepath + working_level +
                              "/board_logs/{}".format(time()))
    checkpoint = ModelCheckpoint(
        "../" + train_filepath + working_level +
        "/model_checkpoints/{0}-check-{{epoch:02d}}.hdf5".format(
            model_train.name),
        period=int(n_epochs / 5))
    best_model_save = ModelCheckpoint(
        "../" + train_filepath + working_level +
        "/model_checkpoints/{0}-best.hdf5".format(model_train.name),
        monitor='val_acc',
        save_best_only=True,
        mode='max')
    logger = EpochLogger(display=25)
    model_train.compile(optimizer=optimizer,
                        loss='binary_crossentropy',
                        metrics=['accuracy', km.binary_recall()],
                        loss_weights=loss_weights)
    # Starts training; both model outputs are fed the same targets.
    return model_train.fit(
        [train_emb_x.values, train_x.values],
        y=[train_y.values, train_y.values],
        verbose=0,
        validation_data=([val_emb_x.values, val_x.values],
                         [val_y.values, val_y.values]),
        epochs=n_epochs,
        batch_size=batchsize,
        callbacks=[tensorboard, checkpoint, best_model_save, logger])

def model_preout_np(label_name, x_train, x_test):
    print("=========== running", label_name,
          "prediction from the pre-output layer ===========")
    model_filename = (GLVAR.MULTY_BINARY_SELF_ATTENTION_MODEL_DIR +
                      label_name + ".h5")
    print("load model:", model_filename)
    recall = keras_metrics.binary_recall(label=0)
    trained_model = load_model(model_filename,
                               custom_objects={
                                   'Self_Attention_Layer': Self_Attention_Layer,
                                   'binary_recall': recall
                               })
    # trained_model.summary()
    # Expose the activations of the layer just before the output head.
    trained_preout_model = Model(inputs=trained_model.input,
                                 outputs=trained_model.layers[4].output)
    print("running", label_name, "x_train prediction from the pre-output layer...")
    x_train_tmp = trained_preout_model.predict(x_train)
    print("running", label_name, "x_test prediction from the pre-output layer...")
    x_test_tmp = trained_preout_model.predict(x_test)
    x_train_tmp = x_train_tmp.reshape(-1, 256)
    x_test_tmp = x_test_tmp.reshape(-1, 256)
    print("x_train_tmp shape is:", x_train_tmp.shape)
    print("x_test_tmp shape is:", x_test_tmp.shape)
    return x_train_tmp, x_test_tmp

def get_binary_model(input_shape=128):
    model = Sequential()
    model.add(Dense(128, activation='relu',
                    kernel_initializer='random_normal',
                    input_dim=input_shape))
    model.add(Dense(256, activation='relu', kernel_initializer='random_normal'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu', kernel_initializer='random_normal'))
    model.add(Dense(64, activation='relu', kernel_initializer='random_normal'))
    model.add(Dense(32, activation='relu', kernel_initializer='random_normal'))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation='relu', kernel_initializer='random_normal'))
    model.add(Dense(1, activation='sigmoid',
                    kernel_initializer='random_normal'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc', km.binary_precision(), km.binary_recall()])
    return model

def single_identification(scan_path, detection_model_path,
                          identification_model_path, plot_path,
                          spacing=(1.0, 1.0, 1.0)):
    scan_path_without_ext = scan_path[:-len(".nii.gz")]
    centroid_path = scan_path_without_ext + ".lml"
    labels, centroids = opening_files.extract_centroid_info_from_lml(
        centroid_path)
    centroid_indexes = centroids / np.array(spacing)
    cut = np.round(np.mean(centroid_indexes[:, 0])).astype(int)

    weights = np.array([0.1, 0.9])
    detection_model_objects = {
        'loss': weighted_categorical_crossentropy(weights),
        'binary_recall': km.binary_recall(),
        'dice_coef': dice_coef_label(label=1)
    }
    detection_model = load_model(detection_model_path,
                                 custom_objects=detection_model_objects)

    identification_model_objects = {
        'ignore_background_loss': ignore_background_loss,
        'vertebrae_classification_rate': vertebrae_classification_rate
    }
    identification_model = load_model(
        identification_model_path,
        custom_objects=identification_model_objects)

    volume = opening_files.read_nii(scan_path, spacing=spacing)
    detections = apply_detection_model(volume, detection_model,
                                       np.array([64, 64, 80]),
                                       np.array([32, 32, 40]))
    identification = apply_identification_model(volume, cut - 1, cut + 1,
                                                identification_model)

    # Slice the volume at the mean sagittal position of the centroids and
    # mask the identification map with the detection map.
    volume_slice = volume[cut, :, :]
    detection_slice = detections[cut, :, :]
    identification_slice = identification[cut, :, :]
    identification_slice *= detection_slice
    masked_data = np.ma.masked_where(identification_slice == 0,
                                     identification_slice)

    fig, ax = plt.subplots(1)
    ax.imshow(volume_slice.T, cmap='gray')
    ax.imshow(masked_data.T, cmap=cm.jet, vmin=1, vmax=27, alpha=0.4,
              origin='lower')
    fig.savefig(plot_path + '/single_identification.png')

def load_object(root_folder, obj_descr_type, model_name, dataset,
                refactoring_name):
    if model_name == 'deep-learning' and obj_descr_type == 'model':
        file_name = (root_folder + "/" + obj_descr_type + "-" + model_name +
                     "-" + dataset + "-" +
                     refactoring_name.replace(" ", "") + ".h5")
        return keras_load_model(file_name,
                                custom_objects={
                                    "binary_precision": binary_precision(),
                                    "binary_recall": binary_recall()
                                })
    else:
        file_name = (root_folder + "/" + obj_descr_type + "-" + model_name +
                     "-" + dataset + "-" +
                     refactoring_name.replace(" ", "") + ".joblib")
        return load(file_name)

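# A hedged usage sketch for load_object. The folder, dataset, and refactoring
# names below are hypothetical placeholders; the function itself only fixes
# the file-name scheme and the Keras-vs-joblib dispatch shown above.
dl_model = load_object("results", "model", "deep-learning",
                       "apache", "Extract Method")    # loads a Keras .h5 model
svm_model = load_object("results", "model", "svm",
                        "apache", "Extract Method")   # loads a .joblib object
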
def load_predict(pre_list):
    # Clear the session to avoid errors when this function is called repeatedly.
    keras.backend.clear_session()
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    model_path = os.path.join(BASE_DIR, 'recommend', 'model.h5')
    model = load_model(model_path,
                       custom_objects={
                           'binary_precision': km.binary_precision(),
                           'binary_recall': km.binary_recall(),
                           'binary_f1_score': km.binary_f1_score()
                       })
    predictions = model.predict(pre_list)
    print(predictions)
    return predictions

def predic_unseen(model_name, file_info, unseen_files, data_path,
                  threshold=0.5):
    # Load model
    dependencies = {
        'binary_precision': km.binary_precision(),
        'binary_recall': km.binary_recall()
    }
    model = load_model(data_path + 'Models/' + model_name,
                       custom_objects=dependencies)

    # Load data
    test_x = []
    test_y = []
    for j in range(len(unseen_files)):
        # Get the file name and read the data
        file = file_info[file_info.fname == unseen_files[j]].fname.iloc[0]
        audio_data = pickle.load(open(data_path + 'VGG/' + file, 'rb'))
        test_x.append(audio_data)
        # Get the class label, repeated once per audio frame
        label = file_info[file_info.fname == unseen_files[j]].label.iloc[0]
        test_y.append(np.repeat([label], len(audio_data)))

    # Flatten the per-file lists and make predictions
    test_x = np.array([response for sublist in test_x for response in sublist])
    test_y = np.array([response for sublist in test_y for response in sublist])
    y_pred = model.predict(test_x)
    y_pred_th = (y_pred > threshold)

    # Confusion matrix
    cm_test = confusion_matrix(test_y, y_pred_th)
    print(cm_test)
    # Recall
    tpr_test = recall_score(test_y, y_pred_th)
    print('TPR: ', tpr_test)
    # Precision
    precision_test = precision_score(test_y, y_pred_th)
    print('Precision: ', precision_test)
    # F1 score
    fscore_test = f1_score(test_y, y_pred_th)
    print('F1 score: ', fscore_test)

def load_models(folder, streams):
    models_list = []
    for stream in streams:
        print('\nLoading', stream, 'model\n')
        models_list.append(
            load_model(folder + stream + '_best-weights.h5',
                       custom_objects={
                           'binary_precision': keras_metrics.binary_precision(),
                           'binary_recall': keras_metrics.binary_recall()
                       }))
    return models_list

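# A hedged usage sketch for load_models. The folder and stream names are
# hypothetical; the function only assumes files named '<stream>_best-weights.h5'
# exist under the given folder.
stream_models = load_models('checkpoints/', ['rgb', 'flow'])
for stream_model in stream_models:
    stream_model.summary()
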
def compile_model(self):
    metrics = [km.binary_precision(0), km.binary_recall(0)]
    self.NeuMF.compile(optimizer=SGD(lr=LEARN_RATE),
                       loss='binary_crossentropy',
                       metrics=metrics)
    # Initialize the history dict with one list per tracked metric.
    self.history['precision'] = []
    self.history['recall'] = []
    self.history['val_precision'] = []
    self.history['val_recall'] = []
    self.history['loss'] = []
    self.history['val_loss'] = []
    print('compiling NeuMF Model ...')

def get_custom_activations_dict(filepath=None):
    """
    Import all implemented custom activation functions so they can be used
    when loading a Keras model.

    Parameters
    ----------
    filepath : Optional[str]
        Path to json file containing additional custom objects.
    """
    from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, \
        ClampedReLU, LimitedReLU, NoisySoftplus
    import keras_metrics as km

    # Todo: We should be able to load a different activation for each layer.
    # Need to remove this hack:
    activation_str = 'relu_Q1.4'
    activation = get_quantized_activation_function_from_string(activation_str)

    custom_objects = {
        'binary_sigmoid': binary_sigmoid,
        'binary_tanh': binary_tanh,
        # Todo: This should work regardless of the specific attributes of the
        # ClampedReLU class used during training.
        'clamped_relu': ClampedReLU(),
        'LimitedReLU': LimitedReLU,
        'relu6': LimitedReLU({'max_value': 6}),
        activation_str: activation,
        'Noisy_Softplus': NoisySoftplus,
        'precision': precision,
        'binary_precision': km.binary_precision(label=0),
        'binary_recall': km.binary_recall(label=0),
        'activity_regularizer': keras.regularizers.l1
    }

    if filepath is not None and filepath != '':
        with open(filepath) as f:
            kwargs = json.load(f)
        for key in kwargs:
            if 'LimitedReLU' in key:
                custom_objects[key] = LimitedReLU(kwargs[key])

    return custom_objects

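# A hedged usage sketch: the returned dict is meant to be passed straight to
# Keras' load_model so the custom activations and keras_metrics objects can be
# resolved during deserialization. Both file names below are hypothetical.
from keras.models import load_model

custom_objects = get_custom_activations_dict('extra_objects.json')
parsed_model = load_model('model.h5', custom_objects=custom_objects)
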
def get_recurrent_model(input_shape):
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.5))
    model.add(TimeDistributed(Dense(64, activation='relu')))
    model.add(TimeDistributed(Dense(32, activation='relu')))
    model.add(TimeDistributed(Dense(16, activation='relu')))
    model.add(TimeDistributed(Dense(8, activation='relu')))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc', km.binary_precision(), km.binary_recall()])
    return model

def model_predict_lable_np(label_name, x_train, x_test):
    print("=========== running", label_name,
          "Self Attention prediction ===========")
    model_filename = (GLVAR.MULTY_BINARY_SELF_ATTENTION_MODEL_DIR +
                      label_name + ".h5")
    print("load model:", model_filename)
    recall = keras_metrics.binary_recall(label=0)
    trained_model = load_model(model_filename,
                               custom_objects={
                                   'Self_Attention_Layer': Self_Attention_Layer,
                                   'binary_recall': recall
                               })
    # trained_model.summary()
    trained_sa_model = Model(inputs=trained_model.input,
                             outputs=trained_model.output)
    print("running", label_name, "x_train prediction...")
    x_train_result = trained_sa_model.predict(x_train)
    print("running", label_name, "x_test prediction...")
    x_test_result = trained_sa_model.predict(x_test)

    # Map label names to their numeric indexes.
    with open(GLVAR.LABLE_INDEX_FINAME) as raw_data:
        labels_set_index = {}
        for line in raw_data:
            label_index = line.replace('\n', '').split('-')
            labels_set_index[label_index[0]] = label_index[1]

    # Where the positive-class score wins, emit this label's index, else -1,
    # then tile each decision across NUM_CLASSES * times columns.
    times = 2
    x_train_result_tmp = np.where(x_train_result[:, 0] < x_train_result[:, 1],
                                  labels_set_index[label_name], -1)
    x_train_result_repet = np.repeat(x_train_result_tmp,
                                     GLVAR.NUM_CLASSES * times).reshape(
                                         -1, GLVAR.NUM_CLASSES * times)
    x_test_result_tmp = np.where(x_test_result[:, 0] < x_test_result[:, 1],
                                 labels_set_index[label_name], -1)
    x_test_result_repet = np.repeat(x_test_result_tmp,
                                    GLVAR.NUM_CLASSES * times).reshape(
                                        -1, GLVAR.NUM_CLASSES * times)
    print("x_train_result_repet shape is:", x_train_result_repet.shape)
    print("x_test_result_repet shape is:", x_test_result_repet.shape)
    return x_train_result_repet, x_test_result_repet

def build_tf_graph(self):
    '''Keras-to-TensorFlow conversion.'''
    # Load the Keras model in inference mode.
    K.set_learning_phase(0)
    model = load_model(self.keras_model_path,
                       custom_objects={
                           'binary_precision': km.binary_precision(),
                           'binary_recall': km.binary_recall()
                       })
    # Create a frozen graph of the Keras model.
    frozen_graph = self.__freeze_session__(
        K.get_session(),
        output_names=[out.op.name for out in model.outputs])
    # Save the model as a .pb file.
    tf.train.write_graph(frozen_graph, 'saved_models/tf_model', self.tf_path,
                         as_text=False)

def main(input_path, model_path):
    model = load_model(model_path,
                       custom_objects={
                           "binary_precision": keras_metrics.binary_precision(),
                           "binary_recall": keras_metrics.binary_recall()
                       })
    allowed_filetypes = [
        '.avi', '.wmv', '.mpg', '.mov', '.mp4', '.mkv', '.3gp', '.webm', '.ogv'
    ]
    videos_path = []
    if os.path.isfile(input_path) and input_path.lower().endswith(
            tuple(allowed_filetypes)):
        videos_path = [input_path]
    elif os.path.isdir(input_path):
        search_path = os.path.join(input_path, '**')
        for ext in allowed_filetypes:
            videos_path.extend(glob2.glob(os.path.join(search_path,
                                                       '*' + ext)))
    else:
        print("Not a valid input.")

    videos_path = natsorted(videos_path)
    for video_path in videos_path:
        folder = os.path.dirname(os.path.abspath(video_path))
        filename = os.path.basename(video_path)
        json_path = os.path.join(folder, filename.split('.')[0] + '.json')
        print(video_path)
        if os.path.isfile(json_path):
            continue
        game_inference(model, video_path, json_path, samples=24, height=224,
                       width=224)

def fit_model(train_x, train_y, val_x, val_y, model_train, n_epochs,
              optimizer, batchsize, model_params=None):
    tensorboard = TensorBoard(log_dir=working_level + "/board_logs/" +
                              model_train.name + "-" + model_name +
                              "-{}".format(time()))
    checkpoint = ModelCheckpoint(
        working_level +
        "/model_checkpoints/{0}-check-{{epoch:02d}}-{{val_acc:.2f}}.hdf5".format(
            model_train.name + "-" + model_name),
        save_weights_only=True,
        period=int(n_epochs / 5))
    best_model_save = ModelCheckpoint(
        working_level +
        "/model_checkpoints/{0}-best.hdf5".format(model_train.name + "-" +
                                                  model_name),
        monitor='val_acc',
        save_best_only=True,
        save_weights_only=True,
        mode='max')
    logger = EpochLogger(display=25)
    model_train.compile(optimizer=optimizer,
                        loss='binary_crossentropy',
                        metrics=['accuracy', km.binary_recall()])
    return model_train.fit(
        train_x.values,
        train_y.values,
        verbose=0,
        epochs=n_epochs,
        batch_size=batchsize,
        validation_data=(val_x.values, val_y.values),
        callbacks=[tensorboard, checkpoint, best_model_save, logger])

def Lstm(dataInput, maxDataLength):
    train, test = train_test_split(dataInput, test_size=0.2)
    xTrain, yTrain = list(zip(*train))
    xTest, yTest = list(zip(*test))
    yTrain = np.array(yTrain)
    yTest = np.array(yTest)
    xTrain = sequence.pad_sequences(xTrain, maxlen=maxDataLength)
    xTest = sequence.pad_sequences(xTest, maxlen=maxDataLength)

    embedding_vector_length = 32
    model = Sequential()
    model.add(Embedding(NUM_OF_ATTRIBUTES, embedding_vector_length,
                        input_length=maxDataLength))
    model.add(Bidirectional(LSTM(NUM_OF_NEURONS, return_sequences=False)))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy',
                           km.binary_f1_score(),
                           km.binary_precision(),
                           km.binary_recall()])
    model.fit(xTrain, yTrain,
              validation_data=(xTest, yTest),
              epochs=NUM_OF_EPOCHS,
              batch_size=512,
              verbose=0)
    # Report the four non-loss metrics (accuracy, f1, precision, recall).
    scores = model.evaluate(xTest, yTest, verbose=0)
    for i in range(1, 5):
        print("%s : %.2f%%" % (model.metrics_names[i], scores[i] * 100))
    print("========================================================")

    predictions = Dense(num_classes, activation='softmax')(x)
    finetune_model = Model(inputs=base_model.input, outputs=predictions)
    return finetune_model


FC_LAYERS = [200, 50]
dropout = 0.5

finetune_model = build_finetune_model(base_model,
                                      dropout=dropout,
                                      fc_layers=FC_LAYERS,
                                      num_classes=len(class_list))

recall = km.binary_recall(label=1)
precision = km.binary_precision(label=1)
finetune_model.compile(loss='binary_crossentropy',
                       optimizer='adam',
                       metrics=['accuracy', auc_roc, precision, recall])

print('fitting model')
class_weights = {0: 1., 1: 1.}
STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = val_gen.n // val_gen.batch_size

def Build_Model_DNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_dnn, max_hidden_layer_dnn,
                          min_nodes_dnn, max_nodes_dnn, random_optimizor,
                          dropout):
    '''
    Build_Model_DNN_Image(shape, number_of_classes, sparse_categorical)
    Build a deep neural network model for image classification.
    shape is the input feature space; number_of_classes is the number of
    classes.
    '''
    model = Sequential()
    values = list(range(min_nodes_dnn, max_nodes_dnn))
    number_of_nodes = random.choice(values)
    layer_values = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    n_layers = random.choice(layer_values)
    print(shape)
    model.add(Flatten(input_shape=shape))
    model.add(Dense(number_of_nodes, activation='relu'))
    model.add(Dropout(dropout))
    for _ in range(0, n_layers - 1):
        number_of_nodes = random.choice(values)
        model.add(Dense(number_of_nodes, activation='relu'))
        model.add(Dropout(dropout))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid'))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax'))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_CNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,
                         sparse_categorical, min_hidden_layer_cnn,
                         max_hidden_layer_cnn, min_nodes_cnn, max_nodes_cnn,
                         random_optimizor, dropout, simple_model=False,
                         _l2=0.01, lr=1e-3):
    """
    Build_Model_CNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, ...)
    word_index is the word index and embedding_index is the embeddings index;
    see data_helper.py. number_of_classes is the number of classes,
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences, and
    EMBEDDING_DIM is the dimension of the word embeddings (see data_helper.py).
    Two CNN variants are built: simple_model=True gives a simple CNN whose
    depth is drawn from [min_hidden_layer_cnn, max_hidden_layer_cnn);
    simple_model=False gives a more complex CNN with several parallel filter
    sizes.
    """
    model = Sequential()
    if simple_model:
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
            else:
                # Words not found in the embedding index get the 'UNK' vector.
                embedding_matrix[i] = embedding_index['UNK']
        model.add(Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True))
        values = list(range(min_nodes_cnn, max_nodes_cnn))
        layer = random.choice(list(range(min_hidden_layer_cnn,
                                         max_hidden_layer_cnn)))
        for _ in range(0, layer):
            filters = random.choice(values)
            model.add(Conv1D(filters, 5, activation='relu',
                             kernel_regularizer=l2(_l2)))
            model.add(Dropout(dropout))
            model.add(MaxPooling1D(5))
        model.add(Flatten())
        filters = random.choice(values)
        model.add(Dense(filters, activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        filters = random.choice(values)
        model.add(Dense(filters, activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        if number_of_classes == 2:
            model.add(Dense(1, activation='sigmoid',
                            kernel_regularizer=l2(_l2)))
            model_tmp = model
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.binary_precision(),
                                   km.binary_recall(),
                                   km.binary_f1_score(),
                                   km.binary_true_positive(),
                                   km.binary_true_negative(),
                                   km.binary_false_positive(),
                                   km.binary_false_negative()])
        else:
            model.add(Dense(number_of_classes, activation='softmax',
                            kernel_regularizer=l2(_l2)))
            model_tmp = model
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.sparse_categorical_precision(),
                                       km.sparse_categorical_recall(),
                                       km.sparse_categorical_f1_score(),
                                       km.sparse_categorical_true_positive(),
                                       km.sparse_categorical_true_negative(),
                                       km.sparse_categorical_false_positive(),
                                       km.sparse_categorical_false_negative()])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.categorical_precision(),
                                       km.categorical_recall(),
                                       km.categorical_f1_score(),
                                       km.categorical_true_positive(),
                                       km.categorical_true_negative(),
                                       km.categorical_false_positive(),
                                       km.categorical_false_negative()])
    else:
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
            else:
                # Words not found in the embedding index get the 'UNK' vector.
                embedding_matrix[i] = embedding_index['UNK']
        embedding_layer = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=True)

        # Applying a more complex convolutional approach with several
        # parallel filter sizes.
        convs = []
        values_layer = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
        filter_sizes = []
        layer = random.choice(values_layer)
        print("Filter ", layer)
        for fl in range(0, layer):
            filter_sizes.append(fl + 2)
        values_node = list(range(min_nodes_cnn, max_nodes_cnn))
        node = random.choice(values_node)
        print("Node ", node)
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        for fsz in filter_sizes:
            l_conv = Conv1D(node, kernel_size=fsz,
                            activation='relu')(embedded_sequences)
            l_pool = MaxPooling1D(5)(l_conv)
            convs.append(l_pool)
        l_merge = Concatenate(axis=1)(convs)
        l_cov1 = Conv1D(node, 5, activation='relu')(l_merge)
        l_cov1 = Dropout(dropout)(l_cov1)
        l_pool1 = MaxPooling1D(5)(l_cov1)
        l_cov2 = Conv1D(node, 5, activation='relu')(l_pool1)
        l_cov2 = Dropout(dropout)(l_cov2)
        l_pool2 = MaxPooling1D(30)(l_cov2)
        l_flat = Flatten()(l_pool2)
        l_dense = Dense(1024, activation='relu')(l_flat)
        l_dense = Dropout(dropout)(l_dense)
        l_dense = Dense(512, activation='relu')(l_dense)
        l_dense = Dropout(dropout)(l_dense)
        if number_of_classes == 2:
            preds = Dense(1, activation='sigmoid')(l_dense)
        else:
            preds = Dense(number_of_classes, activation='softmax')(l_dense)
        model = Model(sequence_input, preds)
        model_tmp = model
        if number_of_classes == 2:
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.binary_precision(),
                                   km.binary_recall(),
                                   km.binary_f1_score(),
                                   km.binary_true_positive(),
                                   km.binary_true_negative(),
                                   km.binary_false_positive(),
                                   km.binary_false_negative()])
        else:
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.sparse_categorical_precision(),
                                       km.sparse_categorical_recall(),
                                       km.sparse_categorical_f1_score(),
                                       km.sparse_categorical_true_positive(),
                                       km.sparse_categorical_true_negative(),
                                       km.sparse_categorical_false_positive(),
                                       km.sparse_categorical_false_negative()])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.categorical_precision(),
                                       km.categorical_recall(),
                                       km.categorical_f1_score(),
                                       km.categorical_true_positive(),
                                       km.categorical_true_negative(),
                                       km.categorical_false_positive(),
                                       km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_RNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,
                         sparse_categorical, min_hidden_layer_rnn,
                         max_hidden_layer_rnn, min_nodes_rnn, max_nodes_rnn,
                         random_optimizor, dropout, use_cuda=True,
                         use_bidirectional=True, _l2=0.01, lr=1e-3):
    """
    Build_Model_RNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,
                         sparse_categorical)
    word_index is the word index and embedding_index is the embeddings index;
    see data_helper.py. number_of_classes is the number of classes and
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences.
    """
    Recurrent = CuDNNGRU if use_cuda else GRU

    model = Sequential()
    values = list(range(min_nodes_rnn, max_nodes_rnn + 1))
    values_layer = list(range(min_hidden_layer_rnn - 1, max_hidden_layer_rnn))
    layer = random.choice(values_layer)
    print(layer)
    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
        else:
            # Words not found in the embedding index get the 'UNK' vector.
            embedding_matrix[i] = embedding_index['UNK']
    model.add(Embedding(len(word_index) + 1,
                        EMBEDDING_DIM,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=True))
    gru_node = random.choice(values)
    print(gru_node)
    for _ in range(0, layer):
        if use_bidirectional:
            model.add(Bidirectional(Recurrent(gru_node,
                                              return_sequences=True,
                                              kernel_regularizer=l2(_l2))))
        else:
            model.add(Recurrent(gru_node, return_sequences=True,
                                kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
    if use_bidirectional:
        model.add(Bidirectional(Recurrent(gru_node,
                                          kernel_regularizer=l2(_l2))))
    else:
        model.add(Recurrent(gru_node, kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu', kernel_regularizer=l2(_l2)))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_RNN_Image(shape, number_of_classes, sparse_categorical,
                          min_nodes_rnn, max_nodes_rnn, random_optimizor,
                          dropout):
    """
    Build_Model_RNN_Image(shape, number_of_classes)
    number_of_classes is the number of classes; shape is (w, h, p).
    """
    values = list(range(min_nodes_rnn - 1, max_nodes_rnn))
    node = random.choice(values)

    x = Input(shape=shape)
    # Encodes a row of pixels using a TimeDistributed wrapper. Note that
    # CuDNNLSTM does not accept recurrent_dropout, so the dropout argument
    # cannot be applied inside the recurrent layers here.
    encoded_rows = TimeDistributed(CuDNNLSTM(node))(x)
    node = random.choice(values)
    # Encodes columns of encoded rows.
    encoded_columns = CuDNNLSTM(node)(encoded_rows)

    # Final predictions and model.
    if number_of_classes == 2:
        prediction = Dense(1, activation='sigmoid')(encoded_columns)
    else:
        prediction = Dense(number_of_classes,
                           activation='softmax')(encoded_columns)
    model = Model(x, prediction)
    model_tmp = model
    if number_of_classes == 2:
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_CNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_cnn, max_hidden_layer_cnn,
                          min_nodes_cnn, max_nodes_cnn, random_optimizor,
                          dropout):
    """
    Build_Model_CNN_Image(shape, number_of_classes)
    number_of_classes is the number of classes; shape is (w, h, p).
    """
    model = Sequential()
    values = list(range(min_nodes_cnn, max_nodes_cnn))
    layers = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
    layer = random.choice(layers)
    filters = random.choice(values)
    model.add(Conv2D(filters, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, (3, 3)))
    model.add(Activation('relu'))
    for _ in range(0, layer):
        filters = random.choice(values)
        model.add(Conv2D(filters, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid',
                        kernel_constraint=maxnorm(3)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_constraint=maxnorm(3)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_DNN_Text(shape, number_of_classes, sparse_categorical,
                         min_hidden_layer_dnn, max_hidden_layer_dnn,
                         min_nodes_dnn, max_nodes_dnn, random_optimizor,
                         dropout, _l2=0.01, lr=1e-3):
    """
    Build_Model_DNN_Text(shape, number_of_classes, sparse_categorical)
    Build a deep neural network model for text classification.
    shape is the input feature space; number_of_classes is the number of
    classes.
    """
    model = Sequential()
    layer_values = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    node_values = list(range(min_nodes_dnn, max_nodes_dnn))
    number_of_nodes = random.choice(node_values)
    n_layers = random.choice(layer_values)
    model.add(Dense(number_of_nodes,
                    input_dim=shape,
                    activation='relu',
                    kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    # Keras infers the input size of each subsequent layer, so only the
    # first Dense layer needs input_dim.
    for _ in range(0, n_layers):
        number_of_nodes = random.choice(node_values)
        model.add(Dense(number_of_nodes,
                        activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid',
                        kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp

# The final sigmoid layer outputs probability values between [0, 1]
model = models.Sequential()
model.add(layers.Embedding(10000, 8, input_length=data.shape[1]))
model.add(layers.Flatten())
model.add(layers.Dense(1, activation='sigmoid'))

# =========================
# Train model
# =========================
# Because the model outputs probabilities, binary crossentropy is the best
# loss, as it measures the distance between probability distributions.
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=[km.binary_precision(), km.binary_recall()])
history = model.fit(x_train,
                    y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))

# Pull the metric curves out of the history dictionary
precision = history.history['precision']
val_precision = history.history['val_precision']
recall = history.history['recall']
val_recall = history.history['val_recall']
epochs = range(1, len(precision) + 1)

# Plot the training and validation precision
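# The plotting code itself is not part of the snippet above; a minimal sketch
# with matplotlib (an assumed import) that continues from the `precision` and
# `val_precision` lists would be:
import matplotlib.pyplot as plt

plt.plot(epochs, precision, 'bo', label='Training precision')
plt.plot(epochs, val_precision, 'b', label='Validation precision')
plt.title('Training and validation precision')
plt.xlabel('Epochs')
plt.ylabel('Precision')
plt.legend()
plt.show()
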
callback = ModelSaveBestAvgAcc(filepath="model-{epoch:02d}-{avgacc:.2f}.hdf5",
                               verbose=True,
                               cond=filter_val_f1score)

# One focal loss per output head.
losses = []
for i in range(0, 6):
    losses.append(binary_focal_loss(gamma=2.))

model = get_model(input_shape)
model.compile(optimizer=opt.Adam(lr=1e-4),
              loss=losses,
              metrics=['accuracy',
                       km.binary_precision(),
                       km.binary_recall(),
                       km.binary_f1_score()])
model.summary()

model.fit_generator(gen_train,
                    steps_per_epoch=len(dataset_train.image_ids) // batch_size,
                    epochs=epochs,
                    validation_data=gen_val,
                    validation_steps=len(dataset_val.image_ids) // batch_size,
                    callbacks=[callback],
                    verbose=2)
print('fine')

def get_stats(scans_dir, detection_model_path, identification_model_path,
              spacing=(1.0, 1.0, 1.0)):
    print("detection model: ", detection_model_path)
    print("identification model: ", identification_model_path)
    scan_paths = glob.glob(scans_dir + "/**/*.nii.gz", recursive=True)

    weights = np.array([0.1, 0.9])
    detection_model_objects = {
        'loss': weighted_categorical_crossentropy(weights),
        'binary_recall': km.binary_recall(),
        'dice_coef': dice_coef_label(label=1)
    }
    detection_model = load_model(detection_model_path,
                                 custom_objects=detection_model_objects)
    identification_model_objects = {
        'ignore_background_loss': ignore_background_loss,
        'vertebrae_classification_rate': vertebrae_classification_rate
    }
    identification_model = load_model(
        identification_model_path,
        custom_objects=identification_model_objects)

    all_correct = 0.0
    all_no = 0.0
    cervical_correct = 0.0
    cervical_no = 0.0
    thoracic_correct = 0.0
    thoracic_no = 0.0
    lumbar_correct = 0.0
    lumbar_no = 0.0
    all_difference = []
    cervical_difference = []
    thoracic_difference = []
    lumbar_difference = []
    differences_per_vertebrae = {}

    for i, scan_path in enumerate(scan_paths):
        print(i, scan_path)
        scan_path_without_ext = scan_path[:-len(".nii.gz")]
        centroid_path = scan_path_without_ext + ".lml"
        labels, centroids = opening_files.extract_centroid_info_from_lml(
            centroid_path)
        centroid_indexes = centroids / np.array(spacing)

        pred_labels, pred_centroid_estimates, pred_detections, \
            pred_identifications = test_scan(
                scan_path=scan_path,
                detection_model=detection_model,
                detection_X_shape=np.array([64, 64, 80]),
                detection_y_shape=np.array([32, 32, 40]),
                identification_model=identification_model,
                spacing=spacing)

        # Count a ground-truth centroid as correctly identified when the
        # closest prediction within 20 voxels carries the same label.
        for label, centroid_idx in zip(labels, centroid_indexes):
            min_dist = 20
            min_label = ''
            for pred_label, pred_centroid_idx in zip(
                    pred_labels, pred_centroid_estimates):
                dist = np.linalg.norm(pred_centroid_idx - centroid_idx)
                if dist <= min_dist:
                    min_dist = dist
                    min_label = pred_label
            all_no += 1
            if label[0] == 'C':
                cervical_no += 1
            elif label[0] == 'T':
                thoracic_no += 1
            elif label[0] == 'L':
                lumbar_no += 1
            if label == min_label:
                all_correct += 1
                if label[0] == 'C':
                    cervical_correct += 1
                elif label[0] == 'T':
                    thoracic_correct += 1
                elif label[0] == 'L':
                    lumbar_correct += 1
            print(label, min_label)

        # Get the average localisation distance for this scan.
        total_difference = 0.0
        no = 0.0
        for pred_label, pred_centroid_idx in zip(pred_labels,
                                                 pred_centroid_estimates):
            if pred_label in labels:
                label_idx = labels.index(pred_label)
                print(pred_label, centroid_indexes[label_idx],
                      pred_centroid_idx)
                difference = np.linalg.norm(pred_centroid_idx -
                                            centroid_indexes[label_idx])
                total_difference += difference
                no += 1

                # Add to the per-vertebra hash
                if pred_label in differences_per_vertebrae:
                    differences_per_vertebrae[pred_label].append(difference)
                else:
                    differences_per_vertebrae[pred_label] = [difference]

                # Add to the overall and per-region differences
                all_difference.append(difference)
                if pred_label[0] == 'C':
                    cervical_difference.append(difference)
                elif pred_label[0] == 'T':
                    thoracic_difference.append(difference)
                elif pred_label[0] == 'L':
                    lumbar_difference.append(difference)

        average_difference = total_difference / no
        print("average", average_difference, "\n")

    data = []
    labels_used = []
    for label in LABELS_NO_L6:
        if label in differences_per_vertebrae:
            labels_used.append(label)
            data.append(differences_per_vertebrae[label])
    plt.figure(figsize=(20, 10))
    plt.boxplot(data, labels=labels_used)
    plt.savefig('plots/boxplot.png')

    all_rate = np.around(100.0 * all_correct / all_no, decimals=1)
    all_mean = np.around(np.mean(all_difference), decimals=2)
    all_std = np.around(np.std(all_difference), decimals=2)
    cervical_rate = np.around(100.0 * cervical_correct / cervical_no,
                              decimals=1)
    cervical_mean = np.around(np.mean(cervical_difference), decimals=2)
    cervical_std = np.around(np.std(cervical_difference), decimals=2)
    thoracic_rate = np.around(100.0 * thoracic_correct / thoracic_no,
                              decimals=1)
    thoracic_mean = np.around(np.mean(thoracic_difference), decimals=2)
    thoracic_std = np.around(np.std(thoracic_difference), decimals=2)
    lumbar_rate = np.around(100.0 * lumbar_correct / lumbar_no, decimals=1)
    lumbar_mean = np.around(np.mean(lumbar_difference), decimals=2)
    lumbar_std = np.around(np.std(lumbar_difference), decimals=2)
    print("All Id rate: " + str(all_rate) + "% mean: " + str(all_mean) +
          " std: " + str(all_std) + "\n")
    print("Cervical Id rate: " + str(cervical_rate) + "% mean: " +
          str(cervical_mean) + " std: " + str(cervical_std) + "\n")
    print("Thoracic Id rate: " + str(thoracic_rate) + "% mean: " +
          str(thoracic_mean) + " std: " + str(thoracic_std) + "\n")
    print("Lumbar Id rate: " + str(lumbar_rate) + "% mean: " +
          str(lumbar_mean) + " std: " + str(lumbar_std) + "\n")