import random

import numpy as np
import tensorflow as tf
import keras
import keras_metrics as km
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import (Activation, Bidirectional, Concatenate, Conv1D,
                          Conv2D, CuDNNGRU, Dense, Dropout, Embedding,
                          Flatten, GRU, Input, LSTM, MaxPooling1D,
                          MaxPooling2D, TimeDistributed)
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras.constraints import maxnorm

# NOTE: optimizors() is assumed to be defined elsewhere in this project; it
# maps the random_optimizor flag (and optionally a learning rate) to a Keras
# optimizer instance.


def Build_Model_DNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_dnn, max_hidden_layer_dnn,
                          min_nodes_dnn, max_nodes_dnn, random_optimizor,
                          dropout):
    """
    Build_Model_DNN_Image(shape, number_of_classes, sparse_categorical)
    Build a deep neural network model for image classification.
    shape is the input feature space; number_of_classes is the number of
    classes.
    """
    model = Sequential()
    node_values = list(range(min_nodes_dnn, max_nodes_dnn))
    number_of_nodes = random.choice(node_values)
    layer_values = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    n_layers = random.choice(layer_values)
    print(shape)
    model.add(Flatten(input_shape=shape))
    model.add(Dense(number_of_nodes, activation='relu'))
    model.add(Dropout(dropout))
    # One hidden layer was already added above, so add n_layers - 1 more,
    # each with a freshly sampled width.
    for _ in range(0, n_layers - 1):
        number_of_nodes = random.choice(node_values)
        model.add(Dense(number_of_nodes, activation='relu'))
        model.add(Dropout(dropout))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid'))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax'))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp
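# Minimal usage sketch for Build_Model_DNN_Image. The input shape and the
# hyperparameter ranges below are illustrative assumptions, not values
# prescribed by this project.
dnn_image_model, _ = Build_Model_DNN_Image(shape=(28, 28, 1),
                                           number_of_classes=10,
                                           sparse_categorical=True,
                                           min_hidden_layer_dnn=1,
                                           max_hidden_layer_dnn=4,
                                           min_nodes_dnn=64,
                                           max_nodes_dnn=512,
                                           random_optimizor=True,
                                           dropout=0.5)
dnn_image_model.summary()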
def Build_Model_RNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,
                         sparse_categorical, min_hidden_layer_rnn,
                         max_hidden_layer_rnn, min_nodes_rnn, max_nodes_rnn,
                         random_optimizor, dropout, use_cuda=True,
                         use_bidirectional=True, _l2=0.01, lr=1e-3):
    """
    Build_Model_RNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, sparse_categorical)
    word_index is the word index and embedding_index is the embeddings index;
    see data_helper.py. number_of_classes is the number of classes and
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences.
    """
    Recurrent = CuDNNGRU if use_cuda else GRU
    model = Sequential()
    values = list(range(min_nodes_rnn, max_nodes_rnn + 1))
    # One recurrent layer is always added after the loop below, so sample the
    # number of additional layers from [min - 1, max).
    values_layer = list(range(min_hidden_layer_rnn - 1, max_hidden_layer_rnn))
    layer = random.choice(values_layer)
    print(layer)
    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
        else:
            # Words not found in the embedding index are mapped to the
            # 'UNK' vector.
            embedding_matrix[i] = embedding_index['UNK']
    model.add(Embedding(len(word_index) + 1,
                        EMBEDDING_DIM,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=True))
    gru_node = random.choice(values)
    print(gru_node)
    for _ in range(0, layer):
        if use_bidirectional:
            model.add(Bidirectional(Recurrent(gru_node,
                                              return_sequences=True,
                                              kernel_regularizer=l2(_l2))))
        else:
            model.add(Recurrent(gru_node,
                                return_sequences=True,
                                kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
    if use_bidirectional:
        model.add(Bidirectional(Recurrent(gru_node,
                                          kernel_regularizer=l2(_l2))))
    else:
        model.add(Recurrent(gru_node, kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu', kernel_regularizer=l2(_l2)))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp
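# Minimal sketch of the inputs Build_Model_RNN_Text expects. The toy
# vocabulary and random embeddings below are placeholders for the real
# word_index / embedding_index produced by data_helper.py; note that 'sat'
# is missing from the embedding index and therefore falls back to 'UNK'.
toy_word_index = {'the': 1, 'cat': 2, 'sat': 3}
toy_embedding_index = {w: np.random.rand(50) for w in ['the', 'cat', 'UNK']}
rnn_text_model, _ = Build_Model_RNN_Text(toy_word_index, toy_embedding_index,
                                         number_of_classes=2,
                                         MAX_SEQUENCE_LENGTH=100,
                                         EMBEDDING_DIM=50,
                                         sparse_categorical=True,
                                         min_hidden_layer_rnn=1,
                                         max_hidden_layer_rnn=3,
                                         min_nodes_rnn=32,
                                         max_nodes_rnn=128,
                                         random_optimizor=True,
                                         dropout=0.3,
                                         use_cuda=False)  # plain GRU on CPU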
def Build_Model_CNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,
                         sparse_categorical, min_hidden_layer_cnn,
                         max_hidden_layer_cnn, min_nodes_cnn, max_nodes_cnn,
                         random_optimizor, dropout, simple_model=False,
                         _l2=0.01, lr=1e-3):
    """
    Build_Model_CNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, sparse_categorical)
    word_index is the word index and embedding_index is the embeddings index;
    see data_helper.py. number_of_classes is the number of classes,
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences, and
    EMBEDDING_DIM is the dimension of the word embeddings (see data_helper.py).
    Two different CNN models are available: simple_model=True builds a simple
    stack of Conv1D layers with kernel size 5, while simple_model=False builds
    a more complex multi-branch CNN whose kernel sizes range over
    [2, layer + 1].
    """
    if simple_model:
        model = Sequential()
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
            else:
                # Words not found in the embedding index are mapped to the
                # 'UNK' vector.
                embedding_matrix[i] = embedding_index['UNK']
        model.add(Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True))
        values = list(range(min_nodes_cnn, max_nodes_cnn))
        layers = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
        layer = random.choice(layers)
        for _ in range(0, layer):
            filters = random.choice(values)
            model.add(Conv1D(filters, 5, activation='relu',
                             kernel_regularizer=l2(_l2)))
            model.add(Dropout(dropout))
            model.add(MaxPooling1D(5))
        model.add(Flatten())
        filters = random.choice(values)
        model.add(Dense(filters, activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        filters = random.choice(values)
        model.add(Dense(filters, activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        if number_of_classes == 2:
            model.add(Dense(1, activation='sigmoid',
                            kernel_regularizer=l2(_l2)))
            model_tmp = model
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.binary_precision(),
                                   km.binary_recall(),
                                   km.binary_f1_score(),
                                   km.binary_true_positive(),
                                   km.binary_true_negative(),
                                   km.binary_false_positive(),
                                   km.binary_false_negative()])
        else:
            model.add(Dense(number_of_classes, activation='softmax',
                            kernel_regularizer=l2(_l2)))
            model_tmp = model
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.sparse_categorical_precision(),
                                       km.sparse_categorical_recall(),
                                       km.sparse_categorical_f1_score(),
                                       km.sparse_categorical_true_positive(),
                                       km.sparse_categorical_true_negative(),
                                       km.sparse_categorical_false_positive(),
                                       km.sparse_categorical_false_negative()])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.categorical_precision(),
                                       km.categorical_recall(),
                                       km.categorical_f1_score(),
                                       km.categorical_true_positive(),
                                       km.categorical_true_negative(),
                                       km.categorical_false_positive(),
                                       km.categorical_false_negative()])
    else:
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
            else:
                # Words not found in the embedding index are mapped to the
                # 'UNK' vector.
                embedding_matrix[i] = embedding_index['UNK']
        embedding_layer = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=True)
        # Applying a more complex, multi-branch convolutional approach.
        convs = []
        values_layer = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
        filter_sizes = []
        layer = random.choice(values_layer)
        print("Filter ", layer)
        for fl in range(0, layer):
            filter_sizes.append(fl + 2)
        values_node = list(range(min_nodes_cnn, max_nodes_cnn))
        node = random.choice(values_node)
        print("Node ", node)
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        for fsz in filter_sizes:
            l_conv = Conv1D(node, kernel_size=fsz,
                            activation='relu')(embedded_sequences)
            l_pool = MaxPooling1D(5)(l_conv)
            # l_pool = Dropout(0.25)(l_pool)
            convs.append(l_pool)
        l_merge = Concatenate(axis=1)(convs)
        l_cov1 = Conv1D(node, 5, activation='relu')(l_merge)
        l_cov1 = Dropout(dropout)(l_cov1)
        l_pool1 = MaxPooling1D(5)(l_cov1)
        l_cov2 = Conv1D(node, 5, activation='relu')(l_pool1)
        l_cov2 = Dropout(dropout)(l_cov2)
        l_pool2 = MaxPooling1D(30)(l_cov2)
        l_flat = Flatten()(l_pool2)
        l_dense = Dense(1024, activation='relu')(l_flat)
        l_dense = Dropout(dropout)(l_dense)
        l_dense = Dense(512, activation='relu')(l_dense)
        l_dense = Dropout(dropout)(l_dense)
        if number_of_classes == 2:
            preds = Dense(1, activation='sigmoid')(l_dense)
        else:
            preds = Dense(number_of_classes, activation='softmax')(l_dense)
        model = Model(sequence_input, preds)
        model_tmp = model
        if number_of_classes == 2:
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.binary_precision(),
                                   km.binary_recall(),
                                   km.binary_f1_score(),
                                   km.binary_true_positive(),
                                   km.binary_true_negative(),
                                   km.binary_false_positive(),
                                   km.binary_false_negative()])
        else:
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.sparse_categorical_precision(),
                                       km.sparse_categorical_recall(),
                                       km.sparse_categorical_f1_score(),
                                       km.sparse_categorical_true_positive(),
                                       km.sparse_categorical_true_negative(),
                                       km.sparse_categorical_false_positive(),
                                       km.sparse_categorical_false_negative()])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.categorical_precision(),
                                       km.categorical_recall(),
                                       km.categorical_f1_score(),
                                       km.categorical_true_positive(),
                                       km.categorical_true_negative(),
                                       km.categorical_false_positive(),
                                       km.categorical_false_negative()])
    return model, model_tmp
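# Minimal usage sketch for Build_Model_CNN_Text. The toy vocabulary, random
# embeddings, and hyperparameter ranges are illustrative assumptions; real
# inputs come from data_helper.py. The multi-branch variant (simple_model=
# False) stacks several pooling layers, so it needs long input sequences.
toy_word_index = {'good': 1, 'bad': 2, 'movie': 3}
toy_embedding_index = {w: np.random.rand(50)
                       for w in ['good', 'bad', 'movie', 'UNK']}
cnn_text_model, _ = Build_Model_CNN_Text(toy_word_index, toy_embedding_index,
                                         number_of_classes=5,
                                         MAX_SEQUENCE_LENGTH=500,
                                         EMBEDDING_DIM=50,
                                         sparse_categorical=True,
                                         min_hidden_layer_cnn=1,
                                         max_hidden_layer_cnn=3,
                                         min_nodes_cnn=128,
                                         max_nodes_cnn=512,
                                         random_optimizor=True,
                                         dropout=0.25,
                                         simple_model=True)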
def Build_Model_RNN_Image(shape, number_of_classes, sparse_categorical,
                          min_nodes_rnn, max_nodes_rnn, random_optimizor,
                          dropout):
    """
    Build_Model_RNN_Image(shape, number_of_classes, sparse_categorical)
    number_of_classes is the number of classes; shape is (w, h, p).
    """
    values = list(range(min_nodes_rnn - 1, max_nodes_rnn))
    node = random.choice(values)
    x = Input(shape=shape)
    # Encode each row of pixels using the TimeDistributed wrapper.
    # NOTE: CuDNNLSTM does not accept recurrent_dropout, so the original
    # CuDNNLSTM(node, recurrent_dropout=dropout) calls would raise a
    # TypeError; plain LSTM is used here to honor the dropout argument.
    encoded_rows = TimeDistributed(LSTM(node, recurrent_dropout=dropout))(x)
    node = random.choice(values)
    # Encode the columns of the encoded rows.
    encoded_columns = LSTM(node, recurrent_dropout=dropout)(encoded_rows)
    # Final predictions and model.
    # prediction = Dense(256, activation='relu')(encoded_columns)
    if number_of_classes == 2:
        prediction = Dense(1, activation='sigmoid')(encoded_columns)
    else:
        prediction = Dense(number_of_classes,
                           activation='softmax')(encoded_columns)
    model = Model(x, prediction)
    model_tmp = model
    if number_of_classes == 2:
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp
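# Minimal usage sketch for Build_Model_RNN_Image: each pixel row is encoded
# first, then the sequence of row encodings. The MNIST-like shape and the
# node range below are illustrative assumptions.
rnn_image_model, _ = Build_Model_RNN_Image(shape=(28, 28, 1),
                                           number_of_classes=10,
                                           sparse_categorical=True,
                                           min_nodes_rnn=32,
                                           max_nodes_rnn=128,
                                           random_optimizor=True,
                                           dropout=0.2)
rnn_image_model.summary()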
def Build_Model_CNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_cnn, max_hidden_layer_cnn,
                          min_nodes_cnn, max_nodes_cnn, random_optimizor,
                          dropout):
    """
    Build_Model_CNN_Image(shape, number_of_classes, sparse_categorical)
    number_of_classes is the number of classes; shape is (w, h, p).
    """
    model = Sequential()
    values = list(range(min_nodes_cnn, max_nodes_cnn))
    layers = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
    layer = random.choice(layers)
    filters = random.choice(values)
    model.add(Conv2D(filters, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, (3, 3)))
    model.add(Activation('relu'))
    for _ in range(0, layer):
        filters = random.choice(values)
        model.add(Conv2D(filters, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid',
                        kernel_constraint=maxnorm(3)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_constraint=maxnorm(3)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp
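# Minimal usage sketch for Build_Model_CNN_Image on CIFAR-10-sized inputs
# (32x32 RGB). All values below are illustrative assumptions; the shallow
# layer range keeps the repeated 2x2 pooling from exhausting the 32x32 grid.
cnn_image_model, _ = Build_Model_CNN_Image(shape=(32, 32, 3),
                                           number_of_classes=10,
                                           sparse_categorical=True,
                                           min_hidden_layer_cnn=1,
                                           max_hidden_layer_cnn=3,
                                           min_nodes_cnn=32,
                                           max_nodes_cnn=128,
                                           random_optimizor=True,
                                           dropout=0.25)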
def Build_Model_DNN_Text(shape, number_of_classes, sparse_categorical,
                         min_hidden_layer_dnn, max_hidden_layer_dnn,
                         min_nodes_dnn, max_nodes_dnn, random_optimizor,
                         dropout, _l2=0.01, lr=1e-3):
    """
    Build_Model_DNN_Text(shape, number_of_classes, sparse_categorical)
    Build a deep neural network model for text classification.
    shape is the input feature space; number_of_classes is the number of
    classes.
    """
    model = Sequential()
    layer_values = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    node_values = list(range(min_nodes_dnn, max_nodes_dnn))
    number_of_nodes = random.choice(node_values)
    n_layers = random.choice(layer_values)
    # Only the first layer needs input_dim; Keras infers the input shape of
    # every later layer, so the redundant per-layer input_dim bookkeeping in
    # the original code is dropped here.
    model.add(Dense(number_of_nodes, input_dim=shape, activation='relu',
                    kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    for _ in range(0, n_layers):
        number_of_nodes = random.choice(node_values)
        model.add(Dense(number_of_nodes, activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=['accuracy',
                               km.binary_precision(),
                               km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(),
                               km.binary_true_negative(),
                               km.binary_false_positive(),
                               km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.categorical_precision(),
                                   km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(),
                                   km.categorical_true_negative(),
                                   km.categorical_false_positive(),
                                   km.categorical_false_negative()])
    return model, model_tmp
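# Minimal usage sketch for Build_Model_DNN_Text. shape is the size of the
# input feature vector (e.g. a TF-IDF vocabulary); the 10000-feature value
# and the ranges below are illustrative assumptions.
dnn_text_model, _ = Build_Model_DNN_Text(shape=10000,
                                         number_of_classes=20,
                                         sparse_categorical=True,
                                         min_hidden_layer_dnn=1,
                                         max_hidden_layer_dnn=4,
                                         min_nodes_dnn=64,
                                         max_nodes_dnn=512,
                                         random_optimizor=True,
                                         dropout=0.5)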
# Method of a model-wrapper class (assumed): compiles the training and
# evaluation Keras models with custom losses and hard-thresholded metrics.
def _compile_keras_model(self, optimizer=None):
    if optimizer is None:
        optimizer = self.keras_train_model.optimizer
    if optimizer is None:
        optimizer = self._create_optimizer()

    def zero_loss(true_word, topk_predictions):
        # Placeholder loss that always contributes 0.
        return tf.constant(0.0, shape=(), dtype=tf.float32)

    margin = 0.0000001
    theta = lambda t: (tf.keras.backend.sign(t) + 1.) / 2.

    def loss(y_true, y_pred):
        # Margin-masked binary cross-entropy: predictions already on the
        # correct side of the margin contribute nothing.
        y_true = tf.dtypes.cast(y_true, dtype=tf.float32)
        y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
        return -(1 - theta(y_true - margin) * theta(y_pred - margin) -
                 theta(1 - margin - y_true) * theta(1 - margin - y_pred)) * (
                     y_true * tf.keras.backend.log(y_pred + 1e-8) +
                     (1 - y_true) * tf.keras.backend.log(1 - y_pred + 1e-8))

    class FocalLoss(keras.losses.Loss):
        def __init__(self, gamma=2., alpha=4.,
                     reduction=keras.losses.Reduction.AUTO,
                     name='focal_loss'):
            """Focal loss for multi-class classification:
            FL(p_t) = -alpha * (1 - p_t)^gamma * ln(p_t)

            Note: y_pred is the probability after softmax, so the gradient is
            d(FL)/d(p_t), not d(FL)/d(x) as described in the paper;
            d(FL)/d(p_t) * [p_t * (1 - p_t)] = d(FL)/d(x).
            Focal Loss for Dense Object Detection,
            https://arxiv.org/abs/1708.02002

            Keyword Arguments:
                gamma {float} -- (default: {2.0})
                alpha {float} -- (default: {4.0})
            """
            super(FocalLoss, self).__init__(reduction=reduction, name=name)
            self.gamma = float(gamma)
            self.alpha = float(alpha)

        def call(self, y_true, y_pred):
            """
            Arguments:
                y_true {tensor} -- ground-truth labels, shape [batch_size, num_cls]
                y_pred {tensor} -- model output, shape [batch_size, num_cls]

            Returns:
                [tensor] -- loss.
            """
            epsilon = 1.e-9
            y_true = tf.dtypes.cast(y_true, dtype=tf.float32)
            y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
            model_out = tf.add(y_pred, epsilon)
            ce = tf.multiply(y_true, -tf.math.log(model_out))
            weight = tf.multiply(
                y_true, tf.pow(tf.subtract(1., model_out), self.gamma))
            fl = tf.multiply(self.alpha, tf.multiply(weight, ce))
            reduced_fl = tf.reduce_max(fl, axis=1)
            return tf.reduce_mean(reduced_fl)

    def matthews_correlation(y_true, y_pred):
        """Matthews correlation metric.

        It is only computed as a batch-wise average, not globally.
        Computes the Matthews correlation coefficient, a measure of the
        quality of binary classification.
        """
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pred_neg = 1 - y_pred_pos
        y_pos = K.round(K.clip(y_true, 0, 1))
        y_neg = 1 - y_pos
        tp = K.sum(y_pos * y_pred_pos)
        tn = K.sum(y_neg * y_pred_neg)
        fp = K.sum(y_neg * y_pred_pos)
        fn = K.sum(y_pos * y_pred_neg)
        numerator = (tp * tn - fp * fn)
        denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        return numerator / (denominator + K.epsilon())

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision. Computes the
        precision, a metric for multi-label classification of how many
        selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall. Computes the recall,
        a metric for multi-label classification of how many relevant items
        are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    @tf.function
    def fbeta_score(y_true, y_pred, beta=1):
        """Computes the F score.

        The F score is the weighted harmonic mean of precision and recall.
        Here it is only computed as a batch-wise average, not globally.

        This is useful for multi-label classification, where input samples
        can be classified as sets of labels. By only using accuracy
        (precision) a model would achieve a perfect score by simply assigning
        every class to every input. In order to avoid this, a metric should
        penalize incorrect class assignments as well (recall). The F-beta
        score (ranging from 0.0 to 1.0) computes this, as a weighted mean of
        the proportion of correct class assignments vs. the proportion of
        incorrect class assignments.

        With beta = 1, this is equivalent to an F-measure. With beta < 1,
        assigning correct classes becomes more important, and with beta > 1
        the metric is instead weighted towards penalizing incorrect class
        assignments.
        """
        if beta < 0:
            raise ValueError(
                'The lowest choosable beta is zero (only precision).')
        # If there are no true positives, fix the F score at 0 like sklearn.
        if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
            return 0
        p = precision(y_true, y_pred)
        r = recall(y_true, y_pred)
        bb = beta**2
        fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
        return fbeta_score

    def fmeasure(y_true, y_pred):
        """Computes the F-measure, the harmonic mean of precision and recall.

        Here it is only computed as a batch-wise average, not globally.
        """
        return fbeta_score(y_true, y_pred, beta=1)

    def f1(y_true, y_pred):
        def recall(y_true, y_pred):
            """Recall metric.

            Only computes a batch-wise average of recall. Computes the
            recall, a metric for multi-label classification of how many
            relevant items are selected.
            """
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
            possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
            recall = true_positives / (possible_positives + K.epsilon())
            return recall

        def precision(y_true, y_pred):
            """Precision metric.

            Only computes a batch-wise average of precision. Computes the
            precision, a metric for multi-label classification of how many
            selected items are relevant.
            """
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
            predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
            precision = true_positives / (predicted_positives + K.epsilon())
            return precision

        precision = precision(y_true, y_pred)
        recall = recall(y_true, y_pred)
        return 2 * ((precision * recall) /
                    (precision + recall + K.epsilon()))

    # Precision metric with a hard decision threshold.
    def metric_precision(y_true, y_pred):
        threshold = 0.99998
        # threshold = 0.5
        y_pred = tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                                y_true.dtype)
        TP = tf.reduce_sum(y_true * tf.round(y_pred))
        TN = tf.reduce_sum((1 - y_true) * (1 - tf.round(y_pred)))
        FP = tf.reduce_sum((1 - y_true) * tf.round(y_pred))
        FN = tf.reduce_sum(y_true * (1 - tf.round(y_pred)))
        precision = TP / (TP + FP)
        return precision

    # Recall metric with a hard decision threshold.
    def metric_recall(y_true, y_pred):
        threshold = 0.99998
        # threshold = 0.5
        y_pred = tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                                y_true.dtype)
        TP = tf.reduce_sum(y_true * tf.round(y_pred))
        TN = tf.reduce_sum((1 - y_true) * (1 - tf.round(y_pred)))
        FP = tf.reduce_sum((1 - y_true) * tf.round(y_pred))
        FN = tf.reduce_sum(y_true * (1 - tf.round(y_pred)))
        recall = TP / (TP + FN)
        return recall

    # F1-score metric with a hard decision threshold.
    def metric_F1score(y_true, y_pred):
        threshold = 0.99998

        # Debug helper that prints the tensors inside the graph context.
        @tf.function
        def load_data(inputs):
            print("-------------------------------------------")
            tf.print(inputs)  # print inside the graph context

        load_data(y_true)
        load_data(y_pred)
        # threshold = 0.5
        y_pred = tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                                y_true.dtype)
        TP = tf.reduce_sum(y_true * tf.round(y_pred))
        TN = tf.reduce_sum((1 - y_true) * (1 - tf.round(y_pred)))
        FP = tf.reduce_sum((1 - y_true) * tf.round(y_pred))
        FN = tf.reduce_sum(y_true * (1 - tf.round(y_pred)))
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        F1score = 2 * precision * recall / (precision + recall)
        return F1score

    def threshold_binary_accuracy(y_true, y_pred):
        threshold = 0.99998
        # threshold = 0.5
        return tf.keras.backend.mean(
            tf.keras.backend.equal(
                y_true,
                tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                               y_true.dtype)))

    def TP(y_true, y_pred):
        threshold = 0.99998
        # threshold = 0.5
        y_pred = tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                                y_true.dtype)
        TP = tf.reduce_sum(y_true * tf.round(y_pred))
        return TP

    def TN(y_true, y_pred):
        threshold = 0.99998
        # threshold = 0.5
        y_pred = tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                                y_true.dtype)
        TN = tf.reduce_sum((1 - y_true) * (1 - tf.round(y_pred)))
        return TN

    def FP(y_true, y_pred):
        threshold = 0.99998
        # threshold = 0.5
        y_pred = tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                                y_true.dtype)
        FP = tf.reduce_sum((1 - y_true) * tf.round(y_pred))
        return FP

    def FN(y_true, y_pred):
        threshold = 0.99998
        # threshold = 0.5
        y_pred = tf.dtypes.cast(tf.keras.backend.less(threshold, y_pred),
                                y_true.dtype)
        FN = tf.reduce_sum(y_true * (1 - tf.round(y_pred)))
        return FN

    self.keras_train_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=optimizer,
        metrics=["sparse_categorical_accuracy", metric_F1score])
    self.keras_eval_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=optimizer,
        metrics=["sparse_categorical_accuracy", metric_precision,
                 km.sparse_categorical_precision(), metric_F1score,
                 TP, TN, FP, FN])
#3 Flattening
# Create the feature vector. `classifier` is assumed to be a Sequential model
# whose convolution and pooling layers were added in the preceding (omitted)
# steps.
classifier.add(Flatten())

#4 Full connection
# Hidden layer; the width was settled by experiment. Note that output_dim is
# the deprecated Keras 1.x spelling of units, so the positional form is used.
classifier.add(Dense(194, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(4, activation='softmax'))

recall = km.binary_recall(label=0)
precision = km.binary_precision(label=1)
c_precision = km.categorical_precision()
sc_precision = km.sparse_categorical_precision()

#5 Compile the CNN
classifier.compile(optimizer='Adam',
                   loss='categorical_crossentropy',
                   metrics=[c_precision, 'accuracy', sc_precision, recall,
                            precision])

#Image augmentation. Do this?
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=False)
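# Hypothetical training call wiring the classifier to the augmented data.
# 'data/train', the target size, and the step counts are placeholders, not
# paths or settings from this project.
train_generator = train_datagen.flow_from_directory('data/train',
                                                    target_size=(64, 64),
                                                    batch_size=32,
                                                    class_mode='categorical')
classifier.fit_generator(train_generator, steps_per_epoch=100, epochs=10)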