def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulate TP/FP/FN/TN counts from one batch.

    Args:
        y_true: Ground-truth labels; any nonzero value is treated as
            positive after the bool cast.
        y_pred: Predicted scores; rounded (i.e. thresholded at 0.5)
            before the bool cast.
        sample_weight: Optional weights, broadcastable to the shape of
            ``y_true``.
    """
    y_pred = tf.round(y_pred)
    y_pred = tf.cast(y_pred, tf.bool)
    y_true = tf.cast(y_true, tf.bool)

    # One indicator tensor per confusion-matrix cell, cast to the
    # metric dtype so they can be weighted and summed.
    values_tn = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, False))
    values_tn = tf.cast(values_tn, self.dtype)
    values_fp = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, True))
    values_fp = tf.cast(values_fp, self.dtype)
    values_fn = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, False))
    values_fn = tf.cast(values_fn, self.dtype)
    values_tp = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
    values_tp = tf.cast(values_tp, self.dtype)

    if sample_weight is not None:
        sample_weight = tf.cast(sample_weight, self.dtype)
        # BUG FIX: ``tf.broadcast_weights`` does not exist in the public
        # TF API; use ``tf.broadcast_to``. All four indicator tensors
        # share the same shape, so a single broadcast suffices.
        sample_weight = tf.broadcast_to(sample_weight, tf.shape(values_tn))
        values_tn = tf.multiply(values_tn, sample_weight)
        values_fp = tf.multiply(values_fp, sample_weight)
        values_fn = tf.multiply(values_fn, sample_weight)
        values_tp = tf.multiply(values_tp, sample_weight)

    self.tn.assign_add(tf.reduce_sum(values_tn))
    self.fp.assign_add(tf.reduce_sum(values_fp))
    self.fn.assign_add(tf.reduce_sum(values_fn))
    self.tp.assign_add(tf.reduce_sum(values_tp))
def correlation_thresh(y_true, y_pred, thresh=2.58, sample_weight=None):
    """Pearson correlation restricted to thresholded positions.

    Only positions where ``y_true > thresh`` AND ``y_pred`` is nonzero
    contribute. Returns a scalar clipped to [-1, 1].

    Args:
        y_true: Ground-truth values; cast to float32.
        y_pred: Predicted values; cast to float32.
        thresh: Threshold applied to ``y_true`` (default 2.58).
        sample_weight: Optional weights, broadcastable to ``y_true``.
    """
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)

    #### THRESHOLD DATA ####
    # Keep positions where y_true exceeds thresh and y_pred != 0.
    data_intersect = tf.cast(tf.math.greater(y_true, thresh), dtype=tf.float32) * y_pred
    mask_intersect = tf.cast(data_intersect, dtype=tf.bool)
    if sample_weight is not None:
        # BUG FIX: ``tf.broadcast_weights`` is not a public TF API.
        # Broadcast to the full shape first, then apply the SAME mask as
        # the data so weights stay aligned with the masked vectors.
        sample_weight = tf.cast(sample_weight, tf.float32)
        sample_weight = tf.boolean_mask(
            tf.broadcast_to(sample_weight, tf.shape(y_true)), mask_intersect)
    y_true = tf.boolean_mask(y_true, mask_intersect)
    y_pred = tf.boolean_mask(y_pred, mask_intersect)

    # NOTE(review): the means are unweighted even when sample_weight is
    # given, matching the original behavior — confirm this is intended.
    mean_ytrue = tf.reduce_mean(y_true, keepdims=True)
    mean_ypred = tf.reduce_mean(y_pred, keepdims=True)
    demean_ytrue = y_true - mean_ytrue
    demean_ypred = y_pred - mean_ypred

    if sample_weight is not None:
        std_y = tf.sqrt(
            tf.reduce_sum(sample_weight * tf.square(demean_ytrue)) *
            tf.reduce_sum(sample_weight * tf.square(demean_ypred)))
        correlation = tf.reduce_sum(
            sample_weight * demean_ytrue * demean_ypred) / std_y
    else:
        std_y = tf.sqrt(
            tf.reduce_sum(tf.square(demean_ytrue)) *
            tf.reduce_sum(tf.square(demean_ypred)))
        correlation = tf.reduce_sum(demean_ytrue * demean_ypred) / std_y

    # Clip for numerical safety.
    return tf.maximum(tf.minimum(correlation, 1.0), -1.0)
def update_state(self, y_true, y_pred, sample_weight=None):
    '''
    Accumulate actual-positive, predicted-positive and true-positive
    counts for one batch.

    Arguments:
        y_true         The actual y. Passed by default to Metric classes.
        y_pred         The predicted y. Passed by default to Metric classes.
        sample_weight  Optional weights, broadcastable to ``y_true``.
    '''
    # Compute the number of actual positives.
    y_true = tf.cast(y_true, tf.bool)
    actual_positives = tf.reduce_sum(
        tf.cast(tf.equal(y_true, True), self.dtype))
    self.actual_positives.assign_add(actual_positives)

    # Compute the number of predicted positives.
    y_pred = tf.greater_equal(
        y_pred, 0.5
    )  # Using default threshold of 0.5 to call a prediction as positive labeled.
    predicted_positives = tf.reduce_sum(
        tf.cast(tf.equal(y_pred, True), self.dtype))
    self.predicted_positives.assign_add(predicted_positives)

    true_positive_values = tf.logical_and(tf.equal(y_true, True),
                                          tf.equal(y_pred, True))
    true_positive_values = tf.cast(true_positive_values, self.dtype)
    if sample_weight is not None:
        sample_weight = tf.cast(sample_weight, self.dtype)
        # BUG FIX: the original referenced an undefined name ``values``
        # and then discarded the weighted product (summing the unweighted
        # tensor). Weight the TP indicators in place and sum those. Also
        # ``tf.broadcast_weights`` is not a public TF API.
        sample_weight = tf.broadcast_to(sample_weight,
                                        tf.shape(true_positive_values))
        true_positive_values = tf.multiply(true_positive_values, sample_weight)
    self.true_positives.assign_add(tf.reduce_sum(true_positive_values))
def correlation(y_true, y_pred, sample_weight=None):
    """Pearson correlation over positions where both inputs are nonzero.

    Returns a scalar clipped to [-1, 1].

    Args:
        y_true: Ground-truth values; cast to float32.
        y_pred: Predicted values; cast to float32.
        sample_weight: Optional weights, broadcastable to ``y_true``.
    """
    #### GET RID OF ZEROS ####
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    # Nonzero product <=> both values nonzero at that position.
    data_intersect = y_true * y_pred
    mask_intersect = tf.cast(data_intersect, dtype=tf.bool)
    if sample_weight is not None:
        # BUG FIX: ``tf.broadcast_weights`` is not a public TF API.
        # Broadcast to the full shape first, then apply the SAME mask as
        # the data so weights stay aligned with the masked vectors.
        sample_weight = tf.cast(sample_weight, tf.float32)
        sample_weight = tf.boolean_mask(
            tf.broadcast_to(sample_weight, tf.shape(y_true)), mask_intersect)
    y_true = tf.boolean_mask(y_true, mask_intersect)
    y_pred = tf.boolean_mask(y_pred, mask_intersect)

    # NOTE(review): the means are unweighted even when sample_weight is
    # given, matching the original behavior — confirm this is intended.
    mean_ytrue = tf.reduce_mean(y_true, keepdims=True)
    mean_ypred = tf.reduce_mean(y_pred, keepdims=True)
    demean_ytrue = y_true - mean_ytrue
    demean_ypred = y_pred - mean_ypred

    if sample_weight is not None:
        std_y = tf.sqrt(
            tf.reduce_sum(sample_weight * tf.square(demean_ytrue)) *
            tf.reduce_sum(sample_weight * tf.square(demean_ypred)))
        correlation = tf.reduce_sum(
            sample_weight * demean_ytrue * demean_ypred) / std_y
    else:
        std_y = tf.sqrt(
            tf.reduce_sum(tf.square(demean_ytrue)) *
            tf.reduce_sum(tf.square(demean_ypred)))
        correlation = tf.reduce_sum(demean_ytrue * demean_ypred) / std_y

    # Clip for numerical safety.
    return tf.maximum(tf.minimum(correlation, 1.0), -1.0)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulate the (optionally weighted) false-positive count.

    Args:
        y_true: Labels; only column 0 is used, cast to bool.
        y_pred: Scores; column 0 is compared against ``self.thresholds``.
        sample_weight: Optional per-sample weights, broadcastable to the
            per-sample indicator vector.
    """
    y_true = tf.cast(y_true[:, 0], tf.bool)
    y_pred = tf.math.greater_equal(y_pred[:, 0], self.thresholds)

    # Indicator of false positives: predicted True where truth is False.
    values = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, True))
    values = tf.cast(values, self.dtype)

    if sample_weight is not None:
        sample_weight = tf.cast(sample_weight, self.dtype)
        # BUG FIX: ``tf.broadcast_weights`` is not a public TF API; use
        # ``tf.broadcast_to`` against the indicator shape.
        sample_weight = tf.broadcast_to(sample_weight, tf.shape(values))
        values = tf.math.multiply(values, sample_weight)

    self.false_positives.assign_add(tf.reduce_sum(values))
def update_state(self, y_true, y_pred, sample_weight=None):
    """Update confusion matrix after each batch.

    Args:
        y_true: One-hot (or per-class score) labels; argmax over axis 1.
        y_pred: Per-class predictions; argmax over axis 1.
        sample_weight: Optional per-sample weights.
    """
    y_true = tf.argmax(y_true, axis=1)
    y_true = tf.cast(y_true, tf.int32)
    y_pred = tf.argmax(y_pred, axis=1)
    y_pred = tf.cast(y_pred, tf.int32)

    # BUG FIX: ``if sample_weight:`` tests the truthiness of a tensor,
    # which is ambiguous and raises in eager/graph mode. Also,
    # broadcasting per-sample weights onto the (C, C) confusion matrix
    # was semantically wrong — ``tf.math.confusion_matrix`` weights each
    # sample directly via its ``weights`` argument.
    if sample_weight is not None:
        sample_weight = tf.cast(sample_weight, tf.float32)
    conf_mat = tf.math.confusion_matrix(y_true,
                                        y_pred,
                                        num_classes=self.num_classes,
                                        weights=sample_weight,
                                        dtype=tf.float32)
    self.conf_mat.assign_add(conf_mat)
def calculate_correlation(y_true, y_pred, sample_weight=None):
    """Pearson correlation between two rank-5 tensors, clipped to [-1, 1].

    Means are taken per-sample over axes 1-4 (keepdims), then a single
    global correlation is computed over all elements.

    Args:
        y_true: Rank-5 tensor (batch + 4 feature axes — TODO confirm layout).
        y_pred: Tensor broadcast-compatible with ``y_true``.
        sample_weight: Optional weights, broadcastable to ``y_true``.
    """
    # NOTE(review): ``assert`` is stripped under ``python -O``; kept to
    # preserve the original exception type for callers.
    assert len(y_true.shape) == 5
    mean_ytrue = tf.reduce_mean(y_true, keepdims=True, axis=[1, 2, 3, 4])
    mean_ypred = tf.reduce_mean(y_pred, keepdims=True, axis=[1, 2, 3, 4])
    demean_ytrue = y_true - mean_ytrue
    demean_ypred = y_pred - mean_ypred

    if sample_weight is not None:
        # BUG FIX: ``tf.broadcast_weights`` is not a public TF API; use
        # ``tf.broadcast_to`` against the data shape.
        sample_weight = tf.broadcast_to(sample_weight, tf.shape(y_true))
        std_y = tf.sqrt(
            tf.reduce_sum(sample_weight * tf.square(demean_ytrue)) *
            tf.reduce_sum(sample_weight * tf.square(demean_ypred)))
        correlation = tf.reduce_sum(
            sample_weight * demean_ytrue * demean_ypred) / std_y
    else:
        std_y = tf.sqrt(
            tf.reduce_sum(tf.square(demean_ytrue)) *
            tf.reduce_sum(tf.square(demean_ypred)))
        correlation = tf.reduce_sum(demean_ytrue * demean_ypred) / std_y

    # Clip for numerical safety.
    return tf.maximum(tf.minimum(correlation, 1.0), -1.0)
def correlation_gm(y_true, y_pred, sample_weight=None):
    """Pearson correlation restricted to gray-matter (GM) mask voxels.

    Loads the Yeo2011 GM template, masks both inputs to GM voxels, and
    returns the correlation clipped to [-1, 1].

    Args:
        y_true: Ground-truth volume(s); rank 5 gets batch/channel axes
            added to the mask.
        y_pred: Predicted volume(s), same layout as ``y_true``.
        sample_weight: Optional weights, broadcastable to ``y_true``.
    """
    # NOTE(review): hard-coded template path — consider parameterizing.
    gm = nibabel.load(
        '/data/Templates/Yeo2011_17Networks_2mm_LiberalMask_64.nii.gz'
    ).get_fdata()

    # BUG FIX: ``K.ndim`` already returns a plain Python int for tensors
    # of known rank; wrapping it in ``K.eval`` fails. Compare directly.
    if K.ndim(y_true) == 5:
        # Add batch and channel axes so the mask lines up with the data.
        gm = np.expand_dims(gm, axis=[0, -1])
    gm = tf.cast(gm, tf.bool)

    #### GM Mask ####
    if sample_weight is not None:
        # BUG FIX: ``tf.broadcast_weights`` is not a public TF API.
        # Broadcast to the unmasked shape, then apply the SAME GM mask
        # so weights stay aligned with the masked vectors.
        sample_weight = tf.boolean_mask(
            tf.broadcast_to(tf.cast(sample_weight, tf.float32),
                            tf.shape(y_true)), gm)
    y_true = tf.boolean_mask(y_true, gm)
    y_pred = tf.boolean_mask(y_pred, gm)

    mean_ytrue = tf.reduce_mean(y_true, keepdims=True)
    mean_ypred = tf.reduce_mean(y_pred, keepdims=True)
    demean_ytrue = y_true - mean_ytrue
    demean_ypred = y_pred - mean_ypred

    if sample_weight is not None:
        std_y = tf.sqrt(
            tf.reduce_sum(sample_weight * tf.square(demean_ytrue)) *
            tf.reduce_sum(sample_weight * tf.square(demean_ypred)))
        correlation = tf.reduce_sum(
            sample_weight * demean_ytrue * demean_ypred) / std_y
    else:
        std_y = tf.sqrt(
            tf.reduce_sum(tf.square(demean_ytrue)) *
            tf.reduce_sum(tf.square(demean_ypred)))
        correlation = tf.reduce_sum(demean_ytrue * demean_ypred) / std_y

    # Clip for numerical safety.
    return tf.maximum(tf.minimum(correlation, 1.0), -1.0)