import itertools

import numpy as np
import tensorflow as tf
import keras.backend as K

# Conversion factor from nats to bits (1 nat = 1/ln 2 bits).
nats2bits = 1.0 / np.log(2)


def logsumexp(x, axis=None):
    # Numerically stable log-sum-exp, delegated to TensorFlow. Assumed helper:
    # kde_entropy below calls it but did not define it in this section.
    return tf.reduce_logsumexp(x, axis=axis)


def kde_entropy(output, var):
    """Kernel density estimate of the entropy of `output`, in bits.

    `output` is an (N, dims) batch of activations; `var` is the variance of
    the Gaussian kernel placed on each sample.
    """
    dims = K.cast(K.shape(output)[1], K.floatx())
    N = K.cast(K.shape(output)[0], K.floatx())
    normconst = (dims / 2.0) * K.log(2 * np.pi * var)

    # Pairwise squared distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2<a, b>.
    x2 = K.expand_dims(K.sum(K.square(output), axis=1), 1)
    dists = x2 + K.transpose(x2) - 2 * K.dot(output, K.transpose(output))
    dists = dists / (2 * var)

    # For a leave-one-out estimate, use normCount = N - 1 and add a large
    # constant to the diagonal of `dists` to remove each sample's own kernel.
    normCount = N

    # Log-density of each sample under the mixture of N Gaussians.
    lprobs = logsumexp(-dists, axis=1) - K.log(normCount) - normconst
    h = -K.mean(lprobs)
    return nats2bits * h
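
# Example (a minimal sketch, not part of the original module): evaluating the
# KDE entropy estimate on a random activation batch. `acts` stands in for a
# hidden-layer activation matrix and `noise_var` for an assumed kernel
# variance; both names are illustrative.
def _example_kde_entropy():
    acts = K.constant(np.random.randn(128, 20).astype('float32'))
    noise_var = 0.1
    print('estimated entropy: %.2f bits' % K.eval(kde_entropy(acts, noise_var)))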

def w_categorical_crossentropy(y_true, y_pred, weights):
    """Categorical crossentropy weighted by a (true class, predicted class)
    cost matrix `weights`, for 3D (batch, timestep, class) tensors."""
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:, :, 0])
    y_pred_max = K.max(y_pred, axis=-1)
    y_pred_max = K.expand_dims(y_pred_max, axis=-1)
    y_pred_max_mat = K.cast(K.equal(y_pred, y_pred_max), K.floatx())
    # Accumulate the cost for every (true, predicted) class pair.
    for c_p, c_t in itertools.product(range(nb_cl), range(nb_cl)):
        final_mask += (weights[c_t, c_p]
                       * y_pred_max_mat[:, :, c_p]
                       * y_true[:, :, c_t])
    # Keras 2 takes (target, output); the original (y_pred, y_true) order is
    # the Keras 1 signature.
    return K.categorical_crossentropy(y_true, y_pred) * final_mask
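
# Example (a minimal sketch, not part of the original module): Keras losses
# must take exactly (y_true, y_pred), so the cost matrix is bound in with a
# closure. `w_array` is an illustrative 3-class cost matrix that makes
# predicting class 2 when the true class is 1 five times more expensive.
def make_weighted_loss(weights):
    def loss(y_true, y_pred):
        return w_categorical_crossentropy(y_true, y_pred, weights)
    return loss

# w_array = np.ones((3, 3))
# w_array[1, 2] = 5.0
# model.compile(optimizer='adam', loss=make_weighted_loss(w_array))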

def accuracy(y_true, y_predicted):
    """Per-position accuracy that ignores entries whose true label is class 0
    (e.g. padding in sequence labelling)."""
    y = tf.argmax(y_true, axis=-1)
    y_ = tf.argmax(y_predicted, axis=-1)
    mask = tf.greater(y, 0)  # keep only non-padding positions
    return K.cast(K.equal(tf.boolean_mask(y, mask),
                          tf.boolean_mask(y_, mask)), K.floatx())
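
# Example (a minimal sketch, not part of the original module): the masking
# behaviour on a toy batch. The first timestep has true class 0 (padding) and
# is dropped, so only the second, correctly predicted step is scored.
def _example_masked_accuracy():
    y_true = K.constant([[[1., 0., 0.], [0., 1., 0.]]])
    y_pred = K.constant([[[0.1, 0.8, 0.1], [0.2, 0.7, 0.1]]])
    print(K.eval(accuracy(y_true, y_pred)))  # -> [1.]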

def kde_condentropy(output, var):
    """Normalization constant of the Gaussian kernel, in bits, used as the
    conditional-entropy term of the KDE estimator.

    Note: the full differential entropy of a Gaussian would add dims/2 nats,
    i.e. normconst + (dims/2.0).
    """
    dims = K.cast(K.shape(output)[1], K.floatx())
    normconst = (dims / 2.0) * K.log(2 * np.pi * var)
    return nats2bits * normconst
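
# Example (a minimal sketch, not part of the original module): the standard
# way these two estimators combine, I(X; T) ~= h(T) - h(T|X), for a layer T
# observed through additive Gaussian noise of variance `noise_var`. `acts`
# is an illustrative activation batch.
def _example_mutual_information():
    acts = K.constant(np.random.randn(256, 12).astype('float32'))
    noise_var = 0.05
    mi_bits = K.eval(kde_entropy(acts, noise_var)
                     - kde_condentropy(acts, noise_var))
    print('I(X; T) estimate: %.2f bits' % mi_bits)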