Example #1
def accfun(y0, y1):
    # x_r, x_f, x_p are tensors captured from the enclosing scope;
    # the y0/y1 arguments required by the Keras metric signature are unused
    x_pos = K.ones_like(x_r)
    x_neg = K.zeros_like(x_r)
    loss_r = K.mean(binary_accuracy(x_pos, x_r))
    loss_f = K.mean(binary_accuracy(x_neg, x_f))
    loss_p = K.mean(binary_accuracy(x_neg, x_p))
    return (1.0 / 3.0) * (loss_r + loss_p + loss_f)
Example #2
def symmetric_accuracy(y_true, y_pred):
    # keep only entries whose label is not the -1.0 "ignore" sentinel
    idx = tf.where(tf.not_equal(y_true, -1.0))
    # swap the two label columns to obtain the complementary labelling
    not_y_true = tf.gather(y_true, K.constant([1, 0], dtype=np.int32), axis=-1)
    y_true = tf.gather_nd(y_true, idx)
    y_pred = tf.gather_nd(y_pred, idx)
    not_y_true = tf.gather_nd(not_y_true, idx)
    return K.maximum(metrics.binary_accuracy(y_true, y_pred),
                     metrics.binary_accuracy(not_y_true, y_pred))
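A toy check of the label-swap symmetry above (a sketch, not from the original source; assumes TF2 eager execution with the imports shown):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K, metrics

# predictions that are perfect under the *swapped* labelling
y_true = tf.constant([[1., 0.], [1., 0.]])
y_pred = tf.constant([[0.1, 0.9], [0.2, 0.8]])
print(symmetric_accuracy(y_true, y_pred).numpy())  # 1.0: the max forgives a consistent label swap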
Example #3
def market_attribute_accuracy(y_true, y_pred):
    # binary categories (binary_1, binary_2, up_colors, down_colors are
    # column-index constants defined elsewhere in the module)
    acc = binary_accuracy(y_true[:, binary_1], y_pred[:, binary_1]) * 0.5
    acc += binary_accuracy(y_true[:, binary_2], y_pred[:, binary_2]) * 0.5
    # top colors
    acc_top_color = categorical_accuracy(y_true[:, up_colors],
                                         y_pred[:, up_colors])
    # down colors
    acc_down_color = categorical_accuracy(y_true[:, down_colors],
                                          y_pred[:, down_colors])

    # print(acc, acc_down_color, acc_top_color)
    return acc * 9 / 11 + acc_down_color * 1 / 11 + acc_top_color * 1 / 11
Example #4
    def _generic_accuracy(y_true, y_pred):
        if K.int_shape(y_pred)[1] == 1:
            return binary_accuracy(y_true, y_pred)
        if K.int_shape(y_true)[-1] == 1:
            return sparse_categorical_accuracy(y_true, y_pred)

        return categorical_accuracy(y_true, y_pred)
Example #5
def validation(sess, model, x, y):
    y_val = tf.placeholder(tf.float32,
                           shape=(None, 1),
                           name='validation_labels')
    X_val = tf.placeholder(tf.float32,
                           shape=(None, batch_shape[1]),
                           name='validation_inputs')

    pred, _ = model.get_pred_and_emb(X_val)
    accuracy = tf.reduce_mean(binary_accuracy(y_val, pred))

    total_batch = int(x.shape[0] / batch_shape[0])
    avg_val_acc = 0
    x, y = shuffle(x, y)
    for i in range(total_batch):
        offset = (i * batch_shape[0]) % (y.shape[0] - batch_shape[0])
        # Generate a minibatch.
        batch_data = x[offset:offset + batch_shape[0]]
        batch_labels = y[offset:offset + batch_shape[0]]
        fd = {
            X_val: batch_data,
            y_val: batch_labels,
            K.backend.learning_phase(): 0  # 0 = test mode (K is the keras module)
        }
        acc_val = sess.run(accuracy, feed_dict=fd)
        avg_val_acc += acc_val / total_batch

    print("Average accuracy on validation is {:.3f}".format(avg_val_acc))
    return avg_val_acc
Example #6
def my_binary_accuracy(y_true, y_pred):
    #     print("my_binary_accuracy")
    #     print(f"y_true:{y_true}, y_pred:{y_pred}")

    y_true_, y_pred_ = tarnsform_metrics(y_true, y_pred)
    #     print(f"y_true_:{y_true_}, y_pred_:{y_pred_}")

    accuracy = binary_accuracy(y_true_, y_pred_)
    return accuracy
Example #7
def treatment_accuracy(concat_true, concat_pred):
    """
    Returns Keras' binary_accuracy between the treatment and the predicted propensity.

    Args:
        - concat_true (tf.Tensor): tensor of true samples, with shape (n_samples, 2).
                                   Each row of concat_true consists of (y, treatment)
        - concat_pred (tf.Tensor): tensor of predictions, with shape (n_samples, 4).
                                   Each row of concat_pred consists of (y0, y1, propensity, epsilon)
    Returns:
        - (float): binary accuracy
    """
    t_true = concat_true[:, 1]
    t_pred = concat_pred[:, 2]
    return binary_accuracy(t_true, t_pred)
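A quick check of the slicing convention in treatment_accuracy (a sketch with made-up numbers, not from the original source; assumes TF2 eager execution):

import tensorflow as tf
from tensorflow.keras.metrics import binary_accuracy

# toy batch of 4 samples: concat_true rows are (y, treatment),
# concat_pred rows are (y0, y1, propensity, epsilon)
concat_true = tf.constant([[1., 1.], [0., 0.], [1., 1.], [0., 0.]])
concat_pred = tf.constant([[.2, .8, .9, 0.], [.7, .1, .2, 0.],
                           [.3, .9, .4, 0.], [.6, .2, .1, 0.]])
t_true = concat_true[:, 1]   # treatment column
t_pred = concat_pred[:, 2]   # propensity column
print(binary_accuracy(t_true, t_pred).numpy())  # 0.75: one propensity (0.4) rounds the wrong way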
Example #8
def metric_with_cilin(y_true, y_pred):
  # accuracy for the skip-gram objective (column 0)
  y_true_ctx = y_true[:, 0]
  y_pred_ctx = y_pred[:, 0]
  accuracy_sg = binary_accuracy(y_true_ctx, y_pred_ctx)
  # accuracy for the cilin objective (column 1)
  y_true_cilin = y_true[:, 1]
  y_pred_cilin = y_pred[:, 1]
  # count labelled (nonzero) cilin entries; fall back to zero_loss() when there are none
  nonzero_count = tf.count_nonzero(y_true_cilin)
  accuracy_cilin = tf.cond(tf.greater(nonzero_count, 0),
            lambda: absolute_accuracy_cilin(y_true_cilin, y_pred_cilin, nonzero_count),
            lambda: zero_loss())
  weights = tf.constant([0.5, 0.5])
  accuracy = tf.reduce_sum(tf.multiply(weights, tf.stack([accuracy_sg, accuracy_cilin])))
  return accuracy
Example #9
def my_accuracy(y_true, y_pred):
    mask_shape = tf.shape(y_pred)
    y_pred = K.reshape(y_pred,
                       (-1, mask_shape[2], mask_shape[3], mask_shape[4]))
    mask_shape = tf.shape(y_true)
    y_true = K.reshape(y_true,
                       (-1, mask_shape[2], mask_shape[3], mask_shape[4]))

    # per-sample mask sum; keep only samples whose ground-truth mask is non-empty
    sm = tf.reduce_sum(y_true, [1, 2, 3])

    ix = tf.where(sm > 0)[:, 0]

    y_true = tf.gather(y_true, ix)
    y_pred = tf.gather(y_pred, ix)

    return binary_accuracy(y_true, y_pred)
Example #10
def _do_test(model, xt, yt):
    if isinstance(model, Bagging):
        yt2 = np.argmax(yt, axis=1)
        res_rounded = model.predict_on_batch(xt, yt2)
        res = res_rounded
    else:
        res = model.predict(xt)
        # res = np.argmax(res, axis=1)
        res_rounded = np.round(res, decimals=0).astype(int)

    cat_res = np_utils.to_categorical(res_rounded)  # computed but unused below
    fscore = metrics.fscore(yt, res_rounded)
    # despite the name, `prec` is mean binary accuracy, not precision
    prec = K.eval(
        K.mean(binary_accuracy(K.variable(yt), K.variable(res_rounded))))

    return fscore, prec, res_rounded, res
Example #11
def test(sess, model):
    X_test = np.load(
        os.path.join(config.dataset_base_dir,
                     "X_test_{}.npz".format(config.dataset_name)))['arr_0']
    y_test = np.load(
        os.path.join(config.dataset_base_dir,
                     "y_test_{}.npz".format(config.dataset_name)))['arr_0']
    y_test = y_test.reshape(y_test.shape[0], 1)

    # Padding
    if not padding_post:
        X_test = K.preprocessing.sequence.pad_sequences(X_test,
                                                        maxlen=batch_shape[1])
    else:
        X_test = K.preprocessing.sequence.pad_sequences(X_test,
                                                        maxlen=batch_shape[1],
                                                        padding='post')

    y = tf.placeholder(tf.float32, shape=(None, 1), name='test_labels')
    X = tf.placeholder(tf.float32,
                       shape=(None, batch_shape[1]),
                       name='test_input')

    pred, _ = model.get_pred_and_emb(X)
    accuracy = tf.reduce_mean(binary_accuracy(y, pred))

    test_accuracies = list()
    total_batch = int(X_test.shape[0] / batch_shape[0])
    X_test, y_test = shuffle(X_test, y_test)
    avg_test_acc = 0
    for i in range(total_batch):
        offset = (i * batch_shape[0]) % (y_test.shape[0] - batch_shape[0])
        # Generate a minibatch.
        batch_data = X_test[offset:offset + batch_shape[0]]
        batch_labels = y_test[offset:offset + batch_shape[0]]
        # test mode
        fd = {X: batch_data, y: batch_labels, K.backend.learning_phase(): 0}
        acc_test = sess.run(accuracy, feed_dict=fd)
        test_accuracies.append(acc_test)
        avg_test_acc += acc_test / total_batch

    np.savez_compressed(
        'Plot/{}_test_accuracies_adv_{}_vadv_{}_decay_{}_clip_{}.npz'.format(
            config.dataset_name, adversarial, virt_adversarial, decay,
            clipping), test_accuracies)
    print("\nAverage accuracy on test set  is {:.3f}".format(avg_test_acc))
Example #12
def compute_detection_score(input, file_segments, model, generator, steps, type):
  # Predict and average over all the segments of each utterance, then save
  # each utterance's score and its true label to 'risultati<type>.txt'.
  # 'label' holds the true values, 'y' the averaged scores.
  f = open(file_segments, 'r')
  f1 = open('risultati' + type + '.txt', 'w', encoding='utf8')
  label = []
  y = []
  rows = f.readlines()
  start = 0
  score_bonafide = []
  score_spoof = []

  predicted = model.predict(x=generator, steps=steps, verbose=1, max_queue_size=10)
  print(predicted.shape)
  for row in rows:
    fields = row.split('\t')
    N = int(fields[0])
    y_pred = np.sum(predicted[start:start + N]) / N
    start += N

    # Compute the log-probability of the bonafide class and record the results
    prob = 1 - y_pred
    log_prob = np.log10(prob)
    label.append(int(fields[1]))
    f1.write(str(y_pred) + '\t' + str(fields[1]) + '\n')
    y.append(y_pred)
    if int(fields[1]) == 0:
      score_bonafide.append(np.copy(log_prob))
    else:
      score_spoof.append(np.copy(log_prob))
    
  label = np.array(label)
  y = np.array(y)
  print(len(score_bonafide))
  print(len(score_spoof))
  score_bonafide = np.array(score_bonafide)
  score_spoof = np.array(score_spoof)

  # cast both to float so tf.equal inside binary_accuracy compares matching dtypes
  print('Accuracy: ', np.array(metrics.binary_accuracy(label.astype('float32'), y.astype('float32'))))
  f.close()
  f1.close()
  return score_bonafide, score_spoof
Example #13
def predict_and_eval(model, X_train, y_train, X_test, y_test, threshold=None):
    logging.info("Performing prediction on train and test for evaluation ...")
    y_pred_train = model.predict(X_train, batch_size=batch_size, verbose=1)
    y_pred_test = model.predict(X_test, batch_size=batch_size, verbose=1)

    eval_types = ['Train', 'Test']

    logs = {}

    for e, eval_type in enumerate(eval_types):
        # print "[%s]" % eval_type

        metric_prefix = '' if e == 0 else 'val_'
        X_eval = X_train if e == 0 else X_test
        y_eval = y_train if e == 0 else y_test
        y_pred_eval = y_pred_train if e == 0 else y_pred_test

        # threshold = 0.48
        # y_pred_eval = (0.5 - threshold) + y_pred_eval

        y_eval = y_eval.astype(float)

        if threshold is not None:
            y_eval = K.clip((0.5 - threshold) + y_eval, 0., 1.)

        logs[metric_prefix + 'loss']        = metrics.binary_crossentropy(y_eval, y_pred_eval).eval()
        logs[metric_prefix + 'acc']         = metrics.binary_accuracy(y_eval, y_pred_eval).eval()
        logs[metric_prefix + 'precision']   = metrics.precision(y_eval, y_pred_eval).eval()
        logs[metric_prefix + 'recall']      = metrics.recall(y_eval, y_pred_eval).eval()
        logs[metric_prefix + 'fbeta_score'] = metrics.fmeasure(y_eval, y_pred_eval).eval()

        # log_file.write("%d,%.5f,%s,%.5f\n" % (epoch, threshold, eval_type, average_faux_jaccard_similarity))
        # print "%d,%.5f,%s,%.4f" % (epoch, threshold, eval_type, average_faux_jaccard_similarity)

    metrics_line = ''
    for s in ['loss', 'acc', 'precision', 'recall', 'fbeta_score']:
        metrics_line += "%s: %.5f %s: %.5f - " %(s, logs[s], 'val_'+s, logs['val_' +s])

    logging.info(metrics_line)
    return logs
Example #14
def batch_pairwise_metrics(y_true, y_pred):
    #assert K.get_variable_shape(y_true)[1] == K.get_variable_shape(y_pred)[1]
    num_classes = K.get_variable_shape(y_pred)[1]
    preds_cats = K.argmax(y_pred, axis=1)
    preds_one_hot = K.one_hot(preds_cats, num_classes)

    overall_precision = [None for _ in range(num_classes)]
    overall_recall = [None for _ in range(num_classes)]
    overall_fmeasure = [None for _ in range(num_classes)]

    out_dict = {}
    for cc in range(num_classes):
        #Metrics should take 1D arrays which are 1 for positive, 0 for negative
        two_true, two_pred = y_true[:, cc], preds_one_hot[:, cc]
        cur_dict = {
            'precision/%02d' % cc:
            kmetrics.precision(two_true, two_pred),
            'recall/%02d' % cc:
            kmetrics.recall(two_true, two_pred),
            'fmeasure/%02d' % cc:
            kmetrics.fmeasure(two_true, two_pred),
            'binary_accuracy/%02d' % cc:
            kmetrics.binary_accuracy(two_true, two_pred),
            'act_pos/%02d' % cc:
            K.sum(two_true),
            'pred_pos/%02d' % cc:
            K.sum(two_pred)
        }
        out_dict.update(cur_dict)

        overall_precision[cc] = cur_dict['precision/%02d' % cc]
        overall_recall[cc] = cur_dict['recall/%02d' % cc]
        overall_fmeasure[cc] = cur_dict['fmeasure/%02d' % cc]

    out_dict.update(make_stats('precision', overall_precision))
    out_dict.update(make_stats('recall', overall_recall))
    out_dict.update(make_stats('fmeasure', overall_fmeasure))

    return out_dict
Example #15
    def update(self, batch):
        x, y = batch
        y = np.array(y)
        y_pred = None

        if self.model_type == 'nn':
            self.train_loss, self.train_acc = self.model.train_on_batch(x, y)
            y_pred = self.model.predict_on_batch(x).reshape(-1)
            self.train_auc = roc_auc_score(y, y_pred)

        if self.model_type == 'ngrams':
            x = vectorize_select_from_data(x, self.vectorizers, self.selectors)
            self.model.fit(x, y.reshape(-1))
            y_pred = np.array(self.model.predict_proba(x)[:, 1]).reshape(-1)
            y_pred_tensor = K.constant(y_pred, dtype='float64')
            self.train_loss = K.eval(
                binary_crossentropy(y.astype('float'), y_pred_tensor))
            self.train_acc = K.eval(
                binary_accuracy(y.astype('float'), y_pred_tensor))
            self.train_auc = roc_auc_score(y, y_pred)
        self.updates += 1
        return y_pred
Example #16
    def _test(self, model=None, return_metrics=False):
        if model is None:
            model = self.model

        test_pred = model.predict(self.xt)
        round_test_pred = np.round(test_pred, decimals=0).astype(int)

        test_fscore = metrics.fscore(self.yt, round_test_pred)
        test_prec = K.eval(
            K.mean(
                binary_accuracy(K.variable(self.yt),
                                K.variable(round_test_pred))))

        if test_fscore > self.best_score:
            self.best_score = test_fscore
            self.best_model = keras_deep_copy_model(model)

        if not return_metrics:
            self.iteration_test_fscore = test_fscore
            self.iteration_test_prec = test_prec
            self.test_history_list.append(test_fscore)
        else:
            return test_fscore, test_prec
Example #17
def accfun(y0, y1):
    # x_p and x_f are tensors captured from the enclosing scope; y0/y1 are unused
    x_pos = K.ones_like(x_p)
    loss_p = K.mean(binary_accuracy(x_pos, x_p))
    loss_f = K.mean(binary_accuracy(x_pos, x_f))
    return 0.5 * (loss_p + loss_f)
Example #18
def c_binary_accuracy(y_truth, y_pred):
    return metrics.binary_accuracy(y_truth[..., 0], y_pred[..., 0])
Example #19
def classifier_accuracy(y_true, y_pred):
    return metrics.binary_accuracy(y_true[..., GT_INDEX.IS_ELLIPSE], y_pred[..., GT_INDEX.IS_ELLIPSE])
Example #20
def mask_accuracy(y_true, y_pred):
    idx = tf.where(tf.not_equal(y_true, -1.0))
    y_true = tf.gather_nd(y_true, idx)
    y_pred = tf.gather_nd(y_pred, idx)
    return metrics.binary_accuracy(y_true, y_pred)
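A toy check of the -1.0 masking above (a sketch with made-up numbers, not from the original source; assumes TF2 eager execution and `from tensorflow.keras import metrics`):

import tensorflow as tf
from tensorflow.keras import metrics

y_true = tf.constant([[1., -1.], [0., 1.]])   # -1.0 marks entries to ignore
y_pred = tf.constant([[0.8, 0.9], [0.2, 0.4]])
# only the three non-masked entries are scored: [1, 0, 1] vs rounded [1, 0, 0]
print(mask_accuracy(y_true, y_pred).numpy())  # ~0.667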
Example #21
def accuracy(x, x_decoded):
    acc = metrics.binary_accuracy(x, x_decoded)
    return K.mean(acc)
Example #22
    test_x = np.array([years, months, days, hours,
                       minutes])  #.reshape((rows, 5))
    test_x = np.transpose(test_x)
    """
    old stuff - sklearn
    """
    # accuracy = model.score(test_x, test_y)

    model = load_model("{}.hdf5".format(args.input))
    y_pred = model.predict(test_x)

    if backend:
        # pure keras.backend path; y_true comes from the surrounding scope
        y_true = cast_to_floatx(y_true)
        y_pred = cast_to_floatx(y_pred)

        accuracy = binary_accuracy(y_true, y_pred)
        accuracy = mean(accuracy)
        value = batch_get_value([accuracy])  # compute the accuracy
        value = value[0]
    else:
        with tf.Session() as sess:
            y_true = tf.cast(y_true, tf.float32)
            y_pred = tf.cast(y_pred, tf.float32)

            accuracy = binary_accuracy(y_true, y_pred)
            accuracy = tf.reduce_mean(accuracy)
            value = sess.run(accuracy)  # compute the accuracy

    # log the accuracy using stdout
    decimals = 100  # i.e. keep two decimal places (10**2)
    percent = np.math.floor(value * 100 * decimals) / decimals
Example #23
def custom_acc(self, y_true, y_pred):
    return binary_accuracy(K.round(y_true), K.round(y_pred))
Example #24
def invasion_acc(y_true, y_pred):
    binary_truth = y_true[:, -2] + y_true[:, -1]
    binary_pred = y_pred[:, -2] + y_pred[:, -1]
    return binary_accuracy(binary_truth, binary_pred)
Example #25
def accuracy(y_true, y_pred):
    return binary_accuracy(y_true, y_pred)
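For reference, binary_accuracy rounds predictions at the default 0.5 threshold before comparing; a minimal check (assumes TF2 eager execution):

import tensorflow as tf
from tensorflow.keras.metrics import binary_accuracy

y_true = tf.constant([1., 0., 1., 0.])
y_pred = tf.constant([0.9, 0.4, 0.3, 0.1])
# 0.3 rounds to 0 and misses its label, so 3 of 4 match
print(binary_accuracy(y_true, y_pred).numpy())  # 0.75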
Example #26
# (fragment: `inputs` and `pool1` are defined earlier in the original script)
conv3 = Conv2D(128, (3, 3), padding='valid', activation='relu')(pool1)
pool2 = MaxPool2D((2, 2), padding='valid', strides=2)(conv3)
conv5 = Conv2D(256, (3, 3), padding='valid', activation='relu')(pool2)
up1 = UpSampling2D((2, 2))(conv5)
up2 = UpSampling2D((2, 2))(up1)
norm1 = BatchNormalization()(up2)
final = Conv2D(1, (1, 1), activation='sigmoid')(norm1)  # softmax over one channel is constant; sigmoid matches binary_crossentropy
model3 = Model(inputs=inputs, outputs=final)

model3.compile(loss='binary_crossentropy',
               optimizer=Adam(lr=1e-4),
               metrics=['accuracy'])
callbacks_list = [
    EarlyStopping(monitor='val_acc', patience=1),
    ModelCheckpoint(filepath='/global/scratch/cgroschner/encoder3000train.h5',
                    monitor='val_acc',
                    save_best_only=True)
]

model3.fit(trainX,
           trainY,
           batch_size=20,
           epochs=5,
           verbose=1,
           shuffle=True,
           callbacks=callbacks_list,
           validation_data=(testX, testY))
model3.save('/global/scratch/cgroschner/encoder3000train.h5')
predY = model3.predict(testX, batch_size=10, verbose=1)
print(metrics.binary_accuracy(testY, predY))
Example #27
def ia_acc(y_true, y_pred):
    binary_truth = y_true[:, -1]
    binary_pred = y_pred[:, -1]
    return binary_accuracy(binary_truth, binary_pred)
Example #28
def binary_error(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
    return 1.0 - binary_accuracy(y_true, y_pred)
Example #29
def binaryAccuracy(y_true, y_pred):
    return binary_accuracy(y_true, K.sigmoid(y_pred))
Example #30
def sigm_binary_accuracy(y_true, y_pred):
    return binary_accuracy(y_true, tf.math.sigmoid(y_pred))
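A sketch of how a logits-based metric like sigm_binary_accuracy might be wired into training; the toy model below is an assumption, not from the original source (note the matching loss must also be told to expect logits):

import tensorflow as tf
from tensorflow.keras.metrics import binary_accuracy  # used by sigm_binary_accuracy

# toy model whose final Dense layer emits raw logits (no activation)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1),
])
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=[sigm_binary_accuracy],
)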