def mean_accuracy_metric(y_true, y_pred):
    # accuracy over the first chunk
    sum_of_accuracy = categorical_accuracy(y_true[0:lenght_of_input],
                                           y_pred[0:lenght_of_input])

    # accumulate the accuracy of the remaining chunks
    for i in range(1, number_of_inputs):
        start_index = i * lenght_of_input
        end_index = (i + 1) * lenght_of_input
        accuracy = categorical_accuracy(y_true[start_index:end_index],
                                        y_pred[start_index:end_index])
        sum_of_accuracy = sum_of_accuracy + accuracy

    # average the per-chunk accuracies
    return sum_of_accuracy / number_of_inputs
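
A minimal usage sketch (assumed, not from the original project): the metric above depends on the module-level names lenght_of_input and number_of_inputs, so they must be set before the metric is passed to Keras. The values below are purely illustrative.

lenght_of_input = 50      # hypothetical length of one input chunk
number_of_inputs = 4      # hypothetical number of chunks per batch

# model.compile(optimizer='adam', loss='categorical_crossentropy',
#               metrics=[mean_accuracy_metric])
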
Example #2
def market_attribute_accuracy(y_true, y_pred):
    # binary categories
    acc = binary_accuracy(y_true[:, binary_1], y_pred[:, binary_1]) * 0.5
    acc += binary_accuracy(y_true[:, binary_2], y_pred[:, binary_2]) * 0.5
    # top colors
    acc_top_color = categorical_accuracy(y_true[:, up_colors],
                                         y_pred[:, up_colors])
    # down colors
    acc_down_color = categorical_accuracy(y_true[:, down_colors],
                                          y_pred[:, down_colors])

    # print(acc, acc_down_color, acc_top_color)
    return acc * 9 / 11 + acc_down_color * 1 / 11 + acc_top_color * 1 / 11
Example #3
    def __init__(self, Nin, Nh_l, Nout):
        # Nin: input size, Nh_l: hidden-layer sizes, Nout: output size
        self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))  # input placeholder
        self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))  # label placeholder

        # Modeling
        H = Dense(Nh_l[0], activation='relu')(self.X_ph)  # first hidden layer, Nh_l[0] units
        H = Dropout(0.5)(H)  # dropout
        H = Dense(Nh_l[1], activation='relu')(H)  # second hidden layer, Nh_l[1] units
        H = Dropout(0.25)(H)  # dropout
        self.Y_tf = Dense(Nout, activation='softmax')(H)  # output layer with softmax

        # Operation
        self.Loss_tf = tf.reduce_mean(
            categorical_crossentropy(self.L_ph, self.Y_tf))  # loss: cross-entropy between labels and outputs
        self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)  # optimizer: Adam
        self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)  # accuracy via the Keras metric
        self.Init_tf = tf.global_variables_initializer()  # TensorFlow global variable initializer
Example #4
def kaggle_sliced_accuracy(y_true, y_pred, slice_weights=[1.] * 11):
    question_slices = [
        slice(0, 3),
        slice(3, 5),
        slice(5, 7),
        slice(7, 9),
        slice(9, 13),
        slice(13, 15),
        slice(15, 18),
        slice(18, 25),
        slice(25, 28),
        slice(28, 31),
        slice(31, 37)
    ]

    accuracy_slices = [
        categorical_accuracy(y_true[:, question_slices[i]],
                             y_pred[:, question_slices[i]]) * slice_weights[i]
        for i in range(len(question_slices))
    ]
    accuracy_slices = T.cast(accuracy_slices, 'float32')
    return {
        'sliced_accuracy_mean': T.mean(accuracy_slices),
        'sliced_accuracy_std': T.std(accuracy_slices)
    }
Example #5
def fnc_score(y_true, y_pred):
    "Assumes two outputs: related and stance"
    y_true_related, y_true_stance = tf.split(y_true, 2, axis=1)
    y_pred_related, y_pred_stance = tf.split(y_pred, 2, axis=1)
    print(y_true_related, y_true_stance, y_pred_related, y_pred_stance)

    a1 = categorical_accuracy(y_true_related, y_pred_related)
    a2 = categorical_accuracy(y_true_stance, y_pred_stance)

    print(a1, a2)
    return (
        tf.multiply(a1, 0.25) + 
        tf.multiply(a2, 0.75)
        #tf.multiply(categorical_accuracy(y_true_related, y_pred_related), 0.25) + 
        #tf.multiply(categorical_accuracy(y_true_stance, y_pred_stance), 0.75)
    )
Example #6
    def _generic_accuracy(y_true, y_pred):
        if K.int_shape(y_pred)[1] == 1:
            return binary_accuracy(y_true, y_pred)
        if K.int_shape(y_true)[-1] == 1:
            return sparse_categorical_accuracy(y_true, y_pred)

        return categorical_accuracy(y_true, y_pred)
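
A small illustration (a sketch, not from the source repository) of how the shape-based dispatch above behaves; the tensors are illustrative only.

import numpy as np
from keras import backend as K

# Single-column predictions -> routed to binary_accuracy
y_pred_bin = K.variable(np.array([[0.9], [0.2]]))
# Single-column integer labels with multi-class predictions -> sparse_categorical_accuracy
y_true_sparse = K.variable(np.array([[1.], [0.]]))
y_pred_multi = K.variable(np.array([[0.2, 0.8], [0.6, 0.4]]))
# One-hot labels with multi-class predictions -> categorical_accuracy
y_true_onehot = K.variable(np.array([[0., 1.], [1., 0.]]))
# e.g. K.eval(_generic_accuracy(y_true_sparse, y_pred_multi)) would take the sparse branch
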
Example #7
def ctm_acc1(y_true, y_pred):
    # print("ctm_acc1")
    # true = tf.split(y_true, 3, axis=-1)[0]
    pred_list = tf.split(y_pred, 3, axis=-1)
    pred = pred_list[0] + pred_list[1] + 0.1 * pred_list[2]
    # print(categorical_accuracy(true, pred))
    return categorical_accuracy(y_true, pred)
Example #8
def acc_kldiv(y_in, x):
    """
    Corrected accuracy to be used with custom loss_kldiv
    """
    h = y_in[:, 0:NBINS]  # binned targets packed into y_in (not used by the accuracy)
    y = y_in[:, NBINS:]   # class targets

    return categorical_accuracy(y, x)
Example #9
def certain_predictions_acc(y_true, y_pred):
    """
    accuracy of predictions that are certain
    """
    c_pred_ind = _certain_ind(y_pred, uncertain=False)

    return categorical_accuracy(K.gather(y_true, c_pred_ind),
                                K.gather(y_pred, c_pred_ind))
Example #10
def trip_accuracy(y_true, y_pred):
    # Separate embeddings into the triplets
    num_classes = y_pred._keras_shape[-1]

    trip_pred = K.reshape(y_pred, (-1, 3, num_classes))
    trip_labels = K.reshape(y_true, (-1, 3, num_classes))

    # Return correct classification
    return metrics.categorical_accuracy(trip_labels[:, 0], trip_pred[:, 0])
Example #11
def L_acc(y_true, y_pred):
    # Confidence component (aka objectness)

    y_true_confidence = y_true[:, 0]
    y_pred_confidence = K.round(y_pred[:, 0])

    #confidence_accuracy = K.cast(K.equal(y_true_confidence, K.round(y_pred_confidence)), K.floatx())

    # Class component

    y_true_classes = y_true[:, 4:]
    y_pred_classes = y_pred[:, 4:]

    classes_accuracy = categorical_accuracy(y_true_classes, y_pred_classes)

    #y_true_coord = y_true[:, 1:4]
    #y_pred_coord = y_pred[:, 1:4]

    true_xy = y_true[:, 1:3]
    pred_xy = y_pred[:, 1:3]

    true_r = y_true[:, 3:4]
    pred_r = y_pred[:, 3:4]

    # compute IOU, using the top,left,bottom,right representation.
    true_mins = true_xy - true_r
    true_maxes = true_xy + true_r

    pred_mins = pred_xy - pred_r
    pred_maxes = pred_xy + pred_r

    intersect_mins = K.maximum(pred_mins, true_mins)
    intersect_maxes = K.minimum(pred_maxes, true_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

    pred_areas = 4. * pred_r[..., 0] * pred_r[..., 0]  # a square
    true_areas = 4. * true_r[..., 0] * true_r[..., 0]

    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = intersect_areas / union_areas

    iou_accuracy = K.cast(iou_scores > 0.6, K.floatx())

    #joint_accuracy = confidence_accuracy * classes_accuracy * iou_accuracy

    joint_accuracy = (y_true_confidence * y_pred_confidence * classes_accuracy * iou_accuracy) + \
                     ((1.0 - y_true_confidence) * (1.0 - y_pred_confidence))  # if background, don't care about class or IOU
    '''
    joint_accuracy = tf.Print(joint_accuracy, [K.dtype(confidence_accuracy), 
                                               K.dtype(classes_accuracy),
                                               K.dtype(iou_accuracy),
                                               K.dtype(joint_accuracy)
                                               ], message='confidence, class, iou, joint dtype: ', summarize=20)
    '''

    return joint_accuracy
Example #12
    def adv_acc(y, _):
        # Generate adversarial examples
        x_adv = fgsm(model, y, eps=eps, clip_min=clip_min, clip_max=clip_max)
        # Consider the attack to be constant
        x_adv = K.stop_gradient(x_adv)

        # Accuracy on the adversarial examples
        preds_age, preds_race, preds_gender = model(x_adv)
        return categorical_accuracy(y, preds_race)
Example #13
def test_sparse_categorical_accuracy_correctness():
    y_a = K.variable(np.random.randint(0, 7, (6,)), dtype=K.floatx())
    y_b = K.variable(np.random.random((6, 7)), dtype=K.floatx())
    # use one_hot embedding to convert sparse labels to equivalent dense labels
    y_a_dense_labels = K.cast(K.one_hot(K.cast(y_a, dtype='int32'), num_classes=7),
                              dtype=K.floatx())
    sparse_categorical_acc = metrics.sparse_categorical_accuracy(y_a, y_b)
    categorical_acc = metrics.categorical_accuracy(y_a_dense_labels, y_b)
    assert np.allclose(K.eval(sparse_categorical_acc), K.eval(categorical_acc))
Example #15
def true_accuracy(y_true, y_pred):
    '''
    Ignore START_OF_SENTENCE and END_OF_SENTENCE when calculating accuracy.
    Also ignore zero paddings.
    '''
    # ignore SOS, EOS
    trimmed_y_true = y_true[:, 1:-1, :]
    trimmed_y_pred = y_pred[:, 1:-1, :]

    return categorical_accuracy(trimmed_y_true, trimmed_y_pred)
Example #16
 def adv_acc(y, _):
     # Generate adversarial examples
     y_gender = tf.get_default_graph().get_tensor_by_name("gender_target:0")
     x_adv = fgsm(model, y_gender, eps=eps, clip_min=clip_min, clip_max=clip_max)
     # Consider the attack to be constant
     x_adv = K.stop_gradient(x_adv)
     
     # Accuracy on the adversarial examples
     _, preds_age = model(x_adv)
     return categorical_accuracy(y, preds_age)
Example #17
def acc_reg(y_in, x_in):
    """
    Corrected accuracy to be used with custom loss_reg
    """
    h = y_in[:, 0:NBINS]      # binned targets (not used by the accuracy)
    y = y_in[:, NBINS:]       # class targets
    hpred = x_in[:, 0:NBINS]  # binned predictions (not used by the accuracy)
    ypred = x_in[:, NBINS:]   # class predictions

    return categorical_accuracy(y, ypred)
Example #18
def accuracy2(args):
    y_pred, y_true = args
    y_pred = K.softmax(y_pred, axis=-1)
    match_board = []
    for i in range(3):
        tmp = tf.ones_like(y_true) * i
        tmp = tf.cast(tf.equal(y_true, tmp), tf.float32)
        match_board.append(tmp)
    y_true_onehot = tf.concat(match_board, axis=1)
    acc = categorical_accuracy(y_true_onehot, y_pred)
    return acc
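
An equivalent formulation (a sketch, not from the original code): the loop above one-hot encodes the integer labels by comparing them against each class index, which tf.one_hot can express directly.

import tensorflow as tf
from keras import backend as K
from keras.metrics import categorical_accuracy

def accuracy2_onehot(args):
    y_pred, y_true = args
    y_pred = K.softmax(y_pred, axis=-1)
    y_true_int = tf.cast(tf.reshape(y_true, [-1]), tf.int32)
    y_true_onehot = tf.one_hot(y_true_int, depth=3)  # same layout as the concat above
    return categorical_accuracy(y_true_onehot, y_pred)
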
Example #19
def evaluate(conf):
    _, x_test, _, _, y_test, _, _, _, _, x_test_units, y_test_units = full_data_pipeline(
        conf)
    model = build_model(conf)
    tu_rolls, tu_features = uncombine_features(x_test_units)
    tu_x = []
    tu_x.append(tu_rolls)
    tu_x.extend(tu_features)
    mod_tu_x = []
    for feat in tu_x:
        mod_tu_x.append(np.stack(feat))
    tu_y = np.stack(y_test_units)
    print('Single sample test results : ')
    evaluate_model(conf, model, mod_tu_x, tu_y)
    print('Full track test results : ')
    pianos_s, feats_s = uncombine_features(x_test)
    y_pred = []
    for s in range(len(pianos_s)):
        piano_s = []
        piano_s.append(pianos_s[s])
        feat_s = []
        for ft in feats_s:
            feat_s.append(ft[s])
        x = prediction_data(conf, piano_s, feat_s)
        mod_x = []
        for feat in x:
            mod_x.append(np.stack(feat))
        y = predict_model(conf, model, mod_x)
        y = np.argmax(y, axis=1)
        votes = np.bincount(y)
        y_pred_s = np.zeros((conf['dataset']['num_class']))
        for i in range(y_pred_s.shape[0]):
            if (i < len(votes)):
                y_pred_s[i] = votes[i]
            else:
                y_pred_s[i] = 0
        y_pred_s = y_pred_s / np.sum(y_pred_s)
        y_pred.append(y_pred_s)
    y_test = np.stack(y_test)
    y_pred = np.stack(y_pred)
    y_test = y_test.astype('float32')
    y_pred = y_pred.astype('float32')
    y_true = K.constant(y_test)
    y_pred = K.constant(y_pred)
    loss = K.categorical_crossentropy(target=y_true, output=y_pred)
    loss = K.eval(loss)
    loss = np.mean(loss)
    acc = categorical_accuracy(y_true, y_pred)
    acc = K.eval(acc)
    acc = np.mean(acc)
    print('Test loss:', loss)
    print('Test accuracy:', acc)
Example #20
def target_acc(y_true, y_pred, targetCols=(59, 62), val=False):
    tars = y_true[:, :, targetCols[0]:targetCols[1]]
    preds = y_pred[:, :, targetCols[0]:targetCols[1]]
    if not val:
        return metrics.categorical_accuracy(tars, preds)
    else:
        #This only works for a single example at a time
        tars = tars[0, 0, :]
        preds = preds[0, 0, :]
        if np.argmax(tars) == np.argmax(preds):
            return 1
        else:
            return 0
Example #21
    def custom_acc(self, y_true, y_pred):
        y_true_shape = (-1, self.grid_size, self.grid_size,
                        self.bbox_params + len(self.classes))
        y_pred_shape = (-1, self.grid_size, self.grid_size,
                        self.bbox_count * self.bbox_params + len(self.classes))

        y_true = tf.reshape(y_true, y_true_shape, name='reshape_y_true')
        y_pred = tf.reshape(y_pred, y_pred_shape, name='reshape_y_pred')

        # shape=(?, 21, 21, 10),
        predicted_class_prob = y_pred[:, :, :, 10:]
        true_class_prob = y_true[:, :, :, 5:]

        return categorical_accuracy(true_class_prob, predicted_class_prob)
Example #22
def accuracy(package_data, p_class):
    from keras.metrics import categorical_accuracy
    g_class = tf.gather(package_data, indices=[0], axis=-1)
    mask = tf.reshape(
        tf.equal(g_class, Btype.NEG) | tf.equal(g_class, Btype.POSITIVE), [-1])
    p_class = tf.boolean_mask(p_class, mask)

    cate = tf.cast(tf.reshape(tf.equal(g_class, Btype.POSITIVE), [-1]),
                   tf.int32)
    cate = tf.boolean_mask(cate, mask)
    cate_one_hot = tf.reshape(tf.one_hot(cate, tf.constant(2, tf.int32)),
                              [-1, 2])

    return categorical_accuracy(cate_one_hot, tf.reshape(p_class, [-1, 2]))
Example #23
def padded_categorical_accuracy(y_true, y_pred):
    """Accuracy of a batch, ignoring padding.

    >>> sh = [1, 3, 3] # 1 batch size x 3 time steps x 3 categories
    >>> true = tf.constant([0., 0., 1., 0., 0., 1., 1., 0., 0.], shape=sh)
    <[2, 2, 0 (padding)]>
    >>> pred = tf.constant([0, 0.7, 0.3, 0, 0.3, 0.7, 0.5, 0.3, 0.2], shape=sh)
    <[1, 2, 0]>
    >>> padded_categorical_accuracy(true, pred).eval()  # => 0.5
    """
    padded = tf.squeeze(tf.slice(y_true, [0, 0, 0], [-1, -1, 1]), axis=2)
    mask = K.equal(padded, 0.)
    return categorical_accuracy(tf.boolean_mask(y_true, mask),
                                tf.boolean_mask(y_pred, mask))
Example #24
def model_eval(x, y, model, X_test, Y_test, back='th'):
    """
    Compute the accuracy of a TF model on some data
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param model: model output predictions
    :param X_test: numpy array with test inputs
    :param Y_test: numpy array with test labels
    :return: a float with the accuracy value
    """
    # Define symbolic accuracy
    input_shape = (None, FLAGS.img_rows, FLAGS.img_cols)
    acc_value = categorical_accuracy(y, model)
    acc_value = K.function([x, y, K.learning_phase()], [acc_value])

    # Init result var
    accuracy = 0.0

    # Compute number of batches
    nb_batches = int(math.ceil(float(len(X_test)) / FLAGS.batch_size))
    assert nb_batches * FLAGS.batch_size >= len(X_test)

    for batch in range(nb_batches):
        if batch % 100 == 0 and batch > 0:
            print("Batch " + str(batch))

        # Must not use the `batch_indices` function here, because it
        # repeats some examples.
        # It's acceptable to repeat during training, but not eval.
        start = batch * FLAGS.batch_size
        end = min(len(X_test), start + FLAGS.batch_size)
        cur_batch_size = end - start

        # The last batch may be smaller than all others, so we need to
        # account for variable batch size here
        if back == 'tf':
            accuracy += cur_batch_size * acc_value.eval(feed_dict={
                x: X_test[start:end],
                y: Y_test[start:end]
            })
        elif back == 'th':
            accuracy += cur_batch_size * acc_value(
                [X_test[start:end], Y_test[start:end], 0])[0]
    assert end >= len(X_test)

    # Divide by number of examples to get final value
    accuracy /= len(X_test)

    return accuracy
Example #25
 def accuracy(self, x, t):
     x_ph = tf.placeholder(tf.float32, shape=[None, self.image_size])
     t_ph = tf.placeholder(tf.float32, shape=[None, self.n_classes])
     y_op = self._encode_y_given_x(x_ph)
     acc_value = categorical_accuracy(t_ph, y_op)
     with tf.Session() as sess:
         if self.filepath:
             self.saver.restore(sess, self.filepath)
         result = sess.run(acc_value,
                           feed_dict={
                               x_ph: x,
                               t_ph: t,
                               K.learning_phase(): 0  # evaluation mode
                           })
     return result
Example #26
def evaluate_ensemble(Best=True):
    '''
    Loads and evaluates an ensemble of the models in the weights folder.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_test = X_test.reshape(10000, 784)
    X_test = X_test.astype('float32')
    X_test /= 255
    Y_test = np_utils.to_categorical(y_test, 10)

    model_dirs = []
    for i in os.listdir('weights'):
        if '.h5' in i:
            if not Best:
                model_dirs.append(i)
            else:
                if 'Best' in i:
                    model_dirs.append(i)

    preds = []
    model = create_model()
    for mfile in model_dirs:
        print(os.path.join('weights', mfile))
        model.load_weights(os.path.join('weights', mfile))
        yPreds = model.predict(X_test, batch_size=128, verbose=0)
        preds.append(yPreds)

    weighted_predictions = np.zeros((X_test.shape[0], 10), dtype='float64')
    weight = 1. / len(preds)
    for prediction in preds:
        weighted_predictions += weight * prediction
    y_pred = weighted_predictions

    print(type(Y_test))
    print(type(y_pred))
    Y_test = tf.convert_to_tensor(Y_test)
    y_pred = tf.convert_to_tensor(y_pred)
    print(type(Y_test))
    print(type(y_pred))

    loss = metrics.categorical_crossentropy(Y_test, y_pred)
    acc = metrics.categorical_accuracy(Y_test, y_pred)
    sess = tf.Session()
    print('--------------------------------------')
    print('ensemble')
    print('Test loss:', loss.eval(session=sess))
    print('error:', str((1. - acc.eval(session=sess)) * 100) + '%')
    print('--------------------------------------')
Example #27
 def __init__(self, Nin, Nh_l, Nout):
     self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))
     self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))
     
     # Modeling
     H = Dense(Nh_l[0], activation='relu')(self.X_ph)
     H = Dropout(0.5)(H)
     H = Dense(Nh_l[1], activation='relu')(H) 
     H = Dropout(0.25)(H)
     self.Y_tf = Dense(Nout, activation='softmax')(H)
     
     # Operation
     self.Loss_tf = tf.reduce_mean(
         categorical_crossentropy(self.L_ph, self.Y_tf))
     self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)
     self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)
     self.Init_tf = tf.global_variables_initializer()
Example #28
def L_acc(y_true, y_pred):
    # Localization accuracy: a match is registered only if class is correctly identified with IOU > 0.6

    # Confidence component (aka objectness)

    y_true_confidence = y_true[:, 0]
    y_pred_confidence = K.round(K.sigmoid(y_pred[:, 0]))

    # Class component

    y_true_classes = y_true[:, 4:]
    y_pred_classes = K.softmax(y_pred[:, 4:])

    classes_accuracy = categorical_accuracy(y_true_classes, y_pred_classes)

    true_xy = y_true[:, 1:3]
    pred_xy = K.sigmoid(y_pred[:, 1:3])

    true_r = y_true[:, 3:4]
    pred_r = K.exp(y_pred[:, 3:4])

    # compute IOU, using the top,left,bottom,right representation.
    true_mins = true_xy - true_r
    true_maxes = true_xy + true_r

    pred_mins = pred_xy - pred_r
    pred_maxes = pred_xy + pred_r

    intersect_mins = K.maximum(pred_mins, true_mins)
    intersect_maxes = K.minimum(pred_maxes, true_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

    pred_areas = 4. * pred_r[..., 0] * pred_r[..., 0]  # a square
    true_areas = 4. * true_r[..., 0] * true_r[..., 0]

    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = intersect_areas / union_areas

    iou_accuracy = K.cast(iou_scores > 0.6, K.floatx())


    joint_accuracy = (y_true_confidence * y_pred_confidence * classes_accuracy * iou_accuracy) + \
                     ((1.0 - y_true_confidence) * (1.0 - y_pred_confidence))  # if background, don't care about class or IOU

    return joint_accuracy
Example #29
def test_model(preds, in_images, test_files, chunk_size=64, shuffle=True):
    """Test the model"""
    import tensorflow as tf
    from keras import backend as K
    from keras.objectives import binary_crossentropy 
    import numpy as np
    from keras.metrics import categorical_accuracy
    from tqdm import tqdm
    
    in_labels = tf.placeholder(tf.float32, shape=(None, 2))
    
    cross_entropy = tf.reduce_mean(binary_crossentropy(in_labels, preds))
    accuracy = tf.reduce_mean(categorical_accuracy(in_labels, preds))
    auc = tf.metrics.auc(tf.cast(in_labels, tf.bool), preds)
   
    n_test_events = count_events(test_files)
    chunk_num = int(n_test_events/chunk_size)+1
    preds_all = []
    label_all = []
    
    sess = tf.get_default_session()
    sess.run(tf.local_variables_initializer())
    
    avg_accuracy = 0
    avg_auc = 0
    avg_test_loss = 0
    is_training = tf.get_default_graph().get_tensor_by_name('is_training:0')
    for img_chunk, label_chunk, real_chunk_size in tqdm(chunks(test_files, chunk_size, shuffle=shuffle),total=chunk_num):
        test_loss, accuracy_result, auc_result, preds_result = sess.run([cross_entropy, accuracy, auc, preds],
                        feed_dict={in_images: img_chunk,
                                   in_labels: label_chunk,
                                   K.learning_phase(): 0,
                                   is_training: False})
        avg_test_loss += test_loss * real_chunk_size / n_test_events
        avg_accuracy += accuracy_result * real_chunk_size / n_test_events
        avg_auc += auc_result[0]  * real_chunk_size / n_test_events 
        preds_all.extend(preds_result)
        label_all.extend(label_chunk)
    
    print("test_loss = ", "{:.3f}".format(avg_test_loss))
    print("Test Accuracy:", "{:.3f}".format(avg_accuracy), ", Area under ROC curve:", "{:.3f}".format(avg_auc))
    
    return avg_test_loss, avg_accuracy, avg_auc, np.asarray(preds_all).reshape(n_test_events,2), np.asarray(label_all).reshape(n_test_events,2)
Example #30
def recall(y_true, y_pred, num_aspect_ratios, num_classes):
    '''
    Out of all the default boxes that are not background, how many does the model get right.
    Parameters are the same as for loss_with_negative_mining.
    :param y_true: ground-truth class indices per default box (zero-padded)
    :param y_pred: predicted class scores per default box
    :param num_aspect_ratios: number of aspect ratios per location
    :param num_classes: number of classes
    :return: categorical accuracy over the non-background boxes
    '''

    zero = tf.constant(0, dtype=tf.int32)
    #     print "y_true", y_true, "y_pred", y_pred
    # remove extra zero padding
    y_true = tf.slice(y_true,
                      begin=[0, 0, 0, 0],
                      size=[-1, -1, -1, num_aspect_ratios])

    y_pred_shape = tf.shape(y_pred)
    y_pred = tf.reshape(y_pred,
                        shape=[
                            y_pred_shape[0], y_pred_shape[1], y_pred_shape[2],
                            num_aspect_ratios, num_classes
                        ])
    #     print "After reshape and slicing: y_true", y_true, "y_pred", y_pred
    y_pred = tf.reshape(y_pred, shape=[-1, num_classes])
    y_true = tf.reshape(y_true, shape=[-1])

    y_true = tf.cast(y_true, tf.int32)

    pos_indices = tf.squeeze(tf.where(tf.not_equal(y_true, zero)))
    print(pos_indices)
    y_true_pos = tf.gather(y_true, pos_indices)
    y_pred_pos = tf.gather(y_pred, pos_indices)  #y_pred[pos_indices]

    y_true_pos_one_hot = tf.one_hot(y_true_pos, depth=num_classes)

    return categorical_accuracy(y_true_pos_one_hot, y_pred_pos)
Example #31
 def train(self):
     self.classifier = self.create_model()
     (x_train, y_train), (x_test, y_test) = mnist.load_data()
     x_train = x_train.reshape(-1, 28, 28, 1)
     x_test = x_test.reshape(-1, 28, 28, 1)
     y_train = to_categorical(y_train, num_classes=10)
     y_test = to_categorical(y_test, num_classes=10)
     callbacks = [
         EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=10)
     ]
     self.classifier.fit(x=x_train,
                         y=y_train,
                         batch_size=100,
                         epochs=50,
                         validation_split=0.1,
                         callbacks=callbacks)
     self.classifier.save('classifier.h5')
     predictions = self.classifier.predict(x_test)
     y_pred = K.constant(predictions)
     y_true = K.constant(y_test)
     accuracy = np.mean(K.eval(categorical_accuracy(y_true, y_pred)))
     print(accuracy)
     return None
Example #32
    net = getattr(keras_helpers, args.network)()
    data_shape = [224, 224, 3]
elif args.network == "GoogLeNet":
    net = getattr(keras_helpers, args.network)()
    data_shape = [224, 224, 3]
else:
    sys.exit("Unknown Network")

fake_data = np.random.rand(args.train_batch, data_shape[0], data_shape[1], data_shape[2])
tmp_fake_labels = np.random.randint(0, high=1000, size=args.train_batch)
fake_labels = np.zeros([args.train_batch, 1000])
for i in range(args.train_batch):
    fake_labels[i, tmp_fake_labels[i]] = 1

loss = categorical_crossentropy(net.y_, net.y)
top1 = categorical_accuracy(net.y_, net.y)
top5 = top_k_categorical_accuracy(net.y_, net.y, 5)

base_lr = 0.02
step = tf.Variable(0, trainable=False, name="Step")
learning_rate = tf.train.exponential_decay(base_lr, step, 1, 0.999964)

weight_list = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name[-3:] == "W:0"]
bias_list = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name[-3:] == "b:0"]

optimizer1 = tf.train.MomentumOptimizer(learning_rate, 0.9)
optimizer2 = tf.train.MomentumOptimizer(tf.scalar_mul(2.0, learning_rate), 0.9)
grads = optimizer1.compute_gradients(loss, var_list=weight_list+bias_list)
w_grads = grads[:len(weight_list)]
b_grads = grads[len(weight_list):]
Example #33
def evaluate_cate(category_size, attribute_size, y_true, y_pred):
    return categorical_accuracy(y_true[:, :category_size], y_pred[:, :category_size])
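
A usage sketch (assumed, not part of the original project): evaluate_cate takes two extra size arguments, so it has to be partially applied before it can serve as a standard (y_true, y_pred) Keras metric. The sizes below are hypothetical.

from functools import partial

category_acc = partial(evaluate_cate, 20, 1000)  # hypothetical category_size / attribute_size
category_acc.__name__ = 'category_acc'           # Keras reports the metric under this name

# model.compile(optimizer='adam', loss='categorical_crossentropy',
#               metrics=[category_acc])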