def conv_model(input_shape, num_classes, kernel_size=(3, 3), pool_size=(2, 2)):
    model = Sequential()

    model.add(Conv2D(32, kernel_size, padding='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size, padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512, kernel_regularizer=regularizers.l2(0.01)))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(num_classes, kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(0.25))
    model.add(Activation('softmax'))

    top2 = lambda x, y: top_k_categorical_accuracy(x, y, k=2)
    top3 = lambda x, y: top_k_categorical_accuracy(x, y, k=3)
    top4 = lambda x, y: top_k_categorical_accuracy(x, y, k=4)

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', top2, top3, top4])
    return model
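One caveat with the anonymous lambdas above: Keras labels each metric column after the function's __name__, so all three lambdas show up indistinguishably in training logs. A minimal named-wrapper sketch (assuming the same standalone-Keras imports as the example) keeps the logs readable:

def top2_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=2)

def top3_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)

def top4_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=4)

# model.compile(loss='categorical_crossentropy', optimizer='adam',
#               metrics=['accuracy', top2_accuracy, top3_accuracy, top4_accuracy])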
Example #2
def test_top_k_categorical_accuracy():
    y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
    # k=3 covers every class, so both samples count as correct
    success_result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred,
                                                               k=3))
    assert success_result == 1
    # only the first sample's true class (ranked 2nd) falls in the top 2
    partial_result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred,
                                                               k=2))
    assert partial_result == 0.5
    # neither true class is the single highest-scoring prediction
    failure_result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred,
                                                               k=1))
    assert failure_result == 0
Example #4
 def test_top_k_categorical_accuracy(self):
     with self.cached_session():
         y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
         y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
         result = K.eval(
             metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
         self.assertEqual(np.mean(result), 1)
         result = K.eval(
             metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
         self.assertEqual(np.mean(result), 0.5)
         result = K.eval(
             metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
         self.assertEqual(np.mean(result), 0.)
Example #5
 def _mrr_metric(self, y_true, y_pred):
     """Approximate the mean reciprocal rank (MRR) from successive top-k
     accuracies: top_k(i) - top_k(i-1) is the fraction of samples whose
     true class is ranked exactly i-th, and each such sample contributes
     1/i. Ranks beyond 25 are ignored."""
     mrr = 0
     current_percentage = 0
     for i in range(1, 26):
         if i == 1:
             mrr = metrics.top_k_categorical_accuracy(y_true, y_pred, k=i)
             current_percentage = mrr
         else:
             t = metrics.top_k_categorical_accuracy(y_true, y_pred, k=i)
             mrr += (t - current_percentage) * (1 / i)
             current_percentage = t
     return mrr
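A quick NumPy check of the telescoping identity behind _mrr_metric, using the toy arrays from the tests above (a standalone illustrative sketch, not code from any of the source projects):

import numpy as np

y_true = np.array([[0, 1, 0], [1, 0, 0]])
y_pred = np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])
order = np.argsort(-y_pred, axis=1)              # classes by descending score
true_idx = y_true.argmax(axis=1)
ranks = np.array([np.where(order[i] == true_idx[i])[0][0] + 1
                  for i in range(len(y_true))])
print(np.mean(1.0 / ranks))                      # direct MRR: 0.41666...
# top-1/2/3 accuracies are 0, 0.5, 1, so the telescoping sum gives
# 0 + (0.5 - 0) / 2 + (1 - 0.5) / 3 = 0.41666..., the same value.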
Example #6
def evaluate_model(model,
                   image_gen,
                   classes,
                   output_metrics,
                   output_image,
                   top_n=None,
                   beta=1.):
    # Iterate all batches for testing
    for batch_num in range(len(image_gen)):
        logger.info('Processing batch %d of %d' % (batch_num + 1, len(image_gen)))
        cur_x, cur_y = image_gen.next()
        cur_y_pred = model.predict(cur_x)
        if batch_num == 0:
            y_actual = cur_y
            y_pred = cur_y_pred
        else:
            y_actual = np.concatenate((y_actual, cur_y))
            y_pred = np.concatenate((y_pred, cur_y_pred))
    y_v = np.argmax(y_actual, axis=1)
    y_p_v = np.argmax(y_pred, axis=1)
    metrics = {}
    if output_image:
        logger.info('Writing confusion matrix to %s' % output_image)
        plot_confusion_matrix(confusion_matrix(y_v, y_p_v), classes,
                              output_image)
    if output_metrics:
        logger.info('Writing metrics to %s' % output_metrics)
        precision, recall, fscore, support = precision_recall_fscore_support(
            y_v, y_p_v, beta=beta)
        metrics = {
            'Precision': precision,  # per-class arrays (average=None default),
            'Recall': recall,        # not scalar averages
            'F-Score': fscore,
            'Support': support
        }
        if top_n:
            top_n_value = K.get_value(
                top_k_categorical_accuracy(y_actual, y_pred, k=top_n))
            metrics['Top_{}'.format(top_n)] = float(top_n_value)

            logger.info('Precision: {}, Recall: {}, F-Score: {}, Support: {}, Top_{}: {}'\
                .format(precision, recall, fscore, support, top_n, top_n_value))
        else:
            logger.info(
                'Precision: {}, Recall: {}, F-Score: {}, Support: {}'.format(
                    precision, recall, fscore, support))
        os.makedirs(os.path.dirname(output_metrics), exist_ok=True)
        with open(output_metrics, 'w', encoding='utf-8') as fp:
            if top_n:
                fp.write(
                    'Precision, Recall, F_Score, Top_{}, Support\n'.format(
                        top_n))
                fp.write('{}, {}, {}, {}, {}\n'.format(precision, recall,
                                                       fscore, top_n_value,
                                                       support))
            else:
                fp.write('Precision, Recall, F_Score, Support\n')
                fp.write('{}, {}, {}, {}\n'.format(precision, recall, fscore,
                                                   support))
    return metrics, y_actual, y_pred
Example #7
def acc_top2(y_true, y_pred):
    """
    :param y_true: 真实值
    :param y_pred: 训练值
    :return: # 计算top-k正确率,当预测值的前k个值中存在目标类别即认为预测正确
    """
    return top_k_categorical_accuracy(y_true, y_pred, k=2)
Example #8
def top_3_categorical_accuracy(y_true, y_pred):
    # Flatten any leading dimensions (e.g. batch and time steps) so each
    # per-step distribution is scored as its own sample.
    y_true_2d = K.reshape(y_true, shape=(-1, K.shape(y_true)[-1]))
    y_pred_2d = K.reshape(y_pred, shape=(-1, K.shape(y_pred)[-1]))
    return top_k_categorical_accuracy(y_true_2d, y_pred_2d, k=3)
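Context for the reshape: older Keras builds of top_k_categorical_accuracy assumed 2-D (batch, classes) inputs, so sequence models folded the time axis into the batch axis before scoring. A shape-only NumPy sketch of that fold (sizes are illustrative):

import numpy as np

batch, steps, classes = 4, 7, 10
y_pred = np.random.rand(batch, steps, classes)
flat = y_pred.reshape(-1, y_pred.shape[-1])
print(flat.shape)  # (28, 10): every time step is scored as its own sample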
Example #9
def top_5_accuracy(y_true, y_pred):
    """ Calculates top-5 accuracy of the predictions. To be used as evaluation metric in model.compile().

    Arguments:
        y_true -- array-like, true labels
        y_pred -- array-like, predicted labels

    Returns:
        top-5 accuracy
    """
    return top_k_categorical_accuracy(y_true, y_pred, k=5)
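For instance, a minimal compile-time usage sketch (the model object is hypothetical; only the metric function comes from the example above):

from keras.metrics import top_k_categorical_accuracy

# 'model' is assumed to be any Keras classifier with at least 5 output classes
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy', top_5_accuracy])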
Example #10
def test_top_k_categorical_accuracy():
    # 2d tests - shape: (batch_size, number_of_categories)
    y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
    success_result = K.eval(
        metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
    assert success_result == 1
    partial_result = K.eval(
        metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
    assert partial_result == 0.5
    failure_result = K.eval(
        metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
    assert failure_result == 0

    # 3d tests - example shape: (batch_size, sequence_size, number_of_categories)
    y_pred = K.variable(
        np.array([[[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]],
                  [[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]]))
    y_true = K.variable(
        np.array([[[0, 1, 0], [1, 0, 0]], [[0, 1, 0], [1, 0, 0]]]))
    success_result = K.eval(
        metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
    assert success_result == 1
    partial_result = K.eval(
        metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
    assert partial_result == 0.5
    failure_result = K.eval(
        metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
    assert failure_result == 0
Example #11
def lstm_model(input_shape_conv, time_distributed_input_shape, num_classes, kernel_size=(3, 3), pool_size=(2, 2),
               mode=None):
    model = Sequential()

    model.add(TimeDistributed(Conv2D(32, kernel_size, padding='same', input_shape=input_shape_conv),
                              input_shape=time_distributed_input_shape))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(64, kernel_size, padding='same')))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=pool_size)))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(512, kernel_regularizer=regularizers.l2(0.01))))
    model.add(TimeDistributed(Activation('relu')))

    if mode is None:
        model.add(LSTM(512, return_sequences=False))
    elif mode == 'max':
        model.add(LSTM(512, return_sequences=True))
        model.add(GlobalMaxPooling1D())
    else:
        model.add(LSTM(512, return_sequences=True))
        model.add(GlobalAveragePooling1D())

    model.add(Dense(512, kernel_regularizer=regularizers.l2(0.01)))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(num_classes, kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(0.25))
    model.add(Activation('softmax'))

    top2 = lambda x, y: top_k_categorical_accuracy(x, y, k=2)
    top3 = lambda x, y: top_k_categorical_accuracy(x, y, k=3)
    top4 = lambda x, y: top_k_categorical_accuracy(x, y, k=4)

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', top2, top3, top4])
    return model
Example #12
def _generator_test():
    # Single thread generator test
    generator = sample_generator('../Imagenet5W')

    data, anno = next(generator)
    for i in range(500):
        _data, _anno = next(generator)
        data = np.vstack((data, _data))
        anno = np.vstack((anno, _anno))

    res50 = Net("ResNet50")
    predict_result = res50.full_inference(data)
    print("Kr: %s" % K.get_session().run(top_k_categorical_accuracy(anno, predict_result, k=5)))
    print("My: %s" % top_k_acc(anno, predict_result, 5))
Example #13
def _multithread_parser_test():
    samplenames = [item.split('.')[0] for item in os.listdir('../Imagenet5W/Data/CLS-LOC/val/')]

    pool = Pool(processes=10)

    data_anno = pool.map(sample_parser, samplenames)

    data = np.vstack([item[0] for item in data_anno])
    anno = np.vstack([item[1] for item in data_anno])

    res50 = Net("ResNet50")
    predict_result = res50.full_inference(data)
    print("Kr: %s" % K.get_session().run(top_k_categorical_accuracy(anno, predict_result, k=5)))
    print("My: %s" % top_k_acc(anno, predict_result, 5))
Example #14
        def top_accuracy(true_word_indices, image_vectors):
            l2 = lambda x, axis: K.sqrt(
                K.sum(K.square(x), axis=axis, keepdims=True))
            l2norm = lambda x, axis: x / l2(x, axis)

            l2_words = l2norm(ALL_word_embeds, axis=1)
            l2_images = l2norm(image_vectors, axis=1)

            tiled_words = K.tile(K.expand_dims(l2_words, axis=1), (1, 200, 1))
            tiled_images = K.tile(K.expand_dims(l2_images, axis=1), (1, 20, 1))

            # K.squeeze needs an explicit axis; remove the keepdims axis
            # produced by l2() above.
            diff = K.squeeze(l2(l2_words - l2_images, axis=2), axis=2)

            # Note: tiled_words is never used below, and the top_k call
            # relies on the default k=5.
            diff_top5 = metrics.top_k_categorical_accuracy(tiled_images, diff)
            return diff_top5
Example #15
def top_5_acc(y_true, y_pred):
    return met.top_k_categorical_accuracy(y_true,
                                          tf.cast(y_pred, dtype='float32'),
                                          k=5)
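The cast above guards against a dtype mismatch: with the TF1-era backend, the metric ultimately calls tf.nn.in_top_k, whose predictions argument is float32-only, so float16 or float64 model outputs would raise without it. A brief illustration (hypothetical tensors):

y_true = tf.constant([[0., 1., 0.]])
y_pred64 = tf.constant([[0.1, 0.7, 0.2]], dtype=tf.float64)
top_5_acc(y_true, y_pred64)  # works; the float64 scores are cast first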
Example #16
def top_3_acc(y_true, y_pred):
    """returns proportion of time that the actual was in top 3 predicted"""
    return top_k_categorical_accuracy(y_true, y_pred, k=3)
Example #17
 def top_2_categorical_accuracy(self, y_true, y_pred):
     return top_k_categorical_accuracy(y_true, y_pred, k=2)
Example #18
def top_3_accuracy(x, y): return top_k_categorical_accuracy(x, y, k=3)
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
Example #19
def acc_top2(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=2)
Example #20
    data_shape = [224, 224, 3]
elif args.network == "GoogLeNet":
    net = getattr(keras_helpers, args.network)()
    data_shape = [224, 224, 3]
else:
    sys.exit("Unknown Network")

fake_data = np.random.rand(args.train_batch, data_shape[0], data_shape[1], data_shape[2])
tmp_fake_labels = np.random.randint(0, high=1000, size=args.train_batch)
fake_labels = np.zeros([args.train_batch, 1000])
for i in range(args.train_batch):
    fake_labels[i, tmp_fake_labels[i]] = 1

loss = categorical_crossentropy(net.y_, net.y)
top1 = categorical_accuracy(net.y_, net.y)
top5 = top_k_categorical_accuracy(net.y_, net.y, 5)

base_lr = 0.02
step = tf.Variable(0, trainable=False, name="Step")
learning_rate = tf.train.exponential_decay(base_lr, step, 1, 0.999964)

weight_list = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name[-3:] == "W:0"]
bias_list = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name[-3:] == "b:0"]

optimizer1 = tf.train.MomentumOptimizer(learning_rate, 0.9)
optimizer2 = tf.train.MomentumOptimizer(tf.scalar_mul(2.0, learning_rate), 0.9)
grads = optimizer1.compute_gradients(loss, var_list=weight_list+bias_list)
w_grads = grads[:len(weight_list)]
b_grads = grads[len(weight_list):]

train1 = optimizer1.apply_gradients(w_grads, global_step=step)
Example #21
          callbacks=[checkpoint],
          validation_data=([sentence_test, countries_test, devices_test], labels_test))

# evaluate the results one last time
model.load_weights(WEIGHTS_PATH)

# scores = model.evaluate(x=[sentence_test, countries_test, devices_test], y=labels_test, batch_size=BATCHSIZE, verbose=1)
#
# print("Final result :")
# for name, score in zip(model.metrics_names, scores):
#     print(name, score)

predictions = model.predict(x=[sentence_test, countries_test, devices_test], batch_size=BATCHSIZE, verbose=1)

k = 5
top_accuracy = top_k_categorical_accuracy(labels_test, predictions, k=k)
top_accuracy = K.get_session().run(top_accuracy)

print()
print("Top %d Accuracy : " % k, top_accuracy)
print()

ground_labels = decode_predictions(labels_test)
label_decoder = get_class_decoder()

results = []

for pred in predictions:
    # indices of the k highest scores, highest first
    top_k_preds_indices = pred.argsort()[-k:][::-1]
    result = [(label_decoder.classes_[i], pred[i]) for i in top_k_preds_indices]
    result.sort(key=lambda x: x[-1], reverse=True)
Example #22
def top_k_metric(y_true, y_pred, **kwargs):
    kk = kwargs.get('k', 3)
    return {
        'top_%d_cat_acc' % kk:
        kmetrics.top_k_categorical_accuracy(y_true, y_pred, k=kk)
    }
Example #23
def ctm_acck(y_true, y_pred):
    # Combine five equal-width prediction heads with fixed weights,
    # then score top-3 accuracy on the blended distribution.
    pred_list = tf.split(y_pred, 5, axis=-1)
    pred = (pred_list[0] + pred_list[1] + 0.1 * pred_list[2]
            + pred_list[3] + 0.1 * pred_list[4])
    return top_k_categorical_accuracy(y_true, pred, k=3)
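For clarity, tf.split above divides the last axis into five equal-width blocks, one per prediction head; a NumPy analogue with illustrative sizes:

import numpy as np

y_pred = np.random.rand(8, 5 * 10)        # 5 heads, 10 classes each
heads = np.split(y_pred, 5, axis=-1)      # five (8, 10) blocks
print([h.shape for h in heads])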
Example #24
def top_3_accuracy(x, y): return top_k_categorical_accuracy(x, y, k=3)

Example #25
from sklearn.metrics import accuracy_score
def top_3_accuracy(y, z):
    return top_k_categorical_accuracy(y, z, k=3)
Example #26
def top5(y1, y2):
    return metrics.top_k_categorical_accuracy(y1, y2, k=5)
Example #27
def top_3_categorical_accuracy(y_true, y_pred):
    """A metric function that is used to judge the top-3 performance of our model.
    """
    return top_k_categorical_accuracy(y_true, y_pred, k=3)
Example #28
def top_5_accuracy(y_true, y_pred):
    return metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)
Example #29
def calculate_test_accuracies(network_name, test_data, one_file, write_to_file, is_LSTM, segment_size=15):
    with open(get_speaker_pickle(test_data), 'rb') as f:
        (X, y, s_list) = pickle.load(f)

    if one_file:
        model = load_model(get_experiment_nets(network_name + '.h5'))
    else:
        json_file = open(get_experiment_nets('cnn_speaker02.json'), 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        # load weights into new model
        model.load_weights(get_experiment_nets('cnn_speaker02.h5'))

    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy', 'categorical_accuracy', ])

    print("Data extraction...")
    X_test, y_test = dg.generate_test_data(X, y, segment_size)
    n_classes = np.amax(y_test) + 1

    print("Data extraction done!")
    if is_LSTM:
        X_test = X_test.reshape(X_test.shape[0], X_test.shape[3], X_test.shape[2])

    print("Test output...")
    im_model = Model(inputs=model.input, outputs=model.layers[2].output)
    data_out = im_model.predict(X_test, batch_size=128)
    print(data_out)
    da = np.asarray(data_out)
    np.savetxt("foo.csv", da, delimiter=",")
    with open(test_data + "cluster_out_01", 'wb') as f:
        pickle.dump((da, y, s_list), f, -1)

    output = model.predict(X_test, batch_size=128, verbose=1)
    y_t = np_utils.to_categorical(y_test, n_classes)
    eva = model.evaluate(X_test, y_t, batch_size=128, verbose=2)
    k_nearest2 = K.eval(metrics.top_k_categorical_accuracy(tf.stack(y_t), tf.stack(output), k=2))
    k_nearest3 = K.eval(metrics.top_k_categorical_accuracy(tf.stack(y_t), tf.stack(output), k=3))
    k_nearest5 = K.eval(metrics.top_k_categorical_accuracy(tf.stack(y_t), tf.stack(output), k=5))
    k_nearest10 = K.eval(metrics.top_k_categorical_accuracy(tf.stack(y_t), tf.stack(output), k=10))

    print(output.shape)
    output_sum = np.zeros((n_classes, n_classes))
    # geometric accumulator must start at 1, not 0, or every product is 0
    output_geom = np.ones((n_classes, n_classes))
    y_pred_max = np.zeros(n_classes)
    y_pred_median = np.zeros(n_classes)
    for i in range(n_classes):
        indices = np.where(y_test == i)[0]
        speaker_output = np.take(output, indices, axis=0)
        max_val = 0
        for o in speaker_output:
            output_sum[i] = np.add(output_sum[i], o)
            output_geom[i] = np.multiply(output_geom[i], o)

            if np.max(o) > max_val:
                max_val = np.max(o)
                y_pred_max[i] = np.argmax(o)
        output_geom[i] = np.power(output_geom[i], 1 / len(speaker_output))

    y_pred_mean = np.zeros(n_classes)
    y_pred_geom = np.zeros(n_classes)
    for i in range(len(output_sum)):
        y_pred_mean[i] = np.argmax(output_sum[i])
        y_pred_geom[i] = np.argmax(output_geom[i])

    y_correct = np.arange(n_classes)

    print("geometric wrong")
    for j in range(len(y_correct)):
        if y_correct[j] != y_pred_geom[j]:
            print("Speaker: " + str(y_correct[j]) + ", Pred: " + str(y_pred_geom[j]))
            ind = np.argpartition(output_geom[j], -5)[-5:]
            print(np.argmax(output_geom[j]))
            print(ind[np.argsort(output_geom[j][ind])])

    print("mean wrong")
    for j in range(len(y_correct)):
        if y_correct[j] != y_pred_mean[j]:
            print("Speaker: " + str(y_correct[j]) + ", Pred: " + str(y_pred_mean[j]))
            ind = np.argpartition(output_sum[j], -5)[-5:]
            print(np.argmax(output_sum[j]))
            print(ind[np.argsort(output_sum[j][ind])])

    print(model.metrics_names)
    print(eva)
    print("Acc: %.4f" % eva[2])
    print("k2: %.4f" % k_nearest2)
    print("k3: %.4f" % k_nearest3)
    print("k5: %.4f" % k_nearest5)
    print("k10: %.4f" % k_nearest10)
    print("Accuracy (Max.): %.4f" % accuracy_score(y_correct, y_pred_max))
    print("Accuracy (Mean): %.4f" % accuracy_score(y_correct, y_pred_mean))
    print("Accuracy (Geom): %.4f" % accuracy_score(y_correct, y_pred_geom))
    if write_to_file:
        # text mode ('a'), not binary append, since str is written below
        with open(get_experiment_logs('test_scores.txt'), 'a') as f:
            f.write('---------- ' + network_name + ' ---------------\n')
            f.write("Accuracy: %.4f \n" % eva[2])
            f.write("Accuracy (Max.): %.4f \n" % accuracy_score(y_correct, y_pred_max))
            f.write("Accuracy (Mean): %.4f \n" % accuracy_score(y_correct, y_pred_mean))
            f.write("Accuracy (Geom): %.4f \n" % accuracy_score(y_correct, y_pred_geom))
            f.write("K2: {:.4}, K3: {:.4}, K5: {:.4}, K10: {:.4} \n\n".format(
                k_nearest2, k_nearest3, k_nearest5, k_nearest10))
Example #30
def top_3_accuracy(x, y):
    return top_k_categorical_accuracy(x, y, 3)
Example #31
def top_5_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, 5)
Example #32
def top_3_categ_acc(y_true, y_pred):
    return metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
Example #33
 def top_2_accuracy(in_gt, in_pred):
     return top_k_categorical_accuracy(in_gt, in_pred, k=2)
Example #34
def top_3_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)
Example #35
def evaluate_attr(category_size, attribute_size, y_true, y_pred):
    # Score only the attribute block of a concatenated [category | attribute]
    # output vector; k is omitted, so the Keras default (k=5) applies.
    # Note: attribute_size is accepted but unused.
    return top_k_categorical_accuracy(y_true[:, category_size:], y_pred[:, category_size:])