def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Categorical accuracy metric for top-k accuracy.

    Computes the top-k categorical accuracy rate, i.e. success when the
    target class is within the top-k predictions provided.
    """
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k))
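A minimal usage sketch for the metric above, assuming the usual `from keras import backend as K` import; the tiny Dense model and the random data below are placeholders chosen only to show how the function is passed via `metrics=`:

import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

# Hypothetical toy classifier; only the metrics= argument matters for this sketch.
model = Sequential([Dense(10, activation='softmax', input_shape=(20,))])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[top_k_categorical_accuracy])

# Exercise the metric on random one-hot data.
x = np.random.rand(8, 20).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, size=8)].astype('float32')
print(model.evaluate(x, y, verbose=0))  # [loss, top-5 accuracy]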
Example #2
def sequence_top_k_categorical_accuracy(y_true, y_pred, k=5):
    original_shape = K.shape(y_true)
    y_true = K.reshape(y_true, (-1, K.shape(y_true)[-1]))
    y_pred = K.reshape(y_pred, (-1, K.shape(y_pred)[-1]))
    top_k = K.cast(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), K.floatx())

    return K.reshape(top_k, original_shape[:-1])
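A small self-contained check of the sequence variant above on dummy tensors (the batch, timestep, and class sizes are arbitrary choices made for illustration):

import numpy as np
from keras import backend as K

batch, timesteps, classes = 2, 3, 10
# One-hot targets of shape (batch, timesteps, classes) and random scores.
y_true = np.eye(classes)[np.random.randint(0, classes, size=(batch, timesteps))]
y_pred = np.random.rand(batch, timesteps, classes)

result = sequence_top_k_categorical_accuracy(K.constant(y_true),
                                             K.constant(y_pred), k=5)
print(K.eval(result).shape)  # (2, 3): one 0./1. hit indicator per timestep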
Example #3
def sparse_temporal_top_k_categorical_accuracy_per_sequence(
        y_true, y_pred, k=5):
    original_shape = K.shape(y_true)
    y_true = K.reshape(y_true, (-1, K.shape(y_true)[-1]))
    y_pred = K.reshape(y_pred, (-1, K.shape(y_pred)[-1]))
    top_k = K.in_top_k(y_pred, K.cast(K.max(y_true, axis=-1), 'int32'), k)
    # The top_k indicator is flattened to shape (batch*timesteps,), so this min
    # yields a single scalar that is 1 only when every position is within the top-k.
    perfect = K.min(K.cast(top_k, 'int32'), axis=-1)
    return perfect  # K.expand_dims(perfect, axis=-1)
Example #4
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """
    :param y_true:
    :param y_pred:
    :param k:
    :return:
    Calculates the top-k categorical accuracy rate, i.e. success when the
    target class is within the top-k predictions provided.
    """
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k))
Example #5
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """
    Description:
        Calculates the top-k categorical accuracy rate - success when the
        target class is within the top-k predictions provided.
    Args:
        y_true (np.ndarray): one-hot encoded ground-truth class labels
        y_pred (np.ndarray): predicted class scores (probabilities)
        k (int): number of top predictions in which the target counts as correct
    Returns:
        top_k_categorical_accuracy (float)
    """

    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k))
Example #6
def top_5_acc(y_true, y_pred):
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), 5), axis=-1)
Example #7
def top_k_acc(y_true, y_pred, k=1):
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
Example #8
def top_k_accuracy(y_true, y_pred):
    # `k` is expected to be defined in the enclosing scope of this fragment.
    return K.mean(K.in_top_k(K.cast(y_pred, 'float32'), K.argmax(y_true, axis=-1), k), axis=-1)
Example #9
def accuracy1(y_true, y_pred):
    y_true = K.cast(y_true, y_pred.dtype)
    y_true = K.argmax(y_true)
    res = K.in_top_k(y_pred, y_true, 1)
    return res
Example #10
def top50acc(y_true, y_pred, k=50):
    return K.mean(K.in_top_k(y_pred, K.cast(K.max(y_true, axis=-1), 'int32'), k), axis=-1)
Example #11
def apricot_plus_lite(model,
                      model_name,
                      get_trained_weights,
                      x_train_val,
                      y_train_val,
                      x_val,
                      y_val,
                      x_test,
                      y_test,
                      adjustment_strategy,
                      activation='binary',
                      ver=1,
                      dataset='cifar10',
                      max_count=1,
                      loop_count=100000,
                      random_seed=42):
    weights_dir = os.path.join(WEIGHTS_DIR, 'CNN')
    weights_dir = os.path.join(weights_dir, model_name)
    weights_dir = os.path.join(weights_dir, '{}'.format(ver))

    # create the dir
    if not os.path.isdir(weights_dir):
        os.makedirs(weights_dir)

    if get_trained_weights:
        model.load_weights(os.path.join(weights_dir, 'trained.h5'))

    weights_after_dir = os.path.join(
        weights_dir, 'fixed_{}_{}.h5'.format(adjustment_strategy, activation))

    if not os.path.exists(weights_after_dir):
        model.save_weights(weights_after_dir)

    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.125,
                                 height_shift_range=0.125,
                                 fill_mode='constant',
                                 cval=0.)

    datagen.fit(x_train_val)

    # build the fixed model.
    if dataset == 'cifar10':
        img_rows, img_cols = 32, 32
        img_channels = 3
        num_classes = 10
        top_k = 1
    elif dataset == 'cifar100':
        img_rows, img_cols = 32, 32
        img_channels = 3
        num_classes = 100
        top_k = 5
    else:
        raise ValueError('unsupported dataset: {}'.format(dataset))

    input_tensor = Input(shape=(img_rows, img_cols, img_channels))

    if model_name == 'resnet20':
        fixed_model = build_resnet(img_rows,
                                   img_cols,
                                   img_channels,
                                   num_classes=num_classes,
                                   stack_n=3,
                                   k=top_k)
    elif model_name == 'resnet32':
        fixed_model = build_resnet(img_rows,
                                   img_cols,
                                   img_channels,
                                   num_classes=num_classes,
                                   stack_n=5,
                                   k=top_k)
    elif model_name == 'mobilenet':
        fixed_model = build_mobilenet(input_tensor,
                                      num_classses=num_classes,
                                      k=top_k)
    elif model_name == 'mobilenet_v2':
        fixed_model = build_mobilenet_v2(input_tensor,
                                         num_classses=num_classes,
                                         k=top_k)
    elif model_name == 'densenet':
        fixed_model = build_densenet(input_tensor,
                                     num_classses=num_classes,
                                     k=top_k)

    fixed_model.load_weights(os.path.join(weights_dir, 'trained.h5'))
    # fixed_model = copy.deepcopy(model)

    # evaluate the acc before fixing.
    print('----------origin model----------')
    if dataset in ['cifar100', 'imagenet']:
        _, acc_top_1_train, train_acc = fixed_model.evaluate(
            x_train_val, y_train_val)
        print(
            '[==log==] training acc. before fixing: top-1: {:.4f}, top-5: {:.4f}'
            .format(acc_top_1_train, train_acc))
        _, acc_top_1_val, origin_acc = fixed_model.evaluate(x_val, y_val)
        print(
            '[==log==] validation acc. before fixing: top-1: {:.4f}, top-5: {:.4f}'
            .format(acc_top_1_val, origin_acc))
        _, acc_top_1_test, test_acc = fixed_model.evaluate(x_test, y_test)
        print(
            '[==log==] test acc. before fixing: top-1: {:.4f}, top-5: {:.4f}'.
            format(acc_top_1_test, test_acc))
        logger(weights_dir, '========================')
        logger(
            weights_dir, 'model: {}, adjustment strategy: {}, ver: {}'.format(
                model_name, adjustment_strategy, ver))
        logger(
            weights_dir,
            'TOP-1: train acc.: {:.4f}, val acc.: {:.4f}, test acc.: {:.4f}'.
            format(acc_top_1_train, acc_top_1_val, acc_top_1_test))
        logger(
            weights_dir,
            'TOP-5: train acc.: {:.4f}, val acc.: {:.4f}, test acc.: {:.4f}'.
            format(train_acc, origin_acc, test_acc))

    else:
        _, origin_acc = fixed_model.evaluate(x_val, y_val)
        print('----------origin model----------')
        _, train_acc = fixed_model.evaluate(x_train_val, y_train_val)
        print(
            '[==log==] training acc. before fixing: {:.4f}'.format(train_acc))
        print('[==log==] validation acc. before fixing: {:.4f}'.format(
            origin_acc))
        _, test_acc = fixed_model.evaluate(x_test, y_test)
        print('[==log==] test acc. before fixing: {:.4f}'.format(test_acc))
        logger(weights_dir, '========================')
        logger(
            weights_dir, 'model: {}, adjustment strategy: {}, ver: {}'.format(
                model_name, adjustment_strategy, ver))
        logger(
            weights_dir,
            'train acc.: {:.4f}, val acc.: {:.4f}, test acc.: {:.4f}'.format(
                train_acc, origin_acc, test_acc))

    # start time
    start_time = datetime.now()

    # start fixing
    best_weights = fixed_model.get_weights()
    best_acc = origin_acc

    # find all indices of xs that original model fails on them.
    y_preds = model.predict(x_train_val)
    y_pred_label = np.argmax(y_preds, axis=1)
    y_true = np.argmax(y_train_val, axis=1)

    index_diff = np.nonzero(y_pred_label - y_true)

    fail_xs = x_train_val[index_diff]
    fail_ys = y_train_val[index_diff]
    fail_ys_label = np.argmax(fail_ys, axis=1)
    fail_num = int(np.size(index_diff))

    sub_correct_matrix_path = os.path.join(
        weights_dir, 'corr_matrix_{}.npy'.format(random_seed))
    sub_correct_matrix = None  # 1: predicts correctly, 0: predicts incorrectly.

    sub_weights_list = None

    if not os.path.exists(sub_correct_matrix_path):
        # obtain submodel correctness matrix
        submodels_path = os.path.join(weights_dir, 'submodels')

        for root, dirs, files in os.walk(submodels_path):
            for f in files:
                temp_w_path = os.path.join(root, f)
                fixed_model.load_weights(temp_w_path)
                sub_y_pred = fixed_model.predict(fail_xs)

                # top-1 accuracy
                if not dataset in ['cifar100', 'imagenet']:
                    sub_col = np.argmax(sub_y_pred, axis=1) - fail_ys_label
                    sub_col[sub_col != 0] = 1
                # top-5 accuracy
                else:
                    sub_col = K.in_top_k(sub_y_pred, K.argmax(fail_ys,
                                                              axis=-1), 5)
                    sub_col = K.get_value(sub_col)
                    sub_col = sub_col.astype(int)
                    sub_col = np.ones(shape=sub_col.shape) - sub_col

                if sub_correct_matrix is None:
                    sub_correct_matrix = sub_col.reshape(fail_num, 1)
                else:
                    sub_correct_matrix = np.concatenate(
                        (sub_correct_matrix, sub_col.reshape(fail_num, 1)),
                        axis=1)

            sub_correct_matrix = np.ones(
                shape=sub_correct_matrix.shape
            ) - sub_correct_matrix  # here change 0 to 1 (for correctly predicted case)
            np.save(sub_correct_matrix_path, sub_correct_matrix)

        # for sub in submodels:
        #     sub_y_pred = sub.predict(fail_xs)
        #     sub_col = np.argmax(sub_y_pred, axis=1) - fail_ys_label
        #     sub_col[sub_col != 0] = 1
        #     if sub_correct_matrix is None:
        #         sub_correct_matrix = copy.deepcopy(sub_col.reshape(fail_num, 1))
        #     else:
        #         sub_correct_matrix = np.concatenate((sub_correct_matrix, sub_col.reshape(fail_num, 1)), axis=1)
        # sub_correct_matrix = np.ones(shape=sub_correct_matrix.shape) - sub_correct_matrix
        # np.save(sub_correct_matrix_path, sub_correct_matrix)
    else:
        sub_correct_matrix = np.load(sub_correct_matrix_path)
        # revision
        sub_weights_list = get_submodels_weights(
            fixed_model, model_name, dataset,
            os.path.join(weights_dir, 'submodels'))

    # main loop
    fixed_model.load_weights(weights_after_dir)

    logger(weights_dir, '-----------------')
    logger(weights_dir, 'adjustment strategy {}'.format(adjustment_strategy))
    logger(
        weights_dir,
        'LOOP_COUNT: {}, BATCH_SIZE: {}, learning_rate: {}'.format(
            loop_count, BATCH_SIZE, learning_rate))
    logger(
        weights_dir,
        'PRE_EPOCHS: {}, AFTER_EPOCHS: {}, SUB_EPOCHS: {}, MAX_COUNT: {}'.
        format(PRE_EPOCHS, AFTER_EPOCHS, SUB_EPOCHS, max_count))
    logger(weights_dir, '-----------------')

    for _ in range(loop_count):
        np.random.shuffle(sub_correct_matrix)
        iter_count = 0
        for index in range(sub_correct_matrix.shape[0]):

            if iter_count >= max_count:
                break

            curr_weights = fixed_model.get_weights()
            corr_mat = sub_correct_matrix[index, :]

            # lite version
            corr_w, incorr_w = get_adjustment_weights(corr_mat,
                                                      sub_weights_list,
                                                      adjustment_strategy)
            adjust_w = adjust_weights_func(curr_weights,
                                           corr_w,
                                           incorr_w,
                                           adjustment_strategy,
                                           activation=activation)

            if adjust_w == -1:
                continue

            fixed_model.set_weights(adjust_w)

            if not dataset in ['cifar100', 'imagenet']:
                _, curr_acc = fixed_model.evaluate(x_val, y_val, verbose=0)
            else:
                _, _, curr_acc = fixed_model.evaluate(x_val, y_val, verbose=0)
            print(
                'tried times: {}, validation accuracy after adjustment: {:.4f}'
                .format(index, curr_acc))
            if curr_acc > best_acc:
                best_acc = curr_acc
                fixed_model.save_weights(weights_after_dir)

                if adjustment_strategy <= 3:
                    # Apricot+ further training process
                    if not dataset in ['cifar100', 'imagenet']:
                        checkpoint = ModelCheckpoint(weights_after_dir,
                                                     monitor=MONITOR,
                                                     verbose=1,
                                                     save_best_only=True,
                                                     mode='max')
                    else:
                        checkpoint = ModelCheckpoint(weights_after_dir,
                                                     monitor='val_top_k_acc',
                                                     verbose=1,
                                                     save_best_only=True,
                                                     mode='max')

                    checkpoint.best = best_acc
                    fixed_model.fit_generator(
                        datagen.flow(x_train_val,
                                     y_train_val,
                                     batch_size=BATCH_SIZE),
                        steps_per_epoch=len(x_train_val) // BATCH_SIZE + 1,
                        validation_data=(x_val, y_val),
                        epochs=FURTHER_ADJUSTMENT_EPOCHS,
                        callbacks=[checkpoint])
                    fixed_model.load_weights(weights_after_dir)

                    if not dataset in ['cifar100', 'imagenet']:
                        _, val_acc = fixed_model.evaluate(x_val,
                                                          y_val,
                                                          verbose=0)
                        _, test_acc = fixed_model.evaluate(x_test,
                                                           y_test,
                                                           verbose=0)
                    else:
                        _, _, val_acc = fixed_model.evaluate(x_val,
                                                             y_val,
                                                             verbose=0)
                        _, _, test_acc = fixed_model.evaluate(x_test,
                                                              y_test,
                                                              verbose=0)

                    print('validation acc. after retraining: {:.4f}'.format(
                        val_acc))
                    print(
                        'test acc. after retraining: {:.4f}'.format(test_acc))
                    logger(
                        weights_dir,
                        'Improved, validation acc.: {:.4f}, test acc.:{:.4f}'.
                        format(val_acc, test_acc))

                else:
                    print('-----------------------------')
                    print('evaluate on test dataset.')
                    best_acc = curr_acc
                    best_weights = adjust_w
                    fixed_model.save_weights(weights_after_dir)
                    # evaluation
                    if not dataset in ['cifar100', 'imagenet']:
                        _, val_acc = fixed_model.evaluate(x_val,
                                                          y_val,
                                                          verbose=0)
                        _, test_acc = fixed_model.evaluate(x_test,
                                                           y_test,
                                                           verbose=0)
                    else:
                        _, _, val_acc = fixed_model.evaluate(x_val,
                                                             y_val,
                                                             verbose=0)
                        _, _, test_acc = fixed_model.evaluate(x_test,
                                                              y_test,
                                                              verbose=0)

                    print('validation acc. after retraining: {:.4f}'.format(
                        val_acc))
                    print(
                        'test acc. after retraining: {:.4f}'.format(test_acc))
                    logger(
                        weights_dir,
                        'Improved, validation acc.: {:.4f}, test acc.:{:.4f}'.
                        format(val_acc, test_acc))

            else:
                fixed_model.set_weights(best_weights)

            iter_count += 1

    # further training process.
    if not dataset in ['cifar100', 'imagenet']:
        checkpoint = ModelCheckpoint(weights_after_dir,
                                     monitor=MONITOR,
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max')
    else:
        checkpoint = ModelCheckpoint(weights_after_dir,
                                     monitor='val_top_k_acc',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max')

    checkpoint.best = best_acc
    fixed_model.fit_generator(datagen.flow(x_train_val,
                                           y_train_val,
                                           batch_size=BATCH_SIZE),
                              steps_per_epoch=len(x_train_val) // BATCH_SIZE +
                              1,
                              validation_data=(x_val, y_val),
                              epochs=FURTHER_ADJUSTMENT_EPOCHS,
                              callbacks=[checkpoint])

    # end time
    end_time = datetime.now()
    time_delta = end_time - start_time
    print('time used for adaptation: {}'.format(str(time_delta)))
    logger(weights_dir, 'time used for adaptation: {}'.format(str(time_delta)))

    fixed_model.load_weights(weights_after_dir)
    best_weights = fixed_model.get_weights()

    if dataset in ['cifar100', 'imagenet']:
        _, acc_top_1_train, train_acc = fixed_model.evaluate(
            x_train_val, y_train_val)
        _, acc_top_1_val, origin_acc = fixed_model.evaluate(x_val, y_val)
        _, acc_top_1_test, test_acc = fixed_model.evaluate(x_test, y_test)

        print(
            'after adjustment and retraining, TOP-1 train acc.: {}, val acc.: {}, test acc.: {}'
            .format(acc_top_1_train, acc_top_1_val, acc_top_1_test))
        print(
            'after adjustment and retraining, TOP-5 train acc.: {}, val acc.: {}, test acc.: {}'
            .format(train_acc, origin_acc, test_acc))

        logger(
            weights_dir,
            'after adjustment and retraining, TOP-1 train acc.: {}, val acc.: {}, test acc.: {}'
            .format(acc_top_1_train, acc_top_1_val, acc_top_1_test))
        logger(
            weights_dir,
            'after adjustment and retraining, TOP-5 train acc.: {}, val acc.: {}, test acc.: {}'
            .format(train_acc, origin_acc, test_acc))

    else:
        _, train_acc = fixed_model.evaluate(x_train_val,
                                            y_train_val,
                                            verbose=0)
        _, val_acc = fixed_model.evaluate(x_val, y_val, verbose=0)
        _, test_acc = fixed_model.evaluate(x_test, y_test, verbose=0)

        print('validation acc. after retraining: {:.4f}'.format(val_acc))
        print('test acc. after retraining: {:.4f}'.format(test_acc))

        logger(
            weights_dir,
            'after adjustment and retraining, train acc.: {}, val acc.: {}, test acc.: {}'
            .format(train_acc, val_acc, test_acc))
Example #12
def sparse_top_k_accuracy(y_true, y_pred):
    import keras.backend as K
    # `k` is expected to be defined in the enclosing scope of this fragment.
    y_true = K.reshape(y_true, [-1])
    y_pred = K.reshape(y_pred, [-1, y_pred.shape[-1]])
    return K.in_top_k(y_pred, K.cast(y_true, 'int32'), k)
Example #13
def accuracy(y_true, y_pred):
    # `k_best` is expected to be defined in the enclosing scope of this fragment.
    y_true = K.cast(y_true, y_pred.dtype)
    y_true = K.argmax(y_true)
    # y_pred1 = K.argmax(y_pred)
    res = K.in_top_k(y_pred, y_true, k_best)
    return res
Example #14
def sparse_temporal_top_k_categorical_accuracy(y_true, y_pred, k=5):
    original_shape = K.shape(y_true)
    y_true = K.reshape(y_true, (-1, K.shape(y_true)[-1]))
    y_pred = K.reshape(y_pred, (-1, K.shape(y_pred)[-1]))
    top_k = K.in_top_k(y_pred, K.cast(K.max(y_true, axis=-1), 'int32'), k)
    return K.reshape(top_k, original_shape[:-1])
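The sparse temporal variant above can be sanity-checked the same way; here the integer labels sit in a trailing singleton dimension, which is an assumption about how the caller encodes them, and the shapes are illustrative only:

import numpy as np
from keras import backend as K

batch, timesteps, classes = 2, 4, 20
y_true = np.random.randint(0, classes, size=(batch, timesteps, 1)).astype('float32')
y_pred = np.random.rand(batch, timesteps, classes).astype('float32')

hits = sparse_temporal_top_k_categorical_accuracy(K.constant(y_true),
                                                  K.constant(y_pred), k=5)
print(K.eval(hits).shape)  # (2, 4): one boolean per timestep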
Example #15
def top5acc(y_true, y_pred, k=5):  # top_N_categorical_accuracy
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
Example #16
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(K.in_top_k(y_pred, K.cast(K.max(y_true, axis=-1), 'int32'),
                             k),
                  axis=-1)
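A quick check of the sparse (integer-label) variant above; the labels and scores are made up, and the metric returns the fraction of rows whose true class appears among the k highest scores:

import numpy as np
from keras import backend as K

y_true = np.array([[2], [0], [7]], dtype='float32')  # integer labels, shape (batch, 1)
y_pred = np.random.rand(3, 10).astype('float32')     # class scores, shape (batch, classes)

acc = sparse_top_k_categorical_accuracy(K.constant(y_true), K.constant(y_pred), k=5)
print(K.eval(acc))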
Example #17
def compute_acc(y_true, y_pred):
    # `k` is expected to be defined in the enclosing scope of this fragment.
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
Example #18
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k))
Example #19
def K_AP(y_true, y_pred):
    ts = K.in_top_k(y_pred, y_true, K.sum(y_true))
    # Assumption: return the mean hit rate, since the fragment otherwise returns
    # nothing. Note that K.in_top_k typically expects a static integer k, so a
    # tensor-valued K.sum(y_true) may not be accepted by every backend.
    return K.mean(K.cast(ts, K.floatx()))


def top_3_categorical_accuracy(y_true, y_pred, k=3):
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k))


def top2_accuracy(y_true, y_pred):
    return K.cast(K.in_top_k(y_pred, y_true, 2), K.floatx())