Example #1
def build_train_trigger(model1_dict, th):
    classifier_trigger_dict = {}

    performance = make.make_performance_metrics(**{})

    # Train dict
    L = model1_dict['train']['logits']
    dividend = np.sum(np.exp(L), axis=1)  # softmax denominator per sample
    P = np.exp(L) / dividend[:, None]  # class probabilities
    sort = np.sort(P, axis=1)  # Sort class probabilities
    diff = sort[:, -1] - sort[:, -2]  # Difference
    logits_trigger = np.empty((diff.shape[0], 2))
    logits_trigger[:, 0] = diff < th
    logits_trigger[:, 1] = diff >= th

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['train']['gt']

    train = make.make_classifier_raw_data(logits_trigger,
                                          (pred_model1 == gt_model1),
                                          np.copy(model1_dict['train']['id']))

    # Test dict
    L = model1_dict['test']['logits']
    dividend = np.sum(np.exp(L), axis=1)
    P = np.exp(L) / dividend[:, None]
    sort = np.sort(P, axis=1)  # Sort class probabilities
    diff = sort[:, -1] - sort[:, -2]  # Difference
    logits_trigger = np.empty((diff.shape[0], 2))
    logits_trigger[:, 0] = diff < th
    logits_trigger[:, 1] = diff >= th

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['test']['gt']

    test = make.make_classifier_raw_data(logits_trigger,
                                         (pred_model1 == gt_model1),
                                         np.copy(model1_dict['test']['id']))

    classifier_trigger_dict = make.make_classifier_dict(
        "trigger_classifier", "cifar10", train, test, performance)
    io.save_pickle(
        '../../Definitions/Classifiers/tmp/trigger_random_threshold.pkl',
        classifier_trigger_dict)
    classifier = make.make_classifier(
        "trigger_classifier",
        "../../Definitions/Classifiers/tmp/trigger_random_threshold.pkl")
    return classifier
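The inline exp/sum above is a plain softmax followed by the top-1 minus top-2 probability margin. A minimal standalone sketch of that computation (plain NumPy; the helper name and the max-subtraction for numerical stability are my own additions, not part of the original):

import numpy as np

def softmax_margin(logits):
    # Shift by the row-wise max so np.exp does not overflow for large logits
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exp = np.exp(shifted)
    probs = exp / np.sum(exp, axis=1, keepdims=True)
    sorted_probs = np.sort(probs, axis=1)
    margin = sorted_probs[:, -1] - sorted_probs[:, -2]  # small margin = low confidence
    return probs, margin

probs, margin = softmax_margin(np.array([[2.0, 1.9, 0.1], [5.0, 1.0, 0.5]]))
# The trigger in build_train_trigger fires (first column) exactly when margin < th.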
Example #2
def __get_trigger_raw_data(data, phase):

    if phase == "test":
        data_source = data.source.test_path
    elif phase == "train":
        data_source = data.source.train_path
    else:  # phase == "val":
        data_source = data.source.val_path

    dataset = io.read_pickle(data_source)

    L = dataset['logits']
    P = softmax(L)
    ths = dataset['ths']
    gt = dataset['gt']
    ids = dataset['id']

    sort = np.sort(P, axis=1)
    diff = sort[:, -1] - sort[:, -2]

    sample_size = len(ids)
    Y = np.zeros((sample_size, len(ths) + 1))  # Predictions for the trigger

    Or = np.zeros(sample_size)
    for ith, th in enumerate(ths):
        Y[:, ith] = diff < th  # trigger column i fires below threshold i
        Or = np.logical_or(Or, Y[:, ith])

    Y[:, -1] = np.logical_not(Or)  # no threshold matched: keep the prediction of the classifier before the trigger

    raw_data = make.make_classifier_raw_data(Y, gt == np.argmax(L, axis=1),
                                             ids)
    return raw_data
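The per-threshold loop can also be written without explicit iteration; a vectorized sketch with the same semantics, assuming ths is a one-dimensional sequence of thresholds (my rewrite, not the original):

ths_arr = np.asarray(ths)
Y = np.zeros((diff.shape[0], len(ths_arr) + 1))
Y[:, :-1] = diff[:, None] < ths_arr[None, :]  # one indicator column per threshold
Y[:, -1] = ~Y[:, :-1].any(axis=1)             # no threshold matched: stay with the classifier before the trigger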
Example #3
def get_dummy_ClassifierRawData(num_c=3, n=5):
    logits = np.random.rand(n, num_c)
    gt = np.random.randint(0, num_c, n)
    id = np.arange(n)

    # Construct the message
    message = make_util.make_classifier_raw_data(logits, gt, id)
    return message
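For deterministic unit tests it can help to seed the dummy data. A variant using NumPy's Generator API (the seed parameter and function name are my additions; make_util.make_classifier_raw_data is called exactly as above):

def get_dummy_ClassifierRawData_seeded(num_c=3, n=5, seed=0):
    rng = np.random.default_rng(seed)
    logits = rng.random((n, num_c))  # fake class scores in [0, 1)
    gt = rng.integers(0, num_c, n)   # fake ground-truth labels
    ids = np.arange(n)
    return make_util.make_classifier_raw_data(logits, gt, ids)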
Example #4
def build_train_trigger2(model1_dict, th):
    classifier_trigger_dict = {}

    performance = make.make_performance_metrics(**{})

    # Train dict
    L = model1_dict['train']['logits']
    dividend = np.sum(np.exp(L), axis=1)
    P = np.exp(L) / dividend[:, None]
    max_P = np.max(P, axis=1)  # confidence = maximum softmax probability
    logits_trigger = np.empty((max_P.shape[0], 2))
    logits_trigger[:, 0] = max_P < th
    logits_trigger[:, 1] = max_P >= th

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['train']['gt']

    train = make.make_classifier_raw_data(logits_trigger,
                                          (pred_model1 == gt_model1),
                                          np.copy(model1_dict['train']['id']))

    # Test dict
    L = model1_dict['test']['logits']
    dividend = np.sum(np.exp(L), axis=1)
    P = np.exp(L) / dividend[:, None]
    max_P = np.max(P, axis=1)
    logits_trigger = np.empty((max_P.shape[0], 2))
    logits_trigger[:, 0] = max_P < th
    logits_trigger[:, 1] = max_P >= th

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['test']['gt']

    test = make.make_classifier_raw_data(logits_trigger,
                                         (pred_model1 == gt_model1),
                                         np.copy(model1_dict['test']['id']))

    classifier_trigger_dict = make.make_classifier_dict(
        "trigger_classifier", "cifar10", train, test, performance)
    io.save_pickle(
        '../../Definitions/Classifiers/tmp/trigger_random_threshold',
        classifier_trigger_dict)
    classifier = make.make_classifier(
        "trigger_classifier",
        "../../Definitions/Classifiers/tmp/trigger_random_threshold")
    return classifier
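build_train_trigger2 differs from build_train_trigger (Example #1) only in the confidence measure: the maximum softmax probability instead of the top-two margin. A toy comparison of the two criteria on the same logits (illustrative values only):

L = np.array([[3.0, 2.9, 0.1],   # top two classes nearly tied
              [5.0, 1.0, 0.5]])  # clear winner
P = np.exp(L) / np.sum(np.exp(L), axis=1)[:, None]
sorted_P = np.sort(P, axis=1)
margin = sorted_P[:, -1] - sorted_P[:, -2]  # criterion of Example #1 (~0.05 and ~0.95 here)
max_P = np.max(P, axis=1)                   # criterion of Example #4 (~0.51 and ~0.97 here)
# For a threshold like 0.5, the margin trigger fires on the first sample
# while the max-probability trigger does not.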
Example #5
def build_train_trigger3(model1_dict, p):
    classifier_trigger_dict = {}

    performance = make.make_performance_metrics(**{})

    # Train dict
    L = model1_dict['train']['logits']
    logits_trigger = np.empty((L.shape[0], 2))
    logits_trigger[:, 0] = np.random.binomial(1, p, L.shape[0])
    logits_trigger[:, 1] = 1 - logits_trigger[:, 0]

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['train']['gt']

    train = make.make_classifier_raw_data(logits_trigger,
                                          (pred_model1 == gt_model1),
                                          np.copy(model1_dict['train']['id']))

    # Test dict
    L = model1_dict['test']['logits']
    logits_trigger = np.empty((L.shape[0], 2))
    logits_trigger[:, 0] = np.random.binomial(1, p, L.shape[0])
    logits_trigger[:, 1] = 1 - logits_trigger[:, 0]

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['test']['gt']

    test = make.make_classifier_raw_data(logits_trigger,
                                         (pred_model1 == gt_model1),
                                         np.copy(model1_dict['test']['id']))

    classifier_trigger_dict = make.make_classifier_dict(
        "trigger_classifier", "cifar10", train, test, performance)
    io.save_pickle(
        '../../Definitions/Classifiers/tmp/trigger_random_threshold',
        classifier_trigger_dict)
    classifier = make.make_classifier(
        "trigger_classifier",
        "../../Definitions/Classifiers/tmp/trigger_random_threshold")
    return classifier
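Here the trigger ignores the model entirely and routes each sample at random: column 0 is 1 with probability p. A seeded sketch of that coin flip (the Generator and seed are my additions):

rng = np.random.default_rng(42)
p = 0.3
flips = rng.binomial(1, p, 10000)
print(flips.mean())  # close to 0.3: the expected fraction routed down the first branch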
Example #6
def __get_trigger_raw_data(data, phase):

    if phase == "test":
        data_source = data.source.test_path
    elif phase == "train":
        data_source = data.source.train_path
    else:
        data_source = data.source.val_path

    dataset = io.read_pickle(data_source)

    P = dataset['predictions']  # Models for voting protocol
    n = dataset['n']
    gt = dataset['gt']
    ids = dataset['ids']

    # Count agreement among the voting models for each sample
    agree = np.apply_along_axis(np.bincount, 1, P, None, P.shape[2])
    agree_max = np.max(agree, axis=1)  # size of the largest agreeing group
    # Trigger fires (first column) when fewer than n models agree
    y = np.transpose(np.append(np.array([agree_max < n]), np.array([agree_max >= n]), axis=0))
    raw_data = make.make_classifier_raw_data(y, gt == np.argmax(P, axis=1), ids)
    return raw_data
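The apply_along_axis/bincount line counts how many of the voting models predict each class for every sample. A small sketch under the assumption that the predictions are laid out as (samples, models) integer class labels (the original indexes P.shape[2], so its actual layout may differ):

preds = np.array([[0, 0, 2],    # two of three models agree on class 0
                  [1, 1, 1],    # unanimous
                  [0, 1, 2]])   # no agreement
num_classes = 3
votes = np.apply_along_axis(np.bincount, 1, preds, None, num_classes)
agreement = votes.max(axis=1)   # [2, 3, 1]
n = 3
trigger_fires = agreement < n   # [True, False, True]: fewer than n models agree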
Example #7
def __get_trigger_raw_data(data, phase):

    if phase == "test":
        data_source = data.source.test_path
    elif phase == "train":
        data_source = data.source.train_path
    else:
        data_source = data.source.val_path

    dataset = io.read_pickle(data_source)

    L = dataset['logits']
    P = softmax(L)
    th = dataset['th']
    gt = dataset['gt']
    ids = dataset['id']

    sort = np.sort(P, axis=1)
    diff = sort[:, -1] - sort[:, -2]
    y = np.column_stack((diff < th, diff >= th)).astype(int)  # below / at-or-above the threshold
    raw_data = make.make_classifier_raw_data(y, gt == np.argmax(L, axis=1),
                                             ids)
    return raw_data
Example #8
def fill_classifier(eval, classifier_dict, contribution, contribution_train, contribution_val):

    metrics = make.make_performance_metrics(**{"time": eval.test['system'].time / len(contribution['gt'].keys()) * 128,
                                               "ops": eval.test['system'].ops / len(contribution['gt'].keys()),
                                               "params": eval.test['system'].params})
    # Test raw data
    test_raw = make.make_classifier_raw_data([], [], [])
    if contribution is not None:
        keys_test = [key for key in contribution['logits'].keys()]
        logits = [contribution['logits'][key] for key in keys_test]
        gt = [contribution['gt'][key] for key in keys_test]
        test_raw = make.make_classifier_raw_data(logits, gt, keys_test)
        classifier_dict['test']['time_instance'] = \
            np.array([contribution['time_instance'][key] for key in keys_test])

    # Train raw data
    train_raw = make.make_classifier_raw_data([], [], [])
    if contribution_train is not None:
        keys_train = [key for key in contribution_train['logits'].keys()]
        logits = [contribution_train['logits'][key] for key in keys_train]
        gt = [contribution_train['gt'][key] for key in keys_train]
        train_raw = make.make_classifier_raw_data(logits, gt, keys_train)
        classifier_dict['train']['time_instance'] = np.array(
            [contribution_train['time_instance'][key] for key in keys_train])

    # Validation raw data
    val_raw = make.make_classifier_raw_data([], [], [])
    if contribution_val is not None:
        keys_val = [key for key in contribution_val['logits'].keys()]
        logits = [contribution_val['logits'][key] for key in keys_val]
        gt = [contribution_val['gt'][key] for key in keys_val]
        val_raw = make.make_classifier_raw_data(logits, gt, keys_val)
        classifier_dict['val']['time_instance'] = np.array(
            [contribution_val['time_instance'][key] for key in keys_val])

    # Fill the dict
    classifier_dict.update(make.make_classifier_dict(classifier_dict['name'], "", train_raw, test_raw, metrics, val_data=val_raw))
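fill_classifier rebuilds aligned arrays from per-sample dictionaries keyed by instance id. A toy illustration of that alignment step (the dictionary contents are made up):

contribution = {
    'logits': {'img_0': [0.9, 0.1], 'img_1': [0.2, 0.8]},
    'gt':     {'img_0': 0,          'img_1': 1},
}
keys = list(contribution['logits'].keys())
logits = [contribution['logits'][k] for k in keys]  # rows follow the key order
gt = [contribution['gt'][k] for k in keys]          # ground truth in the same order
# keys, logits and gt now line up index by index, ready for make_classifier_raw_data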