Example #1
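Computes the gradient of the softmax output for a target label with respect to the input pixels of a batch of flattened 28x28 images.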
def get_grads(model_options, x_vals, label):
    """Return the gradient of softmax[:, label] w.r.t. the input pixels."""
    # Flatten each 28x28 image into a 784-dimensional row vector.
    x_vals = x_vals.reshape((-1, 28 ** 2))
    params, x, y, layers = build_model(model_options)

    # The final layer is the softmax output; select the target label's column.
    softmax_layer = layers[-1]
    softmax_label = softmax_layer[:, label]

    # Differentiate the selected class probability w.r.t. the input placeholder.
    grads_input = tf.gradients(softmax_label, x)[0]

    with tf.Session():
        # initialize_all_variables() is deprecated; use the TF1 replacement.
        tf.global_variables_initializer().run()
        res = grads_input.eval(feed_dict={x: x_vals})

        # Uncomment to inspect the class probabilities:
        # print(softmax_label.eval(feed_dict={
        #     x: x_vals
        # }))

    return res
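
A brief usage sketch follows; build_model and the keys it expects in model_options are project-specific, so the empty options dict and the input shapes here are assumptions:

import numpy as np

# Hypothetical batch of three 28x28 images; the model_options contents
# depend on build_model and are assumed empty for illustration.
images = np.random.rand(3, 28, 28).astype(np.float32)
grads = get_grads(model_options={}, x_vals=images, label=7)

# One gradient row per flattened image: d softmax[:, 7] / d pixel.
print(grads.shape)  # expected: (3, 784)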
Example #2
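Optimizes the input layer of a model with frozen parameters so that the model assigns high probability to a target label, periodically saving snapshots and re-randomizing inputs whose probability keeps falling.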
def get_faulty_input_layer(data, model_options):
    """Backwork CNN inputs: optimize the input layer toward `label` and
    return the (input values, softmax) snapshots saved along the way."""
    save_freq = model_options.get('save_freq', 20)
    label = model_options['label']
    num_features = model_options['num_features']
    min_num_steps = model_options.get('min_num_steps', 0)
    max_num_steps = model_options.get('max_num_steps', np.inf)
    tolerance = model_options.get('tolerance', 0)
    prob_cap_mean = model_options.get('prob_cap_mean', 1)
    prob_cap_min = model_options.get('prob_cap_min', 1)
    num_mono_dec_saves = model_options.get('num_mono_dec_saves', np.inf)
    batch_size = model_options.get('num_examples', 1)
    init_w_train_vals = model_options.get('init_w_train_vals', False)

    x_vals = None
    if init_w_train_vals:
        # Seed the inputs with averages of shuffled training examples for `label`.
        train_examples = get_data_label(data, label)
        x_vals = np.random.permutation(train_examples)
        x_vals = np.array_split(x_vals, 8)
        x_vals = np.array([np.average(xv, axis=0) for xv in x_vals])

    # Note: convolution + activation + pooling is considered 1 layer
    params, x, y, layers = build_model(model_options, const_params=True, x_values=x_vals)
    params_pf, x_pf, y_pf, layers_pf = build_model(model_options)
    softmax_layer_pf = layers_pf[-1]

    if not (max_num_steps < np.inf
            or ((tolerance > 0 or prob_cap_mean < 1 or prob_cap_min < 1)
                and min_num_steps < np.inf)):
        logger.warning('no exit conditions for backworking CNN inputs')

    eval_cnn_wrapper = functools.partial(eval_cnn, x_pf, softmax_layer_pf, num_features)

    snapshots = []  # (input values, softmax) pairs saved during optimization

    with tf.Session() as sess:

        logger.info('building model')
        costs = build_cost(data, x, layers, sess, model_options)
        cost = sum(costs)

        optimize_input_layer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)
        logger.info('starting backworking')

        tf.global_variables_initializer().run()
        t_start = time.time()

        cost_val_old = np.inf
        i = 0

        probs_correct_tracker = ProbTracker(num_mono_dec_saves, batch_size)

        try:
            while True:

                if i >= max_num_steps:
                    break

                t0 = time.time()
                optimize_input_layer.run()
                cost_val_new = cost.eval()
                logger.info('%f seconds in optimization cycle %i with cost %f', time.time() - t0, i, cost_val_new)

                # Log each cost term against its source layer (two terms per layer).
                for idx, c in enumerate(costs):
                    layer = layers[idx // 2]
                    logger.debug('layer %s cost %f', layer.name, c.eval())

                if (save_freq and i % save_freq == 0) or i == max_num_steps - 1:
                    input_vals, softmax = eval_cnn_wrapper(x)

                    probs_correct = softmax[:, label]
                    probs_correct_tracker(probs_correct)

                    snapshots.append((input_vals, softmax))

                    prob_correct_mean, prob_correct_min = probs_correct.mean(), probs_correct.min()
                    logger.info('%f / %f min / mean probability of correct label', prob_correct_min, prob_correct_mean)

                    if i >= min_num_steps and (prob_correct_mean > prob_cap_mean or prob_correct_min > prob_cap_min):
                        break

                    m = probs_correct_tracker.get_mask()

                    # Re-randomize any input whose correct-label probability has
                    # been monotonically decreasing across recent saves.
                    if m.any():
                        logger.info('reassignment mask %s', m)
                        logger.debug('probability history of %i steps: %s',
                                     num_mono_dec_saves, probs_correct_tracker.values[:, m])
                        shape = [m.sum()] + [model_options['image_dim_size']] * 2
                        input_vals[m] = np.random.rand(*shape)
                        probs_correct_tracker.mark_replaced_inputs(m)
                        sess.run(x.assign(input_vals))

                if abs(cost_val_new - cost_val_old) < tolerance and i >= min_num_steps:
                    break

                i += 1

                cost_val_old = cost_val_new
        except KeyboardInterrupt:
            pass
        logger.info('Average of %f seconds/optimization cycle over %d optimization cycles',
                    (time.time() - t_start) / max(i, 1), i)

        return snapshots
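
Example #2 relies on a ProbTracker helper that is not shown. The sketch below is a minimal stand-in consistent with its call sites (__call__, get_mask, mark_replaced_inputs, and the values array); the real implementation may differ:

import numpy as np

class ProbTracker:
    """Tracks each example's probability across saves and flags examples whose
    probability has decreased monotonically for `num_saves` consecutive saves."""

    def __init__(self, num_saves, batch_size):
        self.num_saves = num_saves
        self.values = np.zeros((0, batch_size))  # one column per example

    def __call__(self, probs):
        # Append the latest probabilities, keeping a rolling window of rows.
        self.values = np.vstack([self.values, probs])
        if np.isfinite(self.num_saves):
            self.values = self.values[-int(self.num_saves):]

    def get_mask(self):
        # True where the window is full and strictly decreasing throughout.
        if not np.isfinite(self.num_saves) or len(self.values) < self.num_saves:
            return np.zeros(self.values.shape[1], dtype=bool)
        return (np.diff(self.values, axis=0) < 0).all(axis=0)

    def mark_replaced_inputs(self, mask):
        # Clear the history of re-randomized inputs so they are not
        # immediately flagged again.
        self.values[:, mask] = 0.0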