Code example #1
# Imports needed to run this snippet (module paths inferred from the project's other
# examples; `dataset_to_iterator` is assumed to live in `ares.dataset`).
import numpy as np

from ares.dataset import cifar10, imagenet, dataset_to_iterator


def gen_starting_points(model,
                        ys,
                        ys_target,
                        goal,
                        dataset_name,
                        session,
                        pred_fn,
                        cache=None):
    ''' Generate starting points that are already adversarial according to the adversarial goal.

    :param model: The model.
    :param ys: True labels.
    :param ys_target: Targeted labels.
    :param goal: Adversarial goal.
    :param dataset_name: The dataset's name. Valid values are ``'cifar10'`` and ``'imagenet'``.
    :param session: ``tf.Session`` for loading the dataset.
    :param pred_fn: A function which accepts a batch of model inputs as a numpy array and returns the model's
        predictions.
    :param cache: A dictionary used as a cache for reusing generated starting points. The same cache must not be
        shared between different models or adversarial goals.
    :return: Starting points as a numpy array.
    '''
    if cache is None:
        cache = dict()

    starting_points = np.zeros((len(ys), *model.x_shape),
                               dtype=model.x_dtype.as_numpy_dtype)

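    # For the 'ut' and 'tm' goals the starting point only needs to be misclassified:
    # sample uniform noise until the model's prediction differs from the true label.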
    if goal in ('ut', 'tm'):
        for index, y in enumerate(ys):
            y = int(y)
            if y not in cache:
                while True:
                    x = np.random.uniform(low=model.x_min,
                                          high=model.x_max,
                                          size=(1, *model.x_shape))
                    x = x.astype(model.x_dtype.as_numpy_dtype)
                    x_pred = pred_fn(x)[0]
                    if x_pred != y:
                        cache[y] = x[0]
                        break
            starting_points[index] = cache[y]
    else:
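        # For the targeted goal, reuse a dataset image that the model already predicts as the target label.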
        for index, y in enumerate(ys_target):
            if y not in cache:
                if dataset_name == 'cifar10':
                    dataset = cifar10.load_dataset_for_classifier(
                        model, target_label=y).batch(1)
                else:
                    dataset = imagenet.load_dataset_for_classifier(
                        model, target_label=y).batch(1)
                for _, x, _ in dataset_to_iterator(dataset, session):
                    x_pred = pred_fn(x)[0]
                    if x_pred == y:
                        cache[y] = x[0]
                        break
            starting_points[index] = cache[y]

    return starting_points
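
A minimal, hedged usage sketch for the function above. It assumes a classifier `model` already loaded into a live `tf.Session` named `session`, plus numpy arrays `ys` / `ys_target` of true and target labels; the placeholder-based `pred_fn` mirrors the pattern used in code example #3 below.

# Hedged usage sketch (assumes `import tensorflow as tf`, a loaded `model`, and a `session`).
xs_ph = tf.placeholder(model.x_dtype, shape=(None, *model.x_shape))
labels_op = model.labels(xs_ph)
pred_fn = lambda xs: session.run(labels_op, feed_dict={xs_ph: xs})

cache = dict()  # reuse the same cache only for a single model and adversarial goal
starting_points = gen_starting_points(model, ys, ys_target, goal='ut',
                                      dataset_name='imagenet', session=session,
                                      pred_fn=pred_fn, cache=cache)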
Code example #2
File: attack_cli.py  Project: Fugoes/realsafe
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)

    print('Loading model...')
    model = load_model_from_path(args.model).load(session)

    print('Loading dataset...')
    if args.dataset == 'cifar10':
        from ares.dataset import cifar10
        dataset = cifar10.load_dataset_for_classifier(model,
                                                      offset=args.offset,
                                                      load_target=True)
    else:
        from ares.dataset import imagenet
        dataset = imagenet.load_dataset_for_classifier(model,
                                                       offset=args.offset,
                                                       load_target=True)
    dataset = dataset.take(args.count)

    print('Loading attack...')
    attack_name, batch_size, dataset_name = args.method, args.batch_size, args.dataset
    goal, distance_metric = args.goal, args.distance_metric

    kwargs = dict()
    for kwarg in ('learning_rate', 'cw_loss_c', 'samples_per_draw',
                  'init_distortion'):
        attr = getattr(args, kwarg)
        if attr is not None:
            kwargs[kwarg] = attr
    if args.dimension_reduction_height is not None and args.dimension_reduction_width is not None:
        kwargs['dimension_reduction'] = (args.dimension_reduction_height,
                                         args.dimension_reduction_width)
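
The excerpt stops before the attack object is built. As a hedged sketch of the usual pattern (not the original file's code; `SomeAttack` is a placeholder for whichever class `attack_name` selects), the collected keyword arguments are forwarded to the attack's constructor together with the model and session:

# Hedged sketch only: 'SomeAttack' is a placeholder name, not a specific ares class.
attack = SomeAttack(model=model, batch_size=batch_size, goal=goal,
                    distance_metric=distance_metric, session=session, **kwargs)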
Code example #3
    '../example/imagenet/resnet_v2_alp.py',
    '../example/imagenet/resnet152_fd.py',
    '../example/imagenet/inception_v3_jpeg.py',
    '../example/imagenet/inception_v3_bit.py',
    '../example/imagenet/inception_v3_rand.py',
    '../example/imagenet/inception_v3_randmix.py',
]

rs = dict()
for model_path_short in MODELS:
    print('Loading {}...'.format(model_path_short))
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              model_path_short)
    model = load_model_from_path(model_path).load(session)
    dataset = imagenet.load_dataset_for_classifier(model,
                                                   offset=0,
                                                   load_target=True).take(1000)
    xs_ph = tf.placeholder(model.x_dtype, shape=(None, *model.x_shape))
    labels = model.labels(xs_ph)

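    # Measure clean accuracy over the 1000-image subset, repeating the pass 10 times
    # and averaging the per-batch accuracies.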
    accs = []
    for _ in range(10):
        for i_batch, (_, xs, ys, ys_target) in enumerate(
                dataset_to_iterator(dataset.batch(batch_size), session)):
            predictions = session.run(labels, feed_dict={xs_ph: xs})
            acc = np.equal(predictions, ys).astype(np.float32).mean()
            accs.append(acc)
            print('n={}..{} acc={:.3f}'.format(
                i_batch * batch_size, i_batch * batch_size + batch_size - 1,
                acc))
    rs[model_path_short] = np.mean(accs)
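
A short follow-up, not part of the original snippet, that prints the per-model summary collected in `rs`:

# Follow-up sketch (not in the original snippet): report the averaged clean accuracy per model.
for model_path_short, mean_acc in rs.items():
    print('{}: mean clean accuracy {:.3f}'.format(model_path_short, mean_acc))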