Example #1
import os
import dill
import numpy as np

# Unpack the saved specification: dataset name, shrunken bounds, input image,
# and the pickled normalization / denormalization functions.
dataset = f['dataset']
lb = f['shrink_lb']
ub = f['shrink_ub']
is_conv = f['is_conv'].item()
img = f['image']
norm = f['norm_func']
denorm = f['denorm_func']
over_lb = f['orig_over_lb']
over_ub = f['orig_over_ub']
# Restore the callables that were serialized with dill.
norm = dill.loads(norm)
denorm = dill.loads(denorm)

filename, file_extension = os.path.splitext(args.netname)
is_trained_with_pytorch = file_extension == ".pyt"

model, is_conv, means, stds, layers = read_tensorflow_net(
    args.netname, len(lb), is_trained_with_pytorch)

# Build the box specification [specLB, specUB] around the image, clipped to
# the valid pixel range for the dataset/network combination.
if dataset == 'mnist':
    specLB = np.clip(img - epsilon, 0, 1)
    specUB = np.clip(img + epsilon, 0, 1)
elif dataset == 'cifar10':
    if is_trained_with_pytorch:
        specLB = np.clip(img - epsilon, 0, 1)
        specUB = np.clip(img + epsilon, 0, 1)
    else:
        specLB = np.clip(img - epsilon, -0.5, 0.5)
        specUB = np.clip(img + epsilon, -0.5, 0.5)
if is_trained_with_pytorch:
    # Normalize the bounds in place with the network's means/stds.
    norm(specLB, dataset, means, stds, is_conv)
    norm(specUB, dataset, means, stds, is_conv)
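The deserialized norm function itself is not shown in this excerpt. A minimal sketch of the in-place mean/std normalization it is assumed to perform (channel layouts matching Example #2) could look like this; norm_sketch is a hypothetical name:

import numpy as np

def norm_sketch(image, dataset, means, stds, is_conv):
    # Hypothetical stand-in for the pickled norm function: in-place (x - mean) / std.
    if dataset == 'mnist':
        image[:] = (image - means[0]) / stds[0]
    elif dataset == 'cifar10':
        if is_conv:
            # Interleaved R,G,B,R,G,B,... pixel layout for convolutional nets.
            for c in range(3):
                image[c::3] = (image[c::3] - means[c]) / stds[c]
        else:
            # Planar layout: all R pixels, then all G, then all B.
            for c in range(3):
                image[c * 1024:(c + 1) * 1024] = \
                    (image[c * 1024:(c + 1) * 1024] - means[c]) / stds[c]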
Example #2
def create_tf_model(netname, dataset, im, model_name):
    import os
    import numpy as np
    import tensorflow as tf
    from read_net_file import read_tensorflow_net
    from clever_wolf import CutModel
    sess = tf.Session()
    filename, file_extension = os.path.splitext(netname)
    is_trained_with_pytorch = file_extension == ".pyt"

    if dataset == 'mnist':
        num_pixels = 784
    elif dataset == 'cifar10':
        num_pixels = 3072
    elif dataset == 'acasxu':
        num_pixels = 5
    elif dataset == 'mortgage':
        num_pixels = 172
    model, is_conv, means, stds, layers = read_tensorflow_net(
        netname, num_pixels, is_trained_with_pytorch)
    pixel_size = np.array([1.0 / 256.0] * num_pixels)
    pgd_means = np.zeros((num_pixels, 1))
    pgd_stds = np.ones((num_pixels, 1))

    zeros = np.zeros((num_pixels))
    ones = np.ones((num_pixels))
    if is_trained_with_pytorch:
        normalize(zeros, dataset, means, stds, is_conv)
        normalize(ones, dataset, means, stds, is_conv)

    if is_trained_with_pytorch:
        im_copy = np.copy(im)
        normalize(im_copy, dataset, means, stds, is_conv)
        if dataset == 'mnist':
            pgd_means[:] = means[0]
            pgd_stds[:] = stds[0]
            pixel_size = pixel_size / stds[0]
        elif dataset == 'cifar10':
            if is_conv:
                # Convolutional nets: channels are interleaved (R, G, B, R, G, B, ...).
                for i in range(1024):
                    for c in range(3):
                        idx = 3 * i + c
                        pixel_size[idx] = pixel_size[idx] / stds[c]
                        pgd_means[idx] = means[c]
                        pgd_stds[idx] = stds[c]
            else:
                # Fully-connected nets: channels are planar (all R, then all G, then all B).
                for i in range(1024):
                    for c in range(3):
                        idx = c * 1024 + i
                        pixel_size[idx] = pixel_size[idx] / stds[c]
                        pgd_means[idx] = means[c]
                        pgd_stds[idx] = stds[c]
        elif dataset == 'mortgage' or dataset == 'acasxu':
            pgd_means[:, 0] = means
            pgd_stds[:, 0] = stds
            pixel_size = np.array([1.0] * num_pixels) / stds
        else:
            # TODO: hack, only MNIST, CIFAR10, mortgage and ACAS Xu are supported here.
            assert False
    else:
        assert dataset == 'mnist'
        im_copy = np.copy(im)

    print('Model created')
    tf_out = tf.get_default_graph().get_tensor_by_name(model.name)
    tf_in = tf.get_default_graph().get_tensor_by_name('x:0')
    print('Tensors created')

    out = sess.run(tf_out, feed_dict={tf_in: im_copy})
    print('Tf out computed')
    if model_name is None:
        cut_model = CutModel(sess, tf_in, tf_out, np.argmax(out), pixel_size)
    else:
        cut_model = CutModel.load(model_name, sess, tf_in, tf_out,
                                  np.argmax(out))
    print('Cut model created')
    return cut_model, is_conv, means, stds, im_copy, pgd_means, pgd_stds, layers, zeros, ones
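For orientation, a hypothetical call to create_tf_model is sketched below; the network path and the random test image are placeholders, not part of the original example.

import numpy as np

im = np.random.rand(784)  # placeholder flattened MNIST image
(cut_model, is_conv, means, stds, im_norm,
 pgd_means, pgd_stds, layers, zeros, ones) = create_tf_model(
    'nets/mnist_relu.pyt', 'mnist', im, model_name=None)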
Example #3
            f.write('ReLU\n')
        if args.invert and i == len(ws) - 1:
            f.write(str((-ws[i]).T.tolist()) + '\n')
            f.write(str((-bs[i]).tolist()) + '\n')
        else:
            f.write(str(ws[i].T.tolist()) + '\n')
            f.write(str(bs[i].tolist()) + '\n')
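To make the serialized text format concrete, a hypothetical 2x2 layer (not from the original script) would be written as one line for the transposed weights and one line for the biases:

import numpy as np

w = np.array([[1.0, 2.0], [3.0, 4.0]])  # hypothetical layer weights
b = np.array([0.5, -0.5])               # hypothetical layer biases
print(str(w.T.tolist()))  # [[1.0, 3.0], [2.0, 4.0]]
print(str(b.tolist()))    # [0.5, -0.5]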

from read_net_file import read_tensorflow_net
spec = open(args.spec, 'r').read()
spec = parse_input_box(spec)
spec_lb = np.array(spec)[0,:,0]
spec_ub = np.array(spec)[0,:,1]


model_tf, is_conv, means_r, stds_r, layers = read_tensorflow_net(args.output, 5, True)

assert np.all( means_r - means == 0.0 )
assert np.all( stds_r - stds == 0.0 )

normalize(spec_lb, means, stds, is_conv)
normalize(spec_ub, means, stds, is_conv)

tf_out = tf.get_default_graph().get_tensor_by_name( model_tf.name )
tf_in = tf.get_default_graph().get_tensor_by_name( 'x:0' )
sess = tf.Session()

out_lb = sess.run( tf_out, feed_dict={tf_in: spec_lb} )
out_ub = sess.run( tf_out, feed_dict={tf_in: spec_ub} )

runnable = rt.prepare(model, 'CPU')
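Assuming model is a loaded ONNX model and rt refers to an ONNX backend module (e.g. onnxruntime.backend), the prepared representation could be run on the same normalized bounds to cross-check the TensorFlow outputs; this is a sketch, not part of the original script.

onnx_out_lb = runnable.run(spec_lb.astype(np.float32))
onnx_out_ub = runnable.run(spec_ub.astype(np.float32))
print(out_lb, onnx_out_lb)
print(out_ub, onnx_out_ub)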