Example #1
def build_code(data_dir, eval_data, sample_count, batch_size=100):
    vgg = Vgg16()
    with tf.Graph().as_default():
        #global_step = tf.train.get_or_create_global_step()
        with tf.device(
                '/cpu:0'):  #Make use of CPU memory to reduce GPU memory usage
            images, labels = cifar10_input.inputs(eval_data=eval_data,
                                                  data_dir=data_dir,
                                                  batch_size=batch_size)

        vgg.build(images)

        all_codes = None
        all_labels = None
        with tf.train.MonitoredSession() as sess:
            for _ in tqdm(range(sample_count // batch_size)):
                labels_batch, codes_batch = sess.run([labels, vgg.relu6])
                if all_codes is None:
                    all_codes = codes_batch
                    all_labels = labels_batch
                else:
                    all_codes = np.concatenate((all_codes, codes_batch))
                    all_labels = np.concatenate((all_labels, labels_batch))

    return all_codes, all_labels
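A minimal usage sketch for build_code, assuming the standard CIFAR-10 binary data layout; the directory path below is a placeholder and not part of the original example:

import numpy as np

# Hypothetical path; point this at the extracted cifar-10-batches-bin directory.
DATA_DIR = "/tmp/cifar10_data/cifar-10-batches-bin"

# Extract VGG16 relu6 codes and labels for the 10,000-image CIFAR-10 test split.
test_codes, test_labels = build_code(DATA_DIR, eval_data=True,
                                     sample_count=10000, batch_size=100)
np.save("test_codes.npy", test_codes)
np.save("test_labels.npy", test_labels)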
Example #2
def train():
    vgg = Vgg16()

    #Test images for VGG only need to be loaded once
    test_codes, test_labels = may_load_test_code(FLAGS.data_dir)

    with tf.Graph().as_default():
        #global_step = tf.train.get_or_create_global_step()
        #Make use of CPU memory to avoid GPU memory allocation problem
        with tf.device('/cpu:0'):
            #images, labels = cifar10_input.inputs(eval_data=False,
            #                                    data_dir=FLAGS.data_dir,
            #                                    batch_size=FLAGS.batch_size)
            images, labels = cifar10_input.distorted_inputs(
                data_dir=FLAGS.data_dir, batch_size=FLAGS.batch_size)

        vgg.build(images)

        with tf.device('/cpu:0'):
            input_ = tf.placeholder(tf.float64,
                                    shape=[None, 4096],
                                    name='cifar-input')
            labels_ = tf.placeholder(tf.int32, [None], name='cifar-output')

        #Append a model for cifar classification
        logits = cifar_model(input_)
        cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_,
                                                           logits=logits))
        train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

        predicted = tf.nn.softmax(logits)
        correct_pred = tf.equal(tf.argmax(predicted, 1, output_type=tf.int32),
                                labels_)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        tf.summary.scalar('accuracy', accuracy)
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.log_dir)
        test_dict = {input_: test_codes, labels_: test_labels}
        with tf.train.MonitoredSession() as sess:
            for i in range(FLAGS.max_steps + 1):
                labels_batch, codes_batch = sess.run([labels, vgg.relu6])
                train_dict = {input_: codes_batch, labels_: labels_batch}
                _, loss_val = sess.run([train_step, cross_entropy],
                                       feed_dict=train_dict)

                if i % FLAGS.log_frequency == 0:
                    acc, summary = sess.run([accuracy, summary_op],
                                            feed_dict=test_dict)
                    summary_writer.add_summary(summary, i)
                    print("Step:%4d, Loss:%f Test Accuracy:%f" %
                          (i, loss_val, acc))

        summary_writer.close()
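train() relies on cifar_model, which is not shown in these snippets; a minimal sketch, assuming a small fully connected head that maps the 4096-dimensional relu6 codes to the 10 CIFAR-10 classes (the hidden-layer size is illustrative):

def cifar_model(codes):
    # codes: [batch, 4096] VGG16 relu6 activations fed through the placeholder.
    with tf.variable_scope('cifar_head'):
        hidden = tf.layers.dense(codes, 256, activation=tf.nn.relu)
        logits = tf.layers.dense(hidden, 10)  # 10 CIFAR-10 classes
    return logits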
Example #3
    def __init__(self, img_name, path):
        self.img_name = img_name
        self.path = path
        self.image = self.load_image(os.path.join(path, img_name))
        self.alpha = 0.75
        
        self.synset = [
            l.strip() for l in open("./test_data/synset.txt").readlines()
        ]

        self.images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
        self.vgg16_net = Vgg16()
        self.vgg16_net.build(self.images)
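load_image is referenced here but not defined in these snippets; a minimal sketch, assuming the common tensorflow-vgg convention of a center-cropped 224x224 RGB image scaled to [0, 1] (the skimage dependency is an assumption):

import skimage.io
import skimage.transform

def load_image(path):
    # Read the image and scale pixel values to [0, 1].
    img = skimage.io.imread(path) / 255.0
    # Center-crop to a square, then resize to the VGG16 input resolution.
    short_edge = min(img.shape[:2])
    yy = (img.shape[0] - short_edge) // 2
    xx = (img.shape[1] - short_edge) // 2
    crop = img[yy:yy + short_edge, xx:xx + short_edge]
    return skimage.transform.resize(crop, (224, 224))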
Example #4
    def reset(self, curr_attack):
        self.alpha = 0.75
        self.vgg16_net = Vgg16()
        self.vgg16_net.build(self.images)

        self.model = foolbox.models.TensorFlowModel(self.images,
                                                    self.vgg16_net.fc8, (0, 1),
                                                    self.vgg16_net)
        self.attack = self.poss_attacks[curr_attack](self.model)
        self.data_str = ""
        self.pre_softmax = self.model.forward_one(self.image)
        self.conf = foolbox.utils.softmax(self.pre_softmax)
        self.idx = np.argmax(self.conf)
        self.category = ' '.join(self.synset[self.idx].split()[1:])
Example #5
    def __init__(self, config, mode, load_vocab=False):
        """
        Initial setup.
        """

        if mode == "inference":
            self.vgg_model = Vgg16()
        self.config = config
        self.intializer = tf.contrib.layers.xavier_initializer()
        # self.intializer = tf.random_uniform_initializer(
        #     minval=-self.config.initializer_scale,
        #     maxval=self.config.initializer_scale
        # )
        self.mode = mode
        self.load_vocab = load_vocab

        # All Image Related Variables

        # A float32 tensor of shape [height, width, channels]
        self.inference_image = None

        # A float32 tensor of shape [batch_size,feature_size]
        self.image_features = None
        # A float32 tensor with shape [batch_size, embedding_size]
        self.image_embeddings = None
        # All Caption Related values
        # A int32 tensor with shape [batch_size, padded_length]
        self.input_sequence = None
        # A int32 tensor with shape [batch_size, padded_length, embedding_size]
        self.input_sequence_embeddings = None
        # A int32 tensor with shape [batch_size, padded_length]
        self.target_sequence = None
        # A int32 tensor with shape [batch_size, padded_length]
        self.input_mask = None

        # All Losses
        # A float32 scalar
        self.total_loss = None
        # A float32 tensor with shape [batch_size*padded_length]
        self.target_cross_entropy_loss = None
        # A float32 tensor with shape [batch_size*padded_length]
        self.target_cross_entropy_loss_weights = None

        # All softmax probabilities
        # A float32 tensor of shape [padded_length, vocab_size]
        self.softmax_score = None

        # Global Step Variable
        self.global_step = None
Example #6
    def __init__(self, img_name, path, attack_name):
        self.img_name = img_name
        self.path = path
        self.image = self.load_image(os.path.join(path, img_name))
        self.alpha = 0.75

        self.synset = [
            l.strip() for l in open("./test_data/synset.txt").readlines()
        ]

        self.images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
        self.vgg16_net = Vgg16()
        self.vgg16_net.build(self.images)
        poss_attacks = self.get_attacks()
        if attack_name in poss_attacks:
            self.attack_class = poss_attacks[attack_name]
        else:
            print(attack_name, "is an unknown attack")
            sys.exit("\n\n")
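get_attacks is referenced above but not shown; a minimal sketch, assuming it returns a mapping from attack name to foolbox attack class (the exact class names available depend on the installed foolbox version):

    def get_attacks(self):
        # Map user-facing names to foolbox attack classes (foolbox 1.x/2.x API).
        return {
            "fgsm": foolbox.attacks.FGSM,
            "deepfool": foolbox.attacks.DeepFoolAttack,
            "saliency": foolbox.attacks.SaliencyMapAttack,
        }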
Example #7

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Test foolbox")
    parser.add_argument("-i",
                        "--image",
                        type=str,
                        default="./test_data/tiger.jpeg")  # alternative: kitten2.png

    args = vars(parser.parse_args())
    image = load_image(args["image"])
    synset = [l.strip() for l in open("./test_data/synset.txt").readlines()]

    images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))

    logits2 = Vgg16()
    logits2.build(images)

    with tf.device('/GPU:0'):
        with tf.Session() as sess:
            batch = image.reshape((1, 224, 224, 3))
            feed_dict = {images: batch}

            vgg = Vgg16()
            with tf.name_scope("content_vgg"):
                vgg.build(images)

            [
                prob, fc8, fc7, fc6, conv5_1, conv4_1, conv3_1, conv2_1,
                conv1_1, bgr, rgb_sc, rgb
            ] = sess.run(
                # Tensor attributes assumed to mirror the unpacked names above;
                # the original snippet is truncated at this point.
                [vgg.prob, vgg.fc8, vgg.fc7, vgg.fc6, vgg.conv5_1,
                 vgg.conv4_1, vgg.conv3_1, vgg.conv2_1, vgg.conv1_1,
                 vgg.bgr, vgg.rgb_sc, vgg.rgb],
                feed_dict=feed_dict)

Example #8

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Test foolbox")
    parser.add_argument("-i",
                        "--image",
                        type=str,
                        default="./test_data/kitten2.png")

    args = vars(parser.parse_args())
    image = get_sample_image(args["image"])
    synset = [l.strip() for l in open("./test_data/synset.txt").readlines()]

    images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))

    logits2 = Vgg16()
    logits2.build(images)

    with foolbox.models.TensorFlowModel(images, logits2.fc8,
                                        (0, 255)) as model:

        idx = np.argmax(model.forward_one(image))
        import pdb
        pdb.set_trace()
        raw_conf = model.forward_one(image)
        raw_max = raw_conf.max()
        raw_conf2 = raw_conf - raw_max
        raw_conf2_exp = np.exp(raw_conf2)
        raw_conf_norm = 1. / raw_conf2_exp.sum()
        conf = raw_conf2_exp * raw_conf_norm
        category = ' '.join(synset[idx].split()[1:])
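The manual max-subtract, exp, and normalize steps above implement a numerically stable softmax; assuming that is all they are meant to do, the helper already used in the reset snippet gives the same result:

        # Equivalent to the manual max-subtract / exp / normalize steps above.
        conf = foolbox.utils.softmax(raw_conf)
        idx = np.argmax(conf)
        category = ' '.join(synset[idx].split()[1:])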