Example #1
def setup_tpu_session(master):
  """Initializes and returns a Keras/TF session connected the TPU `master`."""
  session = tf_session.Session(
      target=master, config=config_pb2.ConfigProto(isolate_session_state=True))
  K.set_session(session)
  K.get_session().run(tpu.initialize_system())
  return session
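A minimal usage sketch for the helper above (the TPU master address is a hypothetical placeholder), pairing it with the shutdown_tpu_session helper shown in the later examples:

master = 'grpc://10.240.1.2:8470'  # hypothetical TPU worker endpoint
session = setup_tpu_session(master)
try:
    pass  # run Keras/TPU work against the initialized session here
finally:
    shutdown_tpu_session(session)  # cleanly shut down the TPU system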
Example #2
    def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    mapped_weight_name = weight.name.replace(':', '_')
                    tf_summary.histogram(mapped_weight_name, weight)
                    if self.write_grads:
                        grads = model.optimizer.get_gradients(
                            model.total_loss, weight)

                        def is_indexed_slices(grad):
                            return type(grad).__name__ == 'IndexedSlices'

                        grads = [
                            grad.values if is_indexed_slices(grad) else grad
                            for grad in grads
                        ]
                        tf_summary.histogram(
                            '{}_grad'.format(mapped_weight_name), grads)
                    if self.write_images:
                        w_img = array_ops.squeeze(weight)
                        shape = K.int_shape(w_img)
                        if len(shape) == 2:  # dense layer kernel case
                            if shape[0] > shape[1]:
                                w_img = array_ops.transpose(w_img)
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [1, shape[0], shape[1], 1])
                        elif len(shape) == 3:  # convnet case
                            if K.image_data_format() == 'channels_last':
                                # switch to channels_first to display
                                # every kernel as a separate image
                                w_img = array_ops.transpose(w_img,
                                                            perm=[2, 0, 1])
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [shape[0], shape[1], shape[2], 1])
                        elif len(shape) == 1:  # bias case
                            w_img = array_ops.reshape(w_img,
                                                      [1, shape[0], 1, 1])
                        else:
                            # not possible to handle 3D convnets etc.
                            continue

                        shape = K.int_shape(w_img)
                        assert len(shape) == 4 and shape[-1] in [1, 3, 4]
                        tf_summary.image(mapped_weight_name, w_img)

                if hasattr(layer, 'output'):
                    tf_summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
        else:
            self.writer = tf_summary.FileWriter(self.log_dir)
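For context, a short sketch of how this hook is reached: Keras invokes set_model on every callback when fit() starts, so a TensorBoard-style callback like the one above is typically wired up as follows (the callback class name and training data are assumptions for illustration):

callback = MyTensorBoard(log_dir='./logs', histogram_freq=1,
                         write_grads=True, write_images=True)
# fit() calls callback.set_model(model) before the first epoch, which is when
# the weight/gradient/image summaries above are created.
model.fit(x_train, y_train, epochs=5, callbacks=[callback])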
Example #3
    def __call__(self, inputs):
        assert isinstance(inputs, list)

        # Strip sample weight from inputs
        if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN
                or self.execution_mode == model_fn_lib.ModeKeys.EVAL):
            input_tensors = self.model._feed_inputs + self.model._feed_targets
            inputs = inputs[:len(input_tensors)]
        else:
            input_tensors = self.model._feed_inputs

        shard_inputs = self._split_tensors(inputs)
        del inputs  # To avoid accidental use.

        # Compute an input specification (used to generate infeed enqueue and
        # dequeue operations).  We use the shape from our input array and the
        # dtype from our model.  A user may pass in a float64 for a float32
        # input: for model compatibility we still must generate a float32 infeed.
        input_specs = []

        # We use the shape and dtype from the first shard to compute the input
        # metadata (`input_specs`); all replicas have the same type and shape.
        for tensor, ary in zip(input_tensors, shard_inputs[0]):
            input_specs.append(
                tensor_spec.TensorSpec(ary.shape, tensor.dtype,
                                       _valid_name(tensor.name)))

        # XLA requires that every operation in the graph have a fixed shape.  To
        # handle varying batch sizes we recompile a new sub-graph for each
        # unique input shape.
        shape_key = tuple(
            [tuple(spec.shape.as_list()) for spec in input_specs])

        if shape_key not in self._compilation_cache:
            logging.info('New input shapes; (re-)compiling: mode=%s, %s',
                         self.execution_mode, input_specs)
            new_tpu_model_ops = self._specialize_model(input_specs)
            self._compilation_cache[shape_key] = new_tpu_model_ops
            self._test_model_compiles(new_tpu_model_ops)

        tpu_model_ops = self._compilation_cache[shape_key]

        infeed_dict = {}
        for infeed_tensors, inputs in zip(tpu_model_ops.infeed_tensors,
                                          shard_inputs):
            for tensor, value in zip(infeed_tensors, inputs):
                infeed_dict[tensor] = value

        session = K.get_session()
        _, _, outfeed_outputs = session.run([
            tpu_model_ops.infeed_op, tpu_model_ops.execute_op,
            tpu_model_ops.outfeed_op
        ], infeed_dict)

        # TODO(xiejw): Decide how to reduce outputs, or just discard all but first.
        return outfeed_outputs[:len(outfeed_outputs) // self.num_replicas]
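To illustrate the shape-keyed compilation cache above (the shapes are assumptions): batches with identical shapes reuse one compiled sub-graph, while a differently sized final batch triggers a recompile.

# shape_key is a tuple of per-input shapes, e.g. for image/label inputs:
key_batch_1 = ((32, 224, 224, 3), (32, 1000))  # compiled on first use
key_batch_2 = ((32, 224, 224, 3), (32, 1000))  # same key -> cache hit
key_last = ((16, 224, 224, 3), (16, 1000))     # smaller final batch -> recompile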
Example #4
	def __init__(self, configuration, content_image, style_image):

		self.conf = configuration
		self.style_im = tf.Variable(self._loadImage(style_image))
		self.content_im = tf.Variable(self._loadImage(content_image))

		self.createModel()
		self.createLoss()

		# Important: to use the VGG16 ImageNet weights we must reuse the Keras
		# session. If we instead ran global_variables_initializer, the
		# pretrained weights would be overwritten with random values.
		self.sess = K.get_session()
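A minimal sketch of the pitfall the comment above describes, assuming a TF 1.x graph-mode setup: reusing the Keras session preserves the pretrained ImageNet weights, whereas running global_variables_initializer afterwards would overwrite them with random values.

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications import VGG16

vgg = VGG16(weights='imagenet', include_top=False)
sess = K.get_session()  # the pretrained weights are already initialized here
# sess.run(tf.global_variables_initializer())  # would re-randomize them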
Example #5
  def set_model(self, model):
    self.model = model
    self.sess = K.get_session()
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_grads:
            grads = model.optimizer.get_gradients(model.total_loss, weight)

            def is_indexed_slices(grad):
              return type(grad).__name__ == 'IndexedSlices'

            grads = [grad.values if is_indexed_slices(grad) else grad
                     for grad in grads]
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue

            shape = K.int_shape(w_img)
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)

        if hasattr(layer, 'output'):
          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
    else:
      self.writer = tf_summary.FileWriter(self.log_dir)
Example #6
def shutdown_tpu_session(session=None):
    """Shut down the TPU attached to `session`.

  This should be called to cleanly shut down the TPU system before the client
  exits.

  Args:
    session: Session to shutdown, or None to use the default session.

  Returns:

  """
    if session is None:
        session = K.get_session()

    session.run(tpu.shutdown_system())
Example #7
def shutdown_tpu_session(session=None):
  """Shutdown the TPU attached to session.

  This should be called to cleanly shut down the TPU system before the client
  exits.

  Args:
    session: Session to shutdown, or None to use the default session.

  Returns:

  """
  if session is None:
    session = K.get_session()

  session.run(tpu.shutdown_system())
Example #8
    def __call__(self, inputs):
        assert isinstance(inputs, list)

        # Strip sample weight from inputs
        if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN
                or self.execution_mode == model_fn_lib.ModeKeys.EVAL):
            input_tensors = self.model._feed_inputs + self.model._feed_targets
            inputs = inputs[:len(input_tensors)]
        else:
            input_tensors = self.model._feed_inputs

        # Compute an input specification (used to generate infeed enqueue and
        # dequeue operations).  We use the shape from our input array and the
        # dtype from our model.  A user may pass in a float64 for a float32
        # input: for model compatibility we still must generate a float32 infeed.
        input_specs = []
        for tensor, ary in zip(input_tensors, inputs):
            input_specs.append(
                tensor_spec.TensorSpec(ary.shape, tensor.dtype,
                                       _valid_name(tensor.name)))

        # XLA requires that every operation in the graph have a fixed shape.  To
        # handle varying batch sizes we recompile a new sub-graph for each
        # unique input shape.
        shape_key = tuple(
            [tuple(spec.shape.as_list()) for spec in input_specs])

        if shape_key not in self._compilation_cache:
            logging.info('New input shapes; (re-)compiling: mode=%s, %s',
                         self.execution_mode, input_specs)
            self._compilation_cache[shape_key] = self._specialize_model(
                input_specs)

        compiled_model = self._compilation_cache[shape_key]

        infeed_dict = {}
        for tensor, value in zip(compiled_model.infeed_tensors, inputs):
            infeed_dict[tensor] = value

        session = K.get_session()
        _, _, outfeed_outputs = session.run([
            compiled_model.infeed_op, compiled_model.tpu_execute_op,
            compiled_model.outfeed_op
        ], infeed_dict)

        return outfeed_outputs
Example #9
    def _test_model_compiles(self, tpu_model_ops):
        """Verifies that the given TPUModelOp can be compiled via XLA."""
        session = K.get_session()

        logging.info('Started compiling')
        start_time = time.clock()

        result = session.run(tpu_model_ops.compile_op)
        proto = tpu_compilation_result.CompilationResultProto()
        proto.ParseFromString(result)
        if proto.status_error_message:
            raise RuntimeError('Compilation failed: {}'.format(
                proto.status_error_message))

        end_time = time.clock()
        logging.info('Finished compiling. Time elapsed: %s secs',
                     end_time - start_time)
Example #10
  def __call__(self, inputs):
    assert isinstance(inputs, list)

    # Strip sample weight from inputs
    if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
        self.execution_mode == model_fn_lib.ModeKeys.EVAL):
      input_tensors = self.model._feed_inputs + self.model._feed_targets
      inputs = inputs[:len(input_tensors)]
    else:
      input_tensors = self.model._feed_inputs

    # Compute an input specification (used to generate infeed enqueue and
    # dequeue operations).  We use the shape from our input array and the
    # dtype from our model.  A user may pass in a float64 for a float32
    # input: for model compatibility we still must generate a float32 infeed.
    input_specs = []
    for tensor, ary in zip(input_tensors, inputs):
      input_specs.append(
          tensor_spec.TensorSpec(ary.shape, tensor.dtype,
                                 _valid_name(tensor.name)))

    # XLA requires that every operation in the graph have a fixed shape.  To
    # handle varying batch sizes we recompile a new sub-graph for each
    # unique input shape.
    shape_key = tuple([tuple(spec.shape.as_list()) for spec in input_specs])

    if shape_key not in self._compilation_cache:
      logging.info('New input shapes; (re-)compiling: mode=%s, %s',
                   self.execution_mode, input_specs)
      self._compilation_cache[shape_key] = self._specialize_model(input_specs)

    compiled_model = self._compilation_cache[shape_key]

    infeed_dict = {}
    for tensor, value in zip(compiled_model.infeed_tensors, inputs):
      infeed_dict[tensor] = value

    session = K.get_session()
    _, _, outfeed_outputs = session.run([
        compiled_model.infeed_op, compiled_model.tpu_execute_op,
        compiled_model.outfeed_op
    ], infeed_dict)

    return outfeed_outputs
Example #11
    def _specialize_model(self, input_specs):
        """Specialize `self.model` (a Keras model) for the given input shapes."""
        # Re-create our input and output layers inside our subgraph.  They will be
        # attached to the true computation when we clone our model in `tpu_fn`.
        K.set_learning_phase(
            self.execution_mode == model_fn_lib.ModeKeys.TRAIN)

        # functools.partial and callable objects are not supported by tpu.rewrite
        def _model_fn():
            """Compute fit/eval/predict for the TPU."""
            is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
            is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
            is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT

            # During train/eval, we infeed our features as well as labels.
            if is_training or is_test:
                infeed_layers = self.model._input_layers + self.model._output_layers
            else:
                infeed_layers = self.model._input_layers

            # Generate our infeed operation to read features & labels.
            infeed_tensors = tpu_ops.infeed_dequeue_tuple(
                dtypes=[spec.dtype for spec in input_specs],
                shapes=[spec.shape for spec in input_specs],
                name='infeed-%s' % self.execution_mode)

            assert len(infeed_tensors) == len(infeed_layers), (
                'Infeed inputs did not match model: %s vs %s' %
                (infeed_layers, infeed_tensors))

            tpu_targets = []
            tpu_inputs = []

            # Sort infeed outputs into inputs and labels for calling our Keras model.
            for tensor, layer in zip(infeed_tensors, infeed_layers):
                if layer in self.model._input_layers:
                    tpu_inputs.append(
                        layers.Input(name=layer.name, tensor=tensor))
                if layer in self.model._output_layers:
                    tpu_targets.append(tensor)

            optimizer = self.model.optimizer
            optimizer.iterations = training_util.get_or_create_global_step()

            # Call our model with our infeed inputs (re-using the weights).
            model_outputs = self.model(tpu_inputs)
            child_model = models.Model(inputs=tpu_inputs,
                                       outputs=model_outputs)
            if is_training or is_test:
                child_model.compile(
                    optimizer=self.model.optimizer,
                    loss=self.model.loss,
                    loss_weights=self.model.loss_weights,
                    metrics=self.model.metrics,
                    weighted_metrics=self.model.weighted_metrics,
                    target_tensors=tpu_targets,
                )

            # Compute our outfeed depending on the execution mode
            if is_training:
                child_model._make_train_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in child_model.train_function.outputs
                ]
                return [
                    child_model.train_function.updates_op,
                    tpu_ops.outfeed_enqueue_tuple(
                        child_model.train_function.outputs,
                        name='outfeed-enqueue-train')
                ]
            elif is_test:
                child_model._make_test_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in child_model.test_function.outputs
                ]
                return [
                    tpu_ops.outfeed_enqueue_tuple(
                        child_model.test_function.outputs,
                        name='outfeed-enqueue-test')
                ]
            elif is_predict:
                child_model._make_predict_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in child_model.predict_function.outputs
                ]
                return [
                    tpu_ops.outfeed_enqueue_tuple(
                        child_model.predict_function.outputs,
                        name='outfeed-enqueue-predict',
                    )
                ]
            else:
                assert False, 'Unexpected execution mode: %s' % self.execution_mode

        # Capture outfeed metadata computed during the rewrite.
        self._outfeed_spec = None

        tpu_execute_op = tpu.rewrite(_model_fn)

        K._initialize_variables(
            K.get_session())  # pylint: disable=protected-access

        # Generate CPU side operations to enqueue features/labels and dequeue
        # outputs from the model call.
        with ops.device('/device:TPU:0'):
            infeed_tensors = []
            for spec in input_specs:
                infeed_tensors.append(
                    array_ops.placeholder(dtype=spec.dtype,
                                          shape=spec.shape,
                                          name='infeed-enqueue-%s' %
                                          spec.name))

            infeed_op = tpu_ops.infeed_enqueue_tuple(
                infeed_tensors, [spec.shape for spec in input_specs],
                name='infeed-enqueue-%s' % self.execution_mode)

            outfeed_op = tpu_ops.outfeed_dequeue_tuple(
                dtypes=[spec.dtype for spec in self._outfeed_spec],
                shapes=[spec.shape for spec in self._outfeed_spec],
                name='outfeed-dequeue-%s' % self.execution_mode)

        return CompiledTPUOp(tpu_execute_op, infeed_tensors, infeed_op,
                             outfeed_op)
Example #12
def _get_available_devices():
    return [x.name for x in K.get_session().list_devices()]
Example #13
def _get_available_devices():
  return [x.name for x in K.get_session().list_devices()]
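A quick usage sketch for the helper above; the exact device strings depend on the machine.

devices = _get_available_devices()
# e.g. ['/job:localhost/replica:0/task:0/device:CPU:0',
#       '/job:localhost/replica:0/task:0/device:GPU:0']
print(devices)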
Example #14
# load modules
import tensorflow as tf
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import urllib, cStringIO
import os

# load the model
model = tf.keras.applications.ResNet50()

# load compute_margin class
from compute_margin import margin
from tensorflow.python.keras._impl.keras import backend as K
m = margin(tf.log(model.output), model.input, K.get_session(), [K.learning_phase()])

# open the file
i = 0
#f = open('/mnt/nfs/nfsshare/user_homes/zybill/imagenet_data/fall11_urls.txt')
f = open('./imagenet_data/fall11_urls.txt')

# make the write directory
#write_dir = '/mnt/nfs/nfsshare/user_homes/zybill/results_imagenet/'
write_dir = './results/imagenet_adversarials/'
if not os.path.exists(write_dir):
    os.makedirs(write_dir)

# run compute margin
input_shape = (224, 224)
for i in xrange(100):