Example #1
    def _train(self):
        tf.disable_eager_execution()
        ps_tasks = 0
        worker_replicas = 1
        worker_job_name = 'lonely_worker'
        task = 0
        is_chief = True
        master = ''
        graph_rewriter_fn = None
        # load and parse the pipeline config file
        configs = create_configs_from_pipeline_proto(self.pipeline)
        model_config = configs['model']
        train_config = configs['train_config']
        input_config = configs['train_input_config']
        # create the TF Object Detection API model from the config parameters
        model_fn = functools.partial(model_builder.build, model_config=model_config, is_training=True)

        def get_next(config):
            return dataset_builder.make_initializable_iterator(dataset_builder.build(config)).get_next()

        create_input_dict_fn = functools.partial(get_next, input_config)
        if 'graph_rewriter_config' in configs:
            graph_rewriter_fn = graph_rewriter_builder.build(configs['graph_rewriter_config'], is_training=True)
        # training the model with the new parameters
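        # positional args below: num_clones=1, clone_on_cpu=False (per the legacy trainer signature)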
        trainer.train(create_input_dict_fn, model_fn, train_config, master, task, 1, worker_replicas, False, ps_tasks,
                      worker_job_name, is_chief, str(self._out_folder), graph_hook_fn=graph_rewriter_fn)
Example #2
 def setUp(self) -> None:
     import tensorflow as tf
     tf.reset_default_graph()
     if "_eager" in self._testMethodName:
         tf.enable_eager_execution()
     else:
         tf.disable_eager_execution()
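The setUp above toggles the execution mode per test. Elsewhere in these examples the same switch is made version-safe by importing the compat.v1 API under TF 2.x; a minimal sketch of that recurring idiom:

import tensorflow as tf

if tf.__version__.startswith('2'):
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()  # run the rest of the module in graph mode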
Example #3
    def _setup_tfgraph(*args):
        import tensorflow as tf
        tf.disable_eager_execution()
        tf.reset_default_graph()
        from delira.models import AbstractTfGraphNetwork
        from delira.training.backends.tf_graph.utils import \
            initialize_uninitialized

        class Model(AbstractTfGraphNetwork):
            def __init__(self):
                super().__init__()
                self.dense = tf.keras.layers.Dense(1, activation="relu")

                data = tf.placeholder(shape=[None, 1], dtype=tf.float32)

                labels = tf.placeholder_with_default(tf.zeros(
                    [tf.shape(data)[0], 1]),
                                                     shape=[None, 1])

                preds_train = self.dense(data)
                preds_eval = self.dense(data)

                self.inputs["data"] = data
                self.inputs["labels"] = labels
                self.outputs_train["pred"] = preds_train
                self.outputs_eval["pred"] = preds_eval

        model = Model()
        initialize_uninitialized(model._sess)
        return model
Example #4
def main(unused_args):
  # Eager execution is enabled by default in TF 2.0, but generated example
  # tests are still using non-eager features (e.g. `tf.placeholder`).
  tf.disable_eager_execution()

  options = generate_examples_lib.Options()

  options.output_path = FLAGS.output_path
  options.zip_to_output = FLAGS.zip_to_output
  options.toco = FLAGS.toco
  options.known_bugs_are_errors = FLAGS.known_bugs_are_errors
  options.ignore_converter_errors = FLAGS.ignore_converter_errors
  options.save_graphdefs = FLAGS.save_graphdefs
  options.run_with_flex = FLAGS.run_with_flex
  options.make_edgetpu_tests = FLAGS.make_edgetpu_tests
  options.make_forward_compat_test = FLAGS.make_forward_compat_test
  options.tflite_convert_function = toco_convert.toco_convert
  options.no_tests_limit = FLAGS.no_tests_limit
  options.no_conversion_report = FLAGS.no_conversion_report

  if FLAGS.test_sets:
    test_sets = FLAGS.test_sets.split(",")
    generate_examples_lib.generate_multi_set_examples(options, test_sets)
  else:
    generate_examples_lib.generate_examples(options)
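As the comment above notes, `tf.placeholder` is a graph-mode construct and raises an error under eager execution. A self-contained sketch of that dependency, using only standard TF 1.x API via `tf.compat.v1`:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # without this, tf.placeholder raises RuntimeError

x = tf.placeholder(tf.float32, shape=[None, 3])
y = 2.0 * x
with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))  # [[2. 4. 6.]]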
Example #5
    def _init_class_grads(self, label=None):
        # pylint: disable=E0401
        import tensorflow as tf
        if tf.__version__[0] == '2':
            import tensorflow.compat.v1 as tf
            tf.disable_eager_execution()

        if not hasattr(self, '_class_grads'):
            self._class_grads = [None for _ in range(self.nb_classes())]

        # Construct the class gradients graph
        if label is None:
            if None in self._class_grads:
                self._class_grads = [
                    tf.gradients(self._output[:, i], self._input_ph)[0]
                    for i in range(self.nb_classes())
                ]

        elif isinstance(label, int):
            if self._class_grads[label] is None:
                self._class_grads[label] = tf.gradients(
                    self._output[:, label], self._input_ph)[0]

        else:
            for unique_label in np.unique(label):
                if self._class_grads[unique_label] is None:
                    self._class_grads[unique_label] = tf.gradients(
                        self._output[:, unique_label], self._input_ph)[0]
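For context, `_init_class_grads` builds one `tf.gradients` tensor per class so the session can later evaluate only the gradients actually requested. A minimal sketch of the same construction; the small model here is illustrative, not from the source:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
x = tf.placeholder(tf.float32, shape=[None, 4])
logits = tf.layers.dense(x, 3)  # toy model with 3 classes
# One d(logits[:, i])/dx graph per class, mirroring the list built above
class_grads = [tf.gradients(logits[:, i], x)[0] for i in range(3)]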
Example #6
    def __init__(self, model, config):
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()
        self.config = config
        self.model = model
        self.net_input_dim = config['net_input_dim']
        self.att_input_dim = config['att_input_dim']
        self.net_shape = config['net_shape']
        self.att_shape = config['att_shape']
        self.drop_prob = config['drop_prob']
        self.beta = config['beta']
        self.gamma = config['gamma']
        self.alpha = config['alpha']
        self.learning_rate = config['learning_rate']
        self.batch_size = config['batch_size']
        self.num_epochs = config['num_epochs']
        self.model_path = config['model_path']

        self.x = tf.placeholder(tf.float32, [None, self.net_input_dim])
        self.z = tf.placeholder(tf.float32, [None, self.att_input_dim])
        self.w = tf.placeholder(tf.float32, [None, None])

        self.neg_x = tf.placeholder(tf.float32, [None, self.net_input_dim])
        self.neg_z = tf.placeholder(tf.float32, [None, self.att_input_dim])
        self.neg_w = tf.placeholder(tf.float32, [None, None])

        self.optimizer, self.loss = self._build_training_graph()
        self.net_H, self.att_H, self.H = self._build_eval_graph()

        gpu_config = tf.ConfigProto()
        gpu_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=gpu_config)
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()
Example #7
 def initialize_variables(self):
     import tensorflow as tf
     if tf.__version__[0] == '2':
         print('Adjusting for tensorflow 2.0')
         tf = tf.compat.v1
         tf.disable_eager_execution()
     self._session.run(tf.global_variables_initializer())
     self.saver = tf.train.Saver(max_to_keep=100, allow_empty=True)
Example #8
    def run(self):
        src_path = os.path.abspath('./phi/tf/cuda/src')
        build_path = os.path.abspath('./phi/tf/cuda/build')
        print('Source Path:\t' + src_path)
        print('Build Path:\t' + build_path)

        # Get TF Compile/Link Flags and write to env
        import tensorflow as tf
        if tf.__version__[0] == '2':
            print('Adjusting for tensorflow 2.0')
            tf = tf.compat.v1
            tf.disable_eager_execution()
        tf_cflags = tf.sysconfig.get_compile_flags()
        tf_lflags = tf.sysconfig.get_link_flags()

        # Remove old build files
        if os.path.isdir(build_path):
            print('Removing old build files from %s' % build_path)
            for file in os.listdir(build_path):
                os.remove(os.path.join(build_path, file))
        else:
            print('Creating build directory at %s' % build_path)
            os.mkdir(build_path)

        print('Compiling CUDA code...')
        # Build the Laplace Matrix Generation CUDA Kernels
        subprocess.check_call([
            self.nvcc, '-std=c++11', '-c', '-o',
            os.path.join(build_path, 'laplace_op.cu.o'),
            os.path.join(src_path, 'laplace_op.cu.cc'), '-x', 'cu',
            '-Xcompiler', '-fPIC'
        ] + tf_cflags)

        # Build the Laplace Matrix Generation Custom Op
        # This is only needed for the Laplace Matrix Generation Benchmark
        subprocess.check_call([
            self.gcc, '-std=c++11', '-shared', '-o',
            os.path.join(build_path, 'laplace_op.so'),
            os.path.join(src_path, 'laplace_op.cc'),
            os.path.join(build_path, 'laplace_op.cu.o'), '-fPIC'
        ] + tf_cflags + tf_lflags)

        # Build the Pressure Solver CUDA Kernels
        subprocess.check_call([
            self.nvcc, '-std=c++11', '-c', '-lcublas', '-o',
            os.path.join(build_path, 'pressure_solve_op.cu.o'),
            os.path.join(src_path, 'pressure_solve_op.cu.cc'), '-x', 'cu',
            '-Xcompiler', '-fPIC'
        ] + tf_cflags)

        # Build the Pressure Solver Custom Op
        subprocess.check_call([
            self.gcc, '-std=c++11', '-shared', '-o',
            os.path.join(build_path, 'pressure_solve_op.so'),
            os.path.join(src_path, 'pressure_solve_op.cc'),
            os.path.join(build_path, 'pressure_solve_op.cu.o'),
            os.path.join(build_path, 'laplace_op.cu.o'), '-fPIC'
        ] + tf_cflags + tf_lflags)
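Once compiled, such a shared object is typically loaded at runtime with `tf.load_op_library`; a hedged sketch using the path produced by the build step above:

import tensorflow.compat.v1 as tf

# Adjust the path if the build directory differs
laplace_module = tf.load_op_library('./phi/tf/cuda/build/laplace_op.so')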
Example #9
    def get_activations(self, x, layer, batch_size=128):
        """
        Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
        `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
        calling `layer_names`.

        :param x: Input for computing the activations.
        :type x: `np.ndarray`
        :param layer: Layer for computing the activations
        :type layer: `int` or `str`
        :param batch_size: Size of batches.
        :type batch_size: `int`
        :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
        :rtype: `np.ndarray`
        """
        # pylint: disable=E0401
        import tensorflow as tf
        if tf.__version__[0] == '2':
            import tensorflow.compat.v1 as tf
            tf.disable_eager_execution()

        # Get the computational graph
        with self._sess.graph.as_default():
            graph = tf.get_default_graph()

        if isinstance(layer, six.string_types):  # basestring for Python 2 (str, unicode) support
            if layer not in self._layer_names:
                raise ValueError("Layer name %s is not part of the graph." % layer)
            layer_tensor = graph.get_tensor_by_name(layer)

        elif isinstance(layer, (int, np.integer)):
            layer_tensor = graph.get_tensor_by_name(self._layer_names[layer])

        else:
            raise TypeError("Layer must be of type `str` or `int`. Received %s" % layer)

        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)

        # Run prediction with batch processing
        results = []
        num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
        for m in range(num_batch):
            # Batch indexes
            begin, end = m * batch_size, min((m + 1) * batch_size, x_preprocessed.shape[0])

            # Create feed_dict
            feed_dict = {self._input_ph: x_preprocessed[begin:end]}
            feed_dict.update(self._feed_dict)

            # Run prediction for the current batch
            layer_output = self._sess.run(layer_tensor, feed_dict=feed_dict)
            results.append(layer_output)

        results = np.concatenate(results)

        return results
Example #10
    def __init__(self, sess, iterator, iterator_type, iterator_arg, size,
                 batch_size):
        """
        Create a data generator wrapper for TensorFlow. Supported iterators: initializable, reinitializable, feedable.

        :param sess: TensorFlow session.
        :type sess: `tf.Session`
        :param iterator: Data iterator from TensorFlow.
        :type iterator: `tensorflow.python.data.ops.iterator_ops.Iterator`
        :param iterator_type: Type of the iterator. Supported types: `initializable`, `reinitializable`, `feedable`.
        :type iterator_type: `string`
        :param iterator_arg: Argument to initialize the iterator. It is either a feed_dict used for the initializable
        and feedable mode, or an init_op used for the reinitializable mode.
        :type iterator_arg: `dict`, `tuple` or `tensorflow.python.framework.ops.Operation`
        :param size: Total size of the dataset.
        :type size: `int`
        :param batch_size: Size of the minibatches.
        :type batch_size: `int`
        :raises: `TypeError`, `ValueError`
        """
        # pylint: disable=E0401
        import tensorflow as tf
        if tf.__version__[0] == '2':
            import tensorflow.compat.v1 as tf
            tf.disable_eager_execution()

        super(TFDataGenerator, self).__init__(size=size, batch_size=batch_size)
        self.sess = sess
        self.iterator = iterator
        self.iterator_type = iterator_type
        self.iterator_arg = iterator_arg

        if not isinstance(iterator, tf.data.Iterator):
            raise TypeError("Only support object tf.data.Iterator")

        if iterator_type == 'initializable':
            if not isinstance(iterator_arg, dict):
                raise TypeError(
                    "Need to pass a dictionary for iterator type %s" %
                    iterator_type)
        elif iterator_type == 'reinitializable':
            if not isinstance(iterator_arg, tf.Operation):
                raise TypeError(
                    "Need to pass a tensorflow operation for iterator type %s"
                    % iterator_type)
        elif iterator_type == 'feedable':
            if not isinstance(iterator_arg, tuple):
                raise TypeError("Need to pass a tuple for iterator type %s" %
                                iterator_type)
        else:
            raise TypeError("Iterator type %s not supported" % iterator_type)
Example #11
    def __init__(self, shape, para, data, layer_idx, activation_fun1,
                 activation_fun2):
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()
        self.para = para
        self.data = data
        self.layer_idx = layer_idx

        self.x = tf.placeholder(tf.float32, [None, shape[0]])
        self.dropout = tf.placeholder(tf.float32)
        self.lr = tf.placeholder(tf.float32)

        self.var_list = []
        # Define the variable scope 'SAE'
        with tf.variable_scope('SAE') as scope:
            stddev = 1.0 / np.sqrt(shape[0])
            # self.x_c = gaussian_noise_layer(self.x, 0.001)
            # Dropout randomly drops a subset of neurons in each training pass:
            # a neuron's activation is zeroed with probability p, so it neither
            # updates its weights nor takes part in the forward computation for
            # that step. Its weights are kept (just frozen), since the neuron
            # may be active again for the next sample.
            self.x_c = tf.nn.dropout(self.x, self.dropout)
            self.W1 = tf.Variable(tf.random_normal([shape[0], shape[1]],
                                                   stddev=stddev),
                                  name="W1")
            self.b1 = tf.Variable(tf.zeros([shape[1]], name='b1'))
            self.h = tf.add(tf.matmul(self.x_c, self.W1), self.b1)
            if activation_fun1 is not None:
                self.h = activation_fun1(self.h)
            self.h = tf.nn.dropout(self.h, self.dropout)

            stddev = 1.0 / np.sqrt(shape[1])
            self.W2 = tf.Variable(tf.random_normal([shape[1], shape[0]],
                                                   stddev=stddev),
                                  name='W2')
            self.b2 = tf.Variable(tf.zeros([shape[0]], name='b2'))
            self.x_hat = tf.add(tf.matmul(self.h, self.W2), self.b2)
            if activation_fun2 is not None:
                self.x_hat = activation_fun2(self.x_hat)

            self.var_list.extend([self.W1, self.b1, self.W2, self.b2])

        self.loss = tf.reduce_mean(tf.square(self.x - self.x_hat))
        # self.loss = -tf.reduce_mean(self.x * tf.log(self.x_hat)+(1-self.x)*tf.log(1-self.x_hat))
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(
            loss=self.loss, var_list=self.var_list)

        gpu_config = tf.ConfigProto()
        gpu_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=gpu_config)
        self.sess.run(tf.global_variables_initializer())
Example #12
    def _get_layers(self):
        """
        Return the hidden layers in the model, if applicable.

        :return: The hidden layers in the model, input and output layers excluded.
        :rtype: `list`
        """
        # pylint: disable=E0401
        import tensorflow as tf
        if tf.__version__[0] == '2':
            import tensorflow.compat.v1 as tf
            tf.disable_eager_execution()

        # Get the computational graph
        with self._sess.graph.as_default():
            graph = tf.get_default_graph()

        # Get the list of operators and heuristically filter them
        tmp_list = []
        ops = graph.get_operations()

        for op in ops:
            if not op.values():
                continue
            shape = op.values()[0].get_shape()
            if shape is None or shape.ndims is None:
                continue
            dims = shape.as_list()
            # Keep batch-shaped tensors that are not gradients, losses or placeholders
            if (len(dims) > 1 and dims[0] is None and dims[1] is not None
                    and not op.values()[0].name.startswith("gradients")
                    and not op.values()[0].name.startswith(
                        "softmax_cross_entropy_loss")
                    and op.type != "Placeholder"):
                tmp_list.append(op.values()[0].name)

        # Shorten the list
        if not tmp_list:
            return tmp_list

        result = [tmp_list[-1]]
        for name in reversed(tmp_list[:-1]):
            if result[0].split("/")[0] != name.split("/")[0]:
                result = [name] + result
        logger.info('Inferred %i hidden layers on TensorFlow classifier.',
                    len(result))

        return result
Example #13
    def test_load_save(self):
        import tensorflow as tf
        tf.disable_eager_execution()
        from delira.io.tf import load_checkpoint, save_checkpoint
        from delira.models import AbstractTfGraphNetwork
        from delira.training.backends import initialize_uninitialized

        import numpy as np

        class DummyNetwork(AbstractTfGraphNetwork):
            def __init__(self, in_channels, n_outputs):
                super().__init__(in_channels=in_channels, n_outputs=n_outputs)
                self.net = self._build_model(in_channels, n_outputs)

            @staticmethod
            def _build_model(in_channels, n_outputs):
                return tf.keras.models.Sequential(
                    layers=[
                        tf.keras.layers.Dense(
                            64,
                            input_shape=in_channels,
                            bias_initializer='glorot_uniform'),
                        tf.keras.layers.ReLU(),
                        tf.keras.layers.Dense(
                            n_outputs,
                            bias_initializer='glorot_uniform')])

        net = DummyNetwork((32,), 1)
        initialize_uninitialized(net._sess)

        vars_1 = net._sess.run(tf.global_variables())

        save_checkpoint("./model", model=net)

        net._sess.run(tf.initializers.global_variables())

        vars_2 = net._sess.run(tf.global_variables())

        load_checkpoint("./model", model=net)

        vars_3 = net._sess.run(tf.global_variables())

        for var_1, var_2 in zip(vars_1, vars_2):
            with self.subTest(var_1=var_1, var_2=var_2):
                self.assertTrue(np.all(var_1 != var_2))

        for var_1, var_3 in zip(vars_1, vars_3):
            with self.subTest(var_1=var_1, var_3=var_3):
                self.assertTrue(np.all(var_1 == var_3))
Example #14
def main(argv):
    del argv  # Unused.
    tf.disable_eager_execution()
    weight = np.load(FLAGS.chainer_model_path)

    if FLAGS.keras_model_path == "":
        FLAGS.keras_model_path = FLAGS.chainer_model_path.replace(
            ".npz", "_Keras.h5")
    if FLAGS.tfjs_model_path == "":
        FLAGS.tfjs_model_path = FLAGS.chainer_model_path.replace(
            ".npz", "_tfjs")

    print('Tensorflow.js model path: ', FLAGS.tfjs_model_path)

    if FLAGS.arch == 'resnet128':
        get_generator = partial(get_resnet128_keras_generator,
                                input_dim=128,
                                ch=1024)
    elif FLAGS.arch == 'resnet256':
        get_generator = partial(get_resnet256_keras_generator,
                                input_dim=128,
                                ch=1024)
    elif FLAGS.arch == 'dcgan64':
        get_generator = partial(get_dcgan64_keras_generator,
                                input_dim=128,
                                ch=512)
    else:
        raise ValueError('Unknown --arch %s' % FLAGS.arch)

    generator = get_generator(weight=weight)
    print('Keras summary')
    generator.summary()
    logging.info('Saving keras model (weights) to %s', FLAGS.keras_model_path)
    generator.save_weights(FLAGS.keras_model_path)
    del generator
    # Rebuild the generator to avoid lambda initializers, which would cause errors in tfjs.
    generator = get_generator()
    generator.load_weights(FLAGS.keras_model_path)
    generator.save_weights(FLAGS.keras_model_path)

    logging.info('Saving tensorflow.js model to %s', FLAGS.tfjs_model_path)
    os.system('mkdir -p "%s"' % FLAGS.tfjs_model_path)
    tfjs.converters.save_keras_model(generator, FLAGS.tfjs_model_path)

    sample_output_dir = FLAGS.keras_model_path + '.sample'
    logging.info('Sampling images, saving to %s', sample_output_dir)
    os.system('mkdir -p "%s"' % sample_output_dir)
    for index in range(10):
        generate_images(generator, sample_output_dir, index)
Example #15
def show_tf_graph():
    tf.disable_eager_execution()  # TODO: This is TF v1 only
    d = ds_tf.frame_sequence_dataset('data')
    d_merged = ds_tf.frame_sequence_dataset('data', merge_channels=True)

    it = d.make_one_shot_iterator()
    ne = it.get_next()

    it = d_merged.make_one_shot_iterator()
    ne_merged = it.get_next()

    with tf.Session() as sess:
        for _ in range(3):
            image_pair, image_pair_444 = sess.run([ne, ne_merged])
            visualize(image_pair, image_pair_444)
Example #16
def show_tf_graph():
    tf.disable_eager_execution()
    d = ds_tf.frame_pairs_dataset('data')
    d_rgb = ds_tf.frame_pairs_dataset('data', merge_channels=True)

    it = d.make_one_shot_iterator()
    ne = it.get_next()

    it = d_rgb.make_one_shot_iterator()
    ne_rgb = it.get_next()

    with tf.Session() as sess:
        for _ in range(3):
            image_pair, image_pair_444 = sess.run([ne, ne_rgb])
            visualize(image_pair, image_pair_444)
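Both of the last two examples rely on the TF 1.x one-shot iterator pattern; stripped to its essentials, it looks like this (a self-contained sketch):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
ds = tf.data.Dataset.range(5)
nxt = tf.data.make_one_shot_iterator(ds).get_next()
with tf.Session() as sess:
    for _ in range(3):
        print(sess.run(nxt))  # 0, 1, 2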
Example #17
    def build_style_map_model(self, layers=[1, 4, 7], smooth=1, mean=1):
        import tensorflow as tf
        from netty import module_style as ms
        from netty import model_variational as mv
        from tensorflow.keras.layers import Input, Lambda
        from tensorflow.keras.models import Model

        tf.disable_eager_execution()
        tf.keras.backend.clear_session()

        vgg = ms.model_vgg.build({})
        vgg = ms.extract_layers(vgg, layers)
        mask_input = Input((None, None), name="MINPUT")
        style_input = Input((None, None, 3), name="SINPUT")
        render_input = Input((None, None, 3), name="RINPUT")

        def resize_fn(args):
            x = args[0]
            y = args[1]
            return tf.compat.v2.image.resize(tf.expand_dims(x, -1),
                                             tf.shape(y[0])[:2],
                                             antialias=True)[..., 0]

        resize = Lambda(resize_fn)
        apmask = ms.apply_mask()
        loss_layer = ms.loss_l(1 / len(vgg.outputs))
        gram_layer = ms.gram_l(0)
        mask_gram_layer = ms.mask_gram_l()

        vgg_a = vgg(style_input)
        vgg_b = vgg(render_input)

        losses = []
        for a, b in zip(vgg_a, vgg_b):
            mask = resize([mask_input, a])
            a = apmask([a, mask])
            loss = loss_layer([gram_layer(a), gram_layer(b)])
            losses.append(loss)
        loss = Lambda(lambda x: tf.expand_dims(tf.reduce_sum(x), 0))(losses)

        var = mv.build({"weight": smooth, "power": 1.25})
        var_loss = var(Lambda(lambda x: tf.expand_dims(x, -1))(mask_input))
        loss = Lambda(lambda x: x[0] + x[1])([loss, var_loss])
        mean_loss = Lambda(lambda x: tf.square(tf.reduce_mean(x) - 1) * mean)(
            mask_input)
        loss = Lambda(lambda x: x[0] + x[1])([loss, mean_loss])
        self.style_map_model = Model([mask_input, style_input, render_input],
                                     [loss])
Example #18
    def _create_tfclassifier():
        """
        To create a simple TensorFlowClassifier for testing.
        :return:
        """
        import tensorflow as tf

        if tf.__version__[0] == "2":
            import tensorflow.compat.v1 as tf

            tf.disable_eager_execution()

        # Define input and output placeholders
        input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
        labels_ph = tf.placeholder(tf.int32, shape=[None, 10])

        # Define the TensorFlow graph
        conv = tf.layers.conv2d(input_ph, 4, 5, activation=tf.nn.relu)
        conv = tf.layers.max_pooling2d(conv, 2, 2)
        fc = tf.layers.flatten(conv)

        # Logits layer
        logits = tf.layers.dense(fc, 10)

        # Train operator
        loss = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(logits=logits,
                                            onehot_labels=labels_ph))
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train = optimizer.minimize(loss)

        # TensorFlow session and initialization
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        # Create the classifier
        tfc = TensorFlowClassifier(
            input_ph=input_ph,
            output=logits,
            labels_ph=labels_ph,
            train=train,
            loss=loss,
            learning=None,
            sess=sess,
            clip_values=(0, 1),
        )

        return tfc
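Hypothetical usage of the helper above, assuming ART's `TensorFlowClassifier` exposes the usual `predict` method (the input shape matches the placeholder defined in the helper):

import numpy as np

tfc = _create_tfclassifier()
x = np.random.rand(2, 28, 28, 1).astype(np.float32)  # two MNIST-shaped samples
preds = tfc.predict(x)  # assumed to return logits of shape (2, 10)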
Example #19
    def solve(self, divergence, domain, pressure_guess):
        assert isinstance(domain, FluidDomain)
        active_mask = domain.active_tensor(extend=1)
        fluid_mask = domain.accessible_tensor(extend=1)
        dimensions = list(divergence.shape[1:-1])
        N = int(np.prod(dimensions))

        if math.choose_backend(divergence).matches_name('TensorFlow'):
            import tensorflow as tf
            if tf.__version__[0] == '2':
                print('Adjusting for tensorflow 2.0')
                tf = tf.compat.v1
                tf.disable_eager_execution()
            sidx, sorting = sparse_indices(dimensions)
            sval_data = sparse_values(dimensions, active_mask, fluid_mask,
                                      sorting)
            A = tf.SparseTensor(indices=sidx,
                                values=sval_data,
                                dense_shape=[N, N])
        else:
            A = sparse_pressure_matrix(dimensions, active_mask, fluid_mask)

        if self.autodiff:
            return sparse_cg(divergence,
                             A,
                             self.max_iterations,
                             pressure_guess,
                             self.accuracy,
                             back_prop=True)
        else:

            def pressure_gradient(op, grad):
                return sparse_cg(grad, A, max_gradient_iterations, None,
                                 self.gradient_accuracy)[0]

            pressure, iteration = math.with_custom_gradient(
                sparse_cg, [
                    divergence, A, self.max_iterations, pressure_guess,
                    self.accuracy
                ],
                pressure_gradient,
                input_index=0,
                output_index=0,
                name_base='scg_pressure_solve')

            max_gradient_iterations = iteration if self.max_gradient_iterations == 'mirror' else self.max_gradient_iterations
            return pressure, iteration
Example #20
def run_eager_execution():
    tf.enable_eager_execution()

    tensor = tf.range(10)
    tf.print("tensors:", tensor, output_stream=sys.stdout)
    # tensors: [0 1 2 ... 7 8 9]

    print(np.square(tensor))
    # [ 0  1  4  9 16 25 36 49 64 81]

    tensor = tf.square(tensor)
    tf.print("tensors:", tensor, output_stream=sys.stdout)
    # tensors: [0 1 4 ... 49 64 81]

    tf.disable_eager_execution()

    return
Example #21
def test_pytorch_in_tensorflow_eager_mode():
    tf.enable_eager_execution()
    tfe = tf.contrib.eager

    def pytorch_expr(a, b):
        return 3 * a + 4 * b * b

    x = tfpyth.eager_tensorflow_from_torch(pytorch_expr)

    assert tf.math.equal(
        x(tf.convert_to_tensor(1.0), tf.convert_to_tensor(3.0)), 39.0)

    dx = tfe.gradients_function(x)
    assert all(
        tf.math.equal(dx(tf.convert_to_tensor(1.0), tf.convert_to_tensor(3.0)),
                      [3.0, 24.0]))
    tf.disable_eager_execution()
Example #22
def normalise_imgs_in_dir(dir_name: str, target_max_length: int = 512) -> None:
    '''
    Resize all images in a directory to a fixed square size, in place.

    :param dir_name:            dir name of images to be normalised
    :param target_max_length:   images will be resized to target_max_length*target_max_length
    :return:                    None
    '''
    tf.enable_eager_execution()
    imgs = os.listdir(os.path.join(path_base, dir_name))
    for i in imgs:
        img = cv2.imread(os.path.join(path_base, dir_name,
                                      i)).astype(np.float32)
        result_img = tf.image.resize_image_with_pad(img, target_max_length,
                                                    target_max_length)
        result_img = np.array(result_img, dtype=np.uint8)
        cv2.imwrite(os.path.join(path_base, dir_name, i), result_img)
        print(i)
    tf.disable_eager_execution()
Example #23
    def setUp(self) -> None:
        if check_for_tf_graph_backend():
            import tensorflow as tf
            tf.disable_eager_execution()
            from delira.training import TfGraphExperiment

            config = DeliraConfig()
            config.fixed_params = {
                "model": {},
                "training": {
                    "losses": {
                        "CE":
                            tf.losses.softmax_cross_entropy},
                    "optimizer_cls": tf.train.AdamOptimizer,
                    "optimizer_params": {"learning_rate": 1e-3},
                    "num_epochs": 2,
                    "metrics": {"mae": mean_absolute_error},
                    "lr_sched_cls": None,
                    "lr_sched_params": {}}
            }
            model_cls = DummyNetworkTfGraph
            experiment_cls = TfGraphExperiment

        else:
            config = None
            model_cls = None
            experiment_cls = None

        len_train = 100
        len_test = 50

        self._test_cases = [
            {
                "config": config,
                "network_cls": model_cls,
                "len_train": len_train,
                "len_test": len_test,
                "key_mapping": {"data": "data"},
            }
        ]
        self._experiment_cls = experiment_cls

        super().setUp()
Example #24
def get_num_samples(record_dir):
    """
    get tfrecord numbers
    :param record_file:
    :return:
    """
    # check record file format
    # record_list = glob.glob(os.path.join(self.record_dir, '*.record'))
    file_pattern = os.path.join(record_dir, '*.record')
    input_files = tf.io.gfile.glob(file_pattern)
    num_samples = 0
    print("counting number of sample, please waiting...")
    # convert to dynamic mode
    tf.enable_eager_execution()
    for _ in tf.data.TFRecordDataset(input_files):
        num_samples += 1
    # switch back to graph (static) mode
    tf.disable_eager_execution()
    return num_samples
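Toggling the global eager state just to count records is heavy-handed; under the same assumptions, a graph-mode-safe alternative is the (deprecated but available) TF 1.x record iterator. A sketch, not from the source:

import tensorflow.compat.v1 as tf

def count_records(input_files):
    # tf.io.tf_record_iterator works without enabling eager execution
    return sum(1 for f in input_files
               for _ in tf.io.tf_record_iterator(f))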
Example #25
def load_graph(model_path):
    # check tf version to be compatible with TF 2.x
    global tf
    if tf.__version__.startswith('2'):
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()

    # We parse the graph_def file
    with tf.gfile.GFile(model_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # We load the graph_def in the default graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def,
                            input_map=None,
                            return_elements=None,
                            name="graph",
                            op_dict=None,
                            producer_op_list=None)
    return graph
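Illustrative use of `load_graph` above (the model path is hypothetical):

graph = load_graph('frozen_model.pb')
for op in graph.get_operations()[:5]:
    print(op.name)  # names carry the 'graph/' prefix set by import_graph_def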
Example #26
 def test_increase_frequency(self, name, flatten):
   tf.disable_eager_execution()
   x = np.array([1.0, 0.1123, 0.7463], dtype=np.float32)
   dim = 3
   # pylint:disable=bad-whitespace
   expected = [
       [0,        -1,        0,        1,         0,          1       ],
       [0.345528,  0.938409, 0.648492, 0.761221,  0.987292,   0.158916],
       [0.715278, -0.69884, -0.99973, -0.0232457, 0.0464788, -0.998919]
   ]
   # pylint:enable=bad-whitespace
   expected = np.array(expected, dtype=np.float32)
   if flatten:
     expected = np.reshape(expected, [-1])
   y = math_util.increase_frequency(
       tf.constant(x, dtype=tf.float32), dim, flatten=flatten, interleave=True)
   with self.session() as sess:
     out = sess.run(y)
   distance = float(np.sum(np.abs(expected - out)))
   self.assertLess(distance, FLOAT_DISTANCE_EPS,
                   f'Expected {expected} but got {out}')
Example #27
import contextlib
import logging
import os
import threading

import numpy as np
import tensorflow as tf
if tf.__version__[0] == '2':
    logging.info('Adjusting for tensorflow 2.0')
    tf = tf.compat.v1
    tf.disable_eager_execution()
from phi import struct

from .profiling import Timeliner
from .util import isplaceholder, istensor


class Session(object):
    def __init__(self, scene, session=None):
        self._scene = scene
        self._session = session if session is not None else tf.Session()
        assert self._session.graph == tf.get_default_graph(), \
            'Session %s does not reference the current TensorFlow graph.' % self._session
        self.graph = tf.get_default_graph()
        self.summary_writers = {}
        self.summary_directory = os.path.abspath(
            scene.subpath('summary')) if scene is not None else None
        self.profiling_directory = scene.subpath(
            "profile") if scene is not None else None
        self.trace_count = 0
        self.saver = None
Example #28
    def __setstate__(self, state):
        """
        Use to ensure `TensorFlowClassifier` can be unpickled.

        :param state: State dictionary with instance parameters to restore.
        :type state: `dict`
        """
        self.__dict__.update(state)

        # Load and update all functionality related to TensorFlow
        # pylint: disable=E0611, E0401
        import os
        import tensorflow as tf
        if tf.__version__[0] == '2':
            import tensorflow.compat.v1 as tf
            tf.disable_eager_execution()
        from tensorflow.python.saved_model import tag_constants
        from art import DATA_PATH

        full_path = os.path.join(DATA_PATH, state['model_name'])

        graph = tf.Graph()
        sess = tf.Session(graph=graph)
        loaded = tf.saved_model.loader.load(sess, [tag_constants.SERVING],
                                            full_path)

        # Recover session
        self._sess = sess

        # Recover input_ph
        input_tensor_name = loaded.signature_def['predict'].inputs[
            'SavedInputPhD'].name
        self._input_ph = graph.get_tensor_by_name(input_tensor_name)

        # Recover output layer
        self._output = graph.get_tensor_by_name(state['_output'])

        # Recover labels' placeholder if any
        if state['_labels_ph'] is not None:
            self._labels_ph = graph.get_tensor_by_name(state['_labels_ph'])

        # Recover loss if any
        if state['_loss'] is not None:
            self._loss = graph.get_tensor_by_name(state['_loss'])

        # Recover loss_grads if any
        if state['_loss_grads']:
            self._loss_grads = graph.get_tensor_by_name(state['_loss_grads'])
        else:
            self.__dict__.pop('_loss_grads', None)

        # Recover learning if any
        if state['_learning'] is not None:
            self._learning = graph.get_tensor_by_name(state['_learning'])

        # Recover train if any
        if state['_train'] is not None:
            self._train = graph.get_operation_by_name(state['_train'])

        # Recover class_grads if any
        if state['_class_grads']:
            self._class_grads = [
                ts if ts is None else graph.get_tensor_by_name(ts)
                for ts in state['_class_grads']
            ]
        else:
            self.__dict__.pop('_class_grads', None)

        self.__dict__.pop('model_name', None)
Example #29
    def __init__(self,
                 input_ph,
                 output,
                 labels_ph=None,
                 train=None,
                 loss=None,
                 learning=None,
                 sess=None,
                 channel_index=3,
                 clip_values=None,
                 defences=None,
                 preprocessing=(0, 1)):
        """
        Initialization specific to TensorFlow models implementation.

        :param input_ph: The input placeholder.
        :type input_ph: `tf.Placeholder`
        :param output: The output layer of the model. This can be logits, probabilities or anything else. Logits
               output should be preferred where possible to ensure attack efficiency.
        :type output: `tf.Tensor`
        :param labels_ph: The labels placeholder of the model. This parameter is necessary when training the model and
               when computing gradients w.r.t. the loss function.
        :type labels_ph: `tf.Tensor`
        :param train: The train tensor for fitting, including an optimizer. Use this parameter only when training the
               model.
        :type train: `tf.Tensor`
        :param loss: The loss function for which to compute gradients. This parameter is necessary when training the
               model and when computing gradients w.r.t. the loss function.
        :type loss: `tf.Tensor`
        :param learning: The placeholder to indicate if the model is training.
        :type learning: `tf.Placeholder` of type bool.
        :param sess: Computation session.
        :type sess: `tf.Session`
        :param channel_index: Index of the axis in data containing the color channels or features.
        :type channel_index: `int`
        :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
               maximum values allowed for features. If floats are provided, these will be used as the range of all
               features. If arrays are provided, each value will be considered the bound for a feature, thus
               the shape of clip values needs to match the total number of features.
        :type clip_values: `tuple`
        :param defences: Defences to be activated with the classifier.
        :type defences: `str` or `list(str)`
        :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
               used for data preprocessing. The first value will be subtracted from the input. The input will then
               be divided by the second one.
        :type preprocessing: `tuple`
        """
        # pylint: disable=E0401
        import tensorflow as tf
        if tf.__version__[0] == '2':
            import tensorflow.compat.v1 as tf
            tf.disable_eager_execution()

        super(TensorFlowClassifier, self).__init__(clip_values=clip_values,
                                                   channel_index=channel_index,
                                                   defences=defences,
                                                   preprocessing=preprocessing)
        self._nb_classes = int(output.get_shape()[-1])
        self._input_shape = tuple(input_ph.get_shape().as_list()[1:])
        self._input_ph = input_ph
        self._output = output
        self._labels_ph = labels_ph
        self._train = train
        self._loss = loss
        self._learning = learning
        self._feed_dict = {}

        # Assign session
        if sess is None:
            raise ValueError("A session cannot be None.")
        self._sess = sess

        # Get the internal layers
        self._layer_names = self._get_layers()

        # Get the loss gradients graph
        if self._loss is not None:
            self._loss_grads = tf.gradients(self._loss, self._input_ph)[0]
Example #30
def test_tf_keras_eager_env(out_dir):
    tf.enable_eager_execution()
    train_model(out_dir, eager=False, steps=["train"])
    tf.disable_eager_execution()