Example 1
    def test_compute_density_network(self):
        """Verify density computation of the network defined in setUp().

        The test uses a pre-computed density over the network.

        We do not cover multiple CNNs coexisting in the graph, because
        TensorFlow adds some strange nodes if multiple graphs are present.
        """
        tf.reset_default_graph()

        # Build a network identical to the one from setUp(), under its own scope.
        with tf.variable_scope(name_or_scope="cnn"):
            # Declare input
            input_placeholder = tf.placeholder(tf.float32,
                                               shape=(None, 28, 28, 1))

            # Do the parsing
            sequence_to_net(self.net_nsc, input_placeholder)

        # Computed beforehand
        target_density = 1.2756756756756757  # Before: 1.281767955801105
        # Assert the value
        self.assertEqual(
            compute_network_density(tf.get_default_graph(), "cnn"),
            target_density)
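
The implementation of compute_network_density() is not part of this listing. A minimal sketch of one plausible definition, assuming density is the edge-to-op ratio over the nodes living under the given scope (a hypothetical reconstruction, not the library's actual code):

import re

import tensorflow as tf

def compute_network_density_sketch(graph, scope):
    """Hypothetical: edges-per-op ratio over the ops inside `scope`."""
    pattern = re.compile(r"^" + re.escape(scope) + r"/")
    scoped_ops = [op for op in graph.get_operations()
                  if pattern.match(op.name)]
    # Each op input is one incoming edge of the graph.
    total_edges = sum(len(op.inputs) for op in scoped_ops)
    return total_edges / len(scoped_ops)
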
Example 2
    def test_sequence_to_net_concat_nonused(self):
        """Verify that the concatenation of non-used layers is correct."""
        tf.reset_default_graph()
        custom_graph_dir = self.graphs_dir + "/test02"
        if os.path.isdir(custom_graph_dir):
            shutil.rmtree(custom_graph_dir)

        with tf.variable_scope(name_or_scope="cnn"):
            net_nsc = [
                (1, 4, 0, 0, 0),  # Layer 1: Identity(input)
                (2, 1, 1, 1, 0),  # Layer 2: Convolution(Layer1)
                (3, 1, 3, 2, 0),  # Layer 3: Convolution(Layer2)
                (4, 1, 1, 1, 0),  # Layer 4: Convolution(Layer1)
                (5, 1, 5, 4, 0),  # Layer 5: Convolution(Layer4)
                (6, 2, 3, 1, 0),  # Layer 6: MaxPooling(Layer1)
                (7, 1, 1, 6, 0),  # Layer 7: Convolution(Layer6)
                (8, 7, 0, 0, 0),  # Layer 8: Terminal
            ]

            # Declare input
            input_placeholder = tf.placeholder(tf.float32,
                                               shape=(None, 28, 28, 1))

            # Do the parsing
            sequence_to_net(net_nsc, input_placeholder)

            # Save the graph
            file_writer = tf.summary.FileWriter(custom_graph_dir,
                                                tf.get_default_graph())
            file_writer.close()

        self.assertTrue(os.path.isdir(custom_graph_dir))
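
The NSC tuples used throughout these examples can be read off the inline comments as (index, layer_type, kernel_size, input1, input2), with layer-type codes 1=Convolution, 2=MaxPooling, 4=Identity, 5=Add, and 7=Terminal. A small helper that pretty-prints a sequence under that assumed encoding (the authoritative decoding lives in sequence_to_net()):

# Assumed NSC layer-type codes, inferred from the comments above.
NSC_TYPES = {1: "Convolution", 2: "MaxPooling", 4: "Identity",
             5: "Add", 7: "Terminal"}

def describe_nsc(sequence):
    """Print a human-readable view of an NSC sequence (hypothetical)."""
    for index, layer_type, kernel, in1, in2 in sequence:
        kind = NSC_TYPES.get(layer_type, "Unknown")
        if kind == "Add":
            print("Layer %d: Add(Layer%d, Layer%d)" % (index, in1, in2))
        elif kind in ("Convolution", "MaxPooling"):
            print("Layer %d: %s(Layer%d), kernel=%d"
                  % (index, kind, in1, kernel))
        else:
            print("Layer %d: %s" % (index, kind))
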
Example 3
    def test_sequence_to_net_addlayer(self):
        """Verify that the safe_add() method works correctly."""
        tf.reset_default_graph()
        custom_graph_dir = self.graphs_dir + "/test04"
        if os.path.isdir(custom_graph_dir):
            shutil.rmtree(custom_graph_dir)

        with tf.variable_scope(name_or_scope="cnn"):
            net_nsc = [
                (1, 4, 0, 0, 0),  # Layer 1: Identity(input)
                (2, 1, 1, 1, 0),  # Layer 2: Convolution(Layer1)
                (3, 1, 3, 2, 0),  # Layer 3: Convolution(Layer2)
                (4, 1, 1, 1, 0),  # Layer 4: Convolution(Layer1)
                (5, 1, 5, 4, 0),  # Layer 5: Convolution(Layer4)
                (6, 5, 0, 3, 5),  # Layer 6: Add(Layer3, Layer5)
                (7, 2, 3, 1, 0),  # Layer 7: MaxPooling(Layer1)
                (8, 1, 1, 7, 0),  # Layer 8: Convolution(Layer7)
                (9, 5, 0, 6, 8),  # Layer 9: Add(Layer6, Layer8)
                (10, 7, 0, 0, 0),  # Layer 10: Terminal
            ]

            # Declare input
            input_placeholder = tf.placeholder(tf.float32,
                                               shape=(None, 28, 28, 1))

            # Do the parsing
            sequence_to_net(net_nsc, input_placeholder)

            # Save the graph
            file_writer = tf.summary.FileWriter(custom_graph_dir,
                                                tf.get_default_graph())
            file_writer.close()

        self.assertTrue(os.path.isdir(custom_graph_dir))
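
safe_add() itself is not shown in this listing. One plausible behavior is to zero-pad the smaller feature map along each trailing dimension so both inputs share a shape before the element-wise add; a hedged sketch under that assumption (name and behavior are hypothetical, and static non-batch shapes are assumed):

import tensorflow as tf

def safe_add_sketch(tensor_a, tensor_b, name="ADD"):
    """Hypothetical: zero-pad the smaller tensor, then add element-wise."""
    shape_a = tensor_a.get_shape().as_list()
    shape_b = tensor_b.get_shape().as_list()
    paddings_a = [[0, 0]]  # the batch dimension is left untouched
    paddings_b = [[0, 0]]
    for dim_a, dim_b in zip(shape_a[1:], shape_b[1:]):
        diff = dim_a - dim_b
        paddings_a.append([0, max(-diff, 0)])
        paddings_b.append([0, max(diff, 0)])
    tensor_a = tf.pad(tensor_a, paddings_a)
    tensor_b = tf.pad(tensor_b, paddings_b)
    return tf.add(tensor_a, tensor_b, name=name)
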
Example 4
    def test_sequence_to_net_short(self):
        """Verify that the short NSC sequence is build correctly."""
        tf.reset_default_graph()
        custom_graph_dir = self.graphs_dir + "/test03"
        if os.path.isdir(custom_graph_dir):
            shutil.rmtree(custom_graph_dir)

        with tf.variable_scope(name_or_scope="cnn"):
            net_nsc = [
                (1, 4, 0, 0, 0),  # Layer 1: Identity(input)
                (2, 1, 3, 1, 0),  # Layer 2: Convolution(Layer1)
                (3, 1, 3, 2, 0),  # Layer 3: Convolution(Layer2)
                (4, 5, 0, 1, 3),  # Layer 4: Add(Layer1, Layer3)
                (5, 7, 0, 0, 0),  # Layer 5: Terminal
            ]

            # Declare input
            input_placeholder = tf.placeholder(tf.float32,
                                               shape=(None, 28, 28, 1))

            # Do the parsing
            sequence_to_net(net_nsc, input_placeholder)

            # Save the graph
            file_writer = tf.summary.FileWriter(custom_graph_dir,
                                                tf.get_default_graph())
            file_writer.close()

        self.assertTrue(os.path.isdir(custom_graph_dir))
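
Examples 2 through 4 only assert that the graph directory exists. A slightly stronger check, hedged on the TF1 event-file naming convention, confirms that the file written by tf.summary.FileWriter actually carries a GraphDef (custom_graph_dir is assumed to be in scope):

import glob

import tensorflow as tf

event_files = glob.glob(custom_graph_dir + "/events.out.tfevents.*")
assert event_files, "FileWriter produced no event file"
for event in tf.train.summary_iterator(event_files[0]):
    if event.graph_def:  # serialized GraphDef bytes, when present
        print("GraphDef found in %s" % event_files[0])
        break
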
Example 5
    def test_compute_flops(self):
        """Verify the FLOPs computation of the network defined in setUp().

        The test uses a pre-computed FLOPs value over the network.

        This test covers the case where more than one graph scope coexists
        in the TensorFlow workspace, verifying that the FLOPs computation is
        not affected by operations outside the target scope. This matters
        because the FLOPs computation depends on correctly filtering the
        scope with a regex.
        """
        tf.reset_default_graph()

        # Build a first neural network with one scope.
        with tf.variable_scope(name_or_scope="tnn"):
            # Declare input
            input_placeholder = tf.placeholder(tf.float32,
                                               shape=(None, 28, 28, 1))

            # Do the parsing
            sequence_to_net(self.net_nsc, input_placeholder)

        # Build a second network, identical to the first one but in a different
        # scope.
        with tf.variable_scope(name_or_scope="cnn"):
            # Declare input
            input_placeholder = tf.placeholder(tf.float32,
                                               shape=(None, 28, 28, 1))

            # Do the parsing
            sequence_to_net(self.net_nsc, input_placeholder)

        # Value computed beforehand
        target_flops = 75146
        # Make the assert operation.
        self.assertEqual(
            compute_network_flops(tf.get_default_graph(), "cnn",
                                  "workspace/flops_test"), target_flops)
Example 6
        def model_fn(features, labels, mode):
            with tf.variable_scope(self.variable_scope):
                # 1. Define the input placeholder
                if len(self.input_shape) == 2:
                    net_input = tf.reshape(tensor=features["x"],
                                           shape=[-1] +
                                           list(self.input_shape) + [1],
                                           name="L0_RESHAPE")
                else:
                    net_input = features["x"]

                # 2. Build the network from the encoded sequence
                self.tf_partial_network = sequence_to_net(
                    sequence=self.encoded_network, input_tensor=net_input)

                # 3. Build the Fully-Connected layers after block.
                with tf.name_scope("L_FC"):
                    # Flatten and connect to the Dense Layer
                    ll_flat = tf.layers.flatten(inputs=self.tf_partial_network,
                                                name="Flatten")
                    dense_layer = tf.layers.dense(inputs=ll_flat,
                                                  units=1024,
                                                  activation=tf.nn.relu,
                                                  name="DENSE")
                    dropout_layer = tf.layers.dropout(
                        inputs=dense_layer,
                        rate=0.4,
                        # pylint: disable=no-member
                        training=mode == tf.estimator.ModeKeys.TRAIN,
                        name="DROPOUT")

                # 4. Build the Prediction Layer based on a Softmax
                with tf.name_scope("L_PRED"):
                    # Logits layer
                    logits_layer = tf.layers.dense(inputs=dropout_layer,
                                                   units=self.n_clases,
                                                   name="PL_Logits")

                    predictions = {
                        "classes":
                        tf.argmax(input=logits_layer,
                                  axis=1,
                                  name="PL_Classes"),
                        "probabilities":
                        tf.nn.softmax(logits=logits_layer, name="PL_Softmax")
                    }

                    # If we are asked for prediction only, we return the
                    # prediction and stop adding nodes to the graph.
                    # pylint: disable=no-member
                    if mode == tf.estimator.ModeKeys.PREDICT:
                        return tf.estimator.EstimatorSpec(
                            mode=mode, predictions=predictions)

                # 5. Build the training nodes
                with tf.name_scope("L_TRAIN"):
                    # Loss
                    loss_layer = tf.losses.sparse_softmax_cross_entropy(
                        labels=labels, logits=logits_layer)

                    # Training Op
                    # pylint: disable=no-member
                    if mode == tf.estimator.ModeKeys.TRAIN:
                        # The Adam optimizer (we can swap in another one)
                        optimizer = tf.train.AdamOptimizer(learning_rate=0.001,
                                                           beta1=0.9,
                                                           beta2=0.999,
                                                           epsilon=10e-08,
                                                           name="OPT")
                        # We say that we want to optimize the loss layer using
                        # the optimizer.
                        train_op = optimizer.minimize(
                            loss=loss_layer,
                            global_step=tf.train.get_global_step(),
                            name="OPT_MIN")
                        # And return
                        # pylint: disable=no-member
                        return tf.estimator.EstimatorSpec(mode=mode,
                                                          loss=loss_layer,
                                                          train_op=train_op)

                # 6. Build the evaluation nodes.
                with tf.name_scope("L_EVAL"):
                    # Evaluation metric is accuracy
                    eval_metric_ops = {
                        "accuracy":
                        tf.metrics.accuracy(labels=labels,
                                            predictions=predictions["classes"],
                                            name="ACC")
                    }

                    # pylint: disable=no-member
                    return tf.estimator.EstimatorSpec(
                        mode=mode,
                        loss=loss_layer,
                        eval_metric_ops=eval_metric_ops)
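
A hedged usage sketch for the model_fn above, assuming it is exposed by the enclosing class and that the network has 10 output classes; the feature key "x" matches the lookup inside model_fn, the shapes follow the (28, 28, 1) placeholders used throughout, and "workspace/model_test" is a hypothetical model directory:

import numpy as np
import tensorflow as tf

# Hypothetical wiring: model_fn is assumed to be reachable here.
estimator = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir="workspace/model_test")

# Synthetic in-memory data, only to exercise the training path.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": np.random.rand(64, 28, 28, 1).astype(np.float32)},
    y=np.random.randint(0, 10, size=64).astype(np.int32),
    batch_size=16,
    num_epochs=None,
    shuffle=True)

estimator.train(input_fn=train_input_fn, steps=10)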