Code Example #1
    def _inference(self):
        """
        Builds the graph as far as is required for running the model forward to make predictions
        :return: input placeholder, label placeholder, output
        """
        x = tf.placeholder(tf.float32,
                           shape=[None, 784],
                           name=self.input_placeholder_name)
        y_ = tf.placeholder(tf.float32,
                            shape=[None, 10],
                            name=self.label_placeholder_name)

        # first layer variables
        W_fc1 = cnnu.weight_variable([784, neurons1])
        b_fc1 = cnnu.bias_variable([neurons1])

        # second layer variables
        W_fc2 = cnnu.weight_variable([neurons1, neurons2])
        b_fc2 = cnnu.bias_variable([neurons2])

        # third layer variables
        W_fc3 = cnnu.weight_variable([neurons2, neurons3])
        b_fc3 = cnnu.bias_variable([neurons3])

        # assemble the model: two ReLU hidden layers feeding a linear output layer

        layer1 = tf.nn.relu(tf.matmul(x, W_fc1) + b_fc1)
        layer2 = tf.nn.relu(tf.matmul(layer1, W_fc2) + b_fc2)
        y = tf.add(tf.matmul(layer2, W_fc3), b_fc3, name=self.output_node_name)

        return x, y_, y
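
The example above relies on a cnnu helper module that is not included in the snippet (the later examples use it as well), and neurons1, neurons2 and neurons3 are assumed to be module-level hyperparameters for the hidden-layer sizes. Below is a minimal sketch of what such TensorFlow 1.x helpers commonly look like; the initializer choices are an assumption, not the project's actual code.

import tensorflow as tf

def weight_variable(shape, name=None):
    # Truncated-normal initial values break the symmetry between units.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name=None):
    # A small positive bias keeps ReLU units active early in training.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
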
Code Example #2
    def _inference(self):
        """
        Builds the graph as far as is required for running the model forward to make predictions
        :return: input placeholder, output, label placeholder
        """
        x = tf.placeholder(tf.float32,
                           shape=[None, 784],
                           name=self.input_placeholder_name)
        y_ = tf.placeholder(tf.float32,
                            shape=[None, 10],
                            name=self.label_placeholder_name)

        # build the layers: reshape the flat 784-vector input back into
        # 28x28 single-channel images
        x_image = tf.reshape(x, [-1, 28, 28, 1])

        # first convolution pooling layer
        W_conv1 = cnnu.weight_variable([5, 5, 1, 32])
        b_conv1 = cnnu.bias_variable([32])

        h_conv1 = tf.nn.relu(cnnu.conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = cnnu.max_pool_2x2(h_conv1)

        # second convolution pooling layer
        W_conv2 = cnnu.weight_variable([5, 5, 32, 64])
        b_conv2 = cnnu.bias_variable([64])
        h_conv2 = tf.nn.relu(cnnu.conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = cnnu.max_pool_2x2(h_conv2)

        # densely connected layer
        W_fc1 = cnnu.weight_variable([7 * 7 * 64, 1024])
        b_fc1 = cnnu.bias_variable([1024])

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

        # readout layer
        W_fc2 = cnnu.weight_variable([1024, 10])
        b_fc2 = cnnu.bias_variable([10])

        y_conv = tf.add(tf.matmul(h_fc1, W_fc2),
                        b_fc2,
                        name=self.output_node_name)

        return x, y_conv, y_
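
Example #2 additionally uses cnnu.conv2d and cnnu.max_pool_2x2. A plausible sketch of these helpers is shown below, inferred from how the shapes work out in the snippet rather than taken from the project: with 'SAME' padding and stride 1 the convolutions preserve the 28x28 spatial size, and each 2x2 max pool halves it (28 -> 14 -> 7), which is why the densely connected layer expects 7 * 7 * 64 inputs.

def conv2d(x, W):
    # Stride-1 convolution with 'SAME' padding leaves the spatial size unchanged.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 pooling with stride 2 halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
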
Code Example #3
    def _inference(self, training=True):
        """Build the CIFAR-10 model.
        Returns:
          Input placeholder, logits, and label placeholder.
        """
        # We instantiate all variables using tf.get_variable() so they can be shared between the
        # training and inference graphs. With get_variable, no new variable is created if one with
        # the same name already exists in the current scope.

        if not training:
            x = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=self.input_placeholder_name)
            y_ = tf.placeholder(tf.float32, shape=[None, 10], name=self.label_placeholder_name)
        else:
            x = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=self.input_placeholder_name+'_train')
            y_ = tf.placeholder(tf.float32, shape=[None, 10], name=self.label_placeholder_name)

        # reuse the variables only when building the inference graph
        with tf.variable_scope('network', reuse=not training):
            img = x
            # do preprocessing if needed
            img = self.pre_process(images=img, training=training)

            # conv1
            kernel1 = cnnu.weight_variable([5, 5, 3, 64], name='kernel1')
            conv1 = tf.nn.conv2d(img, kernel1, [1, 1, 1, 1], padding='SAME')
            biases1 = cnnu.bias_variable([64], name='bias1')
            pre_activation = tf.nn.bias_add(conv1, biases1)
            relu1 = tf.nn.relu(pre_activation)

            # pool1
            pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                   padding='SAME', name='pool1')
            # norm1
            norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                              name='norm1')

            # conv2
            kernel2 = cnnu.weight_variable([5, 5, 64, 96], name='kernel2')
            conv2 = tf.nn.conv2d(norm1, kernel2, [1, 1, 1, 1], padding='SAME')
            biases2 = cnnu.bias_variable([96], name='bias2')
            pre_activation2 = tf.nn.bias_add(conv2, biases2)
            relu2 = tf.nn.relu(pre_activation2)

            # norm2
            norm2 = tf.nn.lrn(relu2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                              name='norm2')
            # pool2
            pool2 = tf.nn.max_pool(norm2, ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1], padding='SAME', name='pool2')

            # local3
            # Move everything into depth so we can perform a single matrix multiply.
            reshape = tf.reshape(pool2, [-1, 6 * 6 * 96])
            weights_1 = cnnu.weight_variable([6 * 6 * 96, 256], name='local_weights_3')
            biases_1 = cnnu.bias_variable([256], name='local_bias_3')
            local3 = tf.nn.relu(tf.matmul(reshape, weights_1) + biases_1, name='local3')

            # local4
            weights_2 = cnnu.weight_variable([256, 192], name='local_weights_4')
            biases_2 = cnnu.bias_variable([192], name='local_bias4')
            local4 = tf.nn.relu(tf.matmul(local3, weights_2) + biases_2, name='local4')

            # linear layer (WX + b)
            # We don't apply softmax here because
            # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
            # and performs the softmax internally for efficiency.
            weights_final = cnnu.weight_variable([192, NUM_CLASSES], name='final_fc_weights')
            biases_final = cnnu.bias_variable([NUM_CLASSES], name='final_fc_bias')
            # use only the last part of the output node name, since it also contains the scope
            softmax_linear = tf.add(tf.matmul(local4, weights_final), biases_final,
                                    name=self.output_node_name.split('/')[1])

            return x, softmax_linear, y_
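
For the reuse=not training scope above to work as the opening comment describes, the cnnu helpers in this example must create their variables through tf.get_variable rather than tf.Variable, so that a second call with reuse=True returns the existing variables instead of creating new ones. A minimal sketch of such helpers, given as an assumption rather than the project's actual code:

def weight_variable(shape, name):
    # Inside a variable_scope opened with reuse=True, tf.get_variable returns
    # the variable that was created under the same name in the training graph.
    return tf.get_variable(name, shape,
                           initializer=tf.truncated_normal_initializer(stddev=0.1))

def bias_variable(shape, name):
    return tf.get_variable(name, shape,
                           initializer=tf.constant_initializer(0.1))

With helpers like these, calling _inference(training=True) first creates the variables and a later _inference(training=False) call reuses them. A second _inference variant follows, which builds a similar CIFAR-10 network without the scope reuse and with 3x3 pooling windows.
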
    def _inference(self):
        """Build the CIFAR-10 model.
        Returns:
          Input placeholder, logits, and label placeholder.
        """
        # We instantiate all variables using tf.get_variable() instead of
        # tf.Variable() in order to share variables across multiple GPU training runs.
        # If we only ran this model on a single GPU, we could simplify this function
        # by replacing all instances of tf.get_variable() with tf.Variable().
        #

        x = tf.placeholder(tf.float32,
                           shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                           name=self.input_placeholder_name)
        y_ = tf.placeholder(tf.float32,
                            shape=[None, 10],
                            name=self.label_placeholder_name)
        # conv1
        kernel1 = cnnu.weight_variable([5, 5, 3, 64])
        conv1 = tf.nn.conv2d(x, kernel1, [1, 1, 1, 1], padding='SAME')
        biases1 = cnnu.bias_variable([64])
        pre_activation = tf.nn.bias_add(conv1, biases1)
        relu1 = tf.nn.relu(pre_activation)

        # pool1
        pool1 = tf.nn.max_pool(relu1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool1')
        # norm1
        norm1 = tf.nn.lrn(pool1,
                          4,
                          bias=1.0,
                          alpha=0.001 / 9.0,
                          beta=0.75,
                          name='norm1')

        # conv2
        kernel2 = cnnu.weight_variable([5, 5, 64, 64])
        conv2 = tf.nn.conv2d(norm1, kernel2, [1, 1, 1, 1], padding='SAME')
        biases2 = cnnu.bias_variable([64])
        pre_activation2 = tf.nn.bias_add(conv2, biases2)
        relu2 = tf.nn.relu(pre_activation2)

        # norm2
        norm2 = tf.nn.lrn(relu2,
                          4,
                          bias=1.0,
                          alpha=0.001 / 9.0,
                          beta=0.75,
                          name='norm2')
        # pool2
        pool2 = tf.nn.max_pool(norm2,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool2')

        # local3
        # Move everything into depth so we can perform a single matrix multiply.
        reshape = tf.reshape(pool2, [-1, 8 * 8 * 64])
        weights_1 = cnnu.weight_variable([8 * 8 * 64, 384])

        biases_1 = cnnu.bias_variable([384])
        local3 = tf.nn.relu(tf.matmul(reshape, weights_1) + biases_1,
                            name='local3')

        # local4
        weights_2 = cnnu.weight_variable([384, 192])
        biases_2 = cnnu.bias_variable([192])
        local4 = tf.nn.relu(tf.matmul(local3, weights_2) + biases_2,
                            name='local4')

        # linear layer (WX + b)
        # We don't apply softmax here because
        # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
        # and performs the softmax internally for efficiency.
        weights_final = cnnu.weight_variable([192, NUM_CLASSES])
        biases_final = cnnu.bias_variable([NUM_CLASSES])
        softmax_linear = tf.add(tf.matmul(local4, weights_final),
                                biases_final,
                                name=self.output_node_name)

        return x, softmax_linear, y_
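
Each _inference variant returns the input placeholder, the unscaled logits and the label placeholder. The sketch below shows one way these could be wired into a loss and a training step; it is assumed usage, not part of the snippet. model, batch_x and batch_y are hypothetical names, and because y_ holds one-hot labels the dense softmax cross-entropy op is used rather than the sparse variant mentioned in the comment.

x, logits, y_ = model._inference()

# One-hot labels, so apply the dense softmax cross-entropy to the unscaled logits.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_step, feed_dict={x: batch_x, y_: batch_y})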