Example #1
    def make_fcnet(self):
        n_ffnet_inputs = self.n_ffnet_input
        n_ffnet_outputs = self.n_ffnet_output

        y_image = self.fcnet_input  # flat (batch, features) input, despite the name

        print("FCNET: Inputs: ", n_ffnet_inputs, " outputs: ", n_ffnet_outputs)

        # Three fully connected tanh layers: inputs -> 40 -> 10 -> 1.
        W_fc1 = my_ops.weight_variable([n_ffnet_inputs, 40], 0.1)
        b_fc1 = my_ops.bias_variable([40])

        W_fc2 = my_ops.weight_variable([40, 10], 0.1)
        b_fc2 = my_ops.bias_variable([10])

        W_fc3 = my_ops.weight_variable([10, 1], 0.1)
        b_fc3 = my_ops.bias_variable([1])

        h1 = tf.tanh(tf.matmul(y_image, W_fc1) + b_fc1)
        h2 = tf.tanh(tf.matmul(h1, W_fc2) + b_fc2)
        self.y_fc = tf.tanh(tf.matmul(h2, W_fc3) + b_fc3)

        # Elementwise squared error; the scalar mean is reported as "accuracy".
        self.fcloss = tf.squared_difference(self.y_fc, self.fcnet_target)
        self.fcnet_train_step = tf.train.AdamOptimizer(
            self.learning_rate).minimize(self.fcloss)
        self.fcaccuracy = tf.reduce_mean(self.fcloss)
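The `my_ops.weight_variable` and `my_ops.bias_variable` helpers are not shown in these examples. A minimal sketch of what they presumably look like in TF1, assuming the second positional argument of `weight_variable` is the truncated-normal stddev:

import tensorflow as tf

# Assumed my_ops helpers (not included in the source examples).
def weight_variable(shape, stddev=0.1):
    # Truncated-normal init keeps early activations in tanh's linear range.
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))

def bias_variable(shape, value=0.1):
    # A small constant bias is the usual companion default.
    return tf.Variable(tf.constant(value, shape=shape))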
Example #2
    def make_ffnet(self):
        n_ffnet_inputs = self.n_ffnet_input
        n_ffnet_outputs = self.n_ffnet_output
        print("FFNET: in: ", n_ffnet_inputs, " hid: ", self.n_ffnet_hidden,
              " out: ", n_ffnet_outputs)

        W_layer1 = my_ops.weight_variable([n_ffnet_inputs, self.n_ffnet_hidden[0]])
        b_layer1 = my_ops.bias_variable([self.n_ffnet_hidden[0]])

        W_layer2 = my_ops.weight_variable([self.n_ffnet_hidden[0], self.n_ffnet_hidden[1]])
        b_layer2 = my_ops.bias_variable([self.n_ffnet_hidden[1]])

        W_layer3 = my_ops.weight_variable([self.n_ffnet_hidden[1], n_ffnet_outputs])
        b_layer3 = my_ops.bias_variable([n_ffnet_outputs])

        h_1 = tf.nn.relu(tf.matmul(self.ffnet_input, W_layer1) + b_layer1)
        h_2 = tf.nn.relu(tf.matmul(h_1, W_layer2) + b_layer2)

        self.ffnet_output = tf.matmul(h_2, W_layer3) + b_layer3

        self.loss = tf.squared_difference(self.ffnet_output, self.ffnet_target)

        self.ffnet_train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

        self.accuracy = tf.reduce_mean(self.loss)
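Note that `tf.squared_difference` is elementwise, so `self.loss` above is a tensor, not a scalar; TF1's `minimize` sums gradients over it, making the implicit objective the sum of squared errors. A standalone sketch of the more conventional wiring, with placeholder names invented for illustration (x/t stand in for `self.ffnet_input`/`self.ffnet_target`):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
t = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.truncated_normal([8, 1], stddev=0.1))
b = tf.Variable(tf.zeros([1]))
y = tf.matmul(x, W) + b
loss = tf.reduce_mean(tf.squared_difference(y, t))  # reduce to a scalar MSE
train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    xs = np.random.rand(32, 8).astype(np.float32)
    ts = np.random.rand(32, 1).astype(np.float32)
    for _ in range(100):
        _, mse = sess.run([train_step, loss], feed_dict={x: xs, t: ts})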
Example #3
    def __init__(self, name, config, is_train):
        self.name = name
        self.is_train = is_train
        self.reuse = None

        with tf.variable_scope(self.name, reuse=self.reuse):
            G_W1 = utils.weight_variable([3, 3, 1, 32], name="G_W1")
            G_b1 = utils.bias_variable([32], name="G_b1")

            G_W2 = utils.weight_variable([3, 3, 32, 64], name="G_W2")
            G_b2 = utils.bias_variable([64], name="G_b2")

            G_W3 = utils.weight_variable([3, 3, 64, 64], name="G_W3")
            G_b3 = utils.bias_variable([64], name="G_b3")

            G_W4 = utils.weight_variable([3, 3, 64, 128], name="G_W4")
            G_b4 = utils.bias_variable([128], name="G_b4")

            G_W5 = utils.weight_variable([3, 3, 128, 128], name="G_W5")
            G_b5 = utils.bias_variable([128], name="G_b5")

            G_W6 = utils.weight_variable([3, 3, 128, 128], name="G_W6")
            G_b6 = utils.bias_variable([128], name="G_b6")

            G_W7 = utils.weight_variable([3, 3, 128, 64], name="G_W7")
            G_b7 = utils.bias_variable([64], name="G_b7")

            G_W8 = utils.weight_variable([1, 1, 64, 32], name="G_W8")
            G_b8 = utils.bias_variable([32], name="G_b8")

            G_W9 = utils.weight_variable([3, 3, 32, config.ClusterNo],
                                         name="G_W9")
            G_b9 = utils.bias_variable([config.ClusterNo], name="G_b9")

            self.Param = {
                'G_W1': G_W1,
                'G_b1': G_b1,
                'G_W2': G_W2,
                'G_b2': G_b2,
                'G_W3': G_W3,
                'G_b3': G_b3,
                'G_W4': G_W4,
                'G_b4': G_b4,
                'G_W5': G_W5,
                'G_b5': G_b5,
                'G_W6': G_W6,
                'G_b6': G_b6,
                'G_W7': G_W7,
                'G_b7': G_b7,
                'G_W8': G_W8,
                'G_b8': G_b8,
                'G_W9': G_W9,
                'G_b9': G_b9
            }

        # Create the saver only once, on first construction.
        if self.reuse is None:
            self.var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                              scope=self.name)
            self.saver = tf.train.Saver(self.var_list)
            self.reuse = True
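`utils.weight_variable`/`utils.bias_variable` are not included either. Given the `name=` arguments and the `variable_scope`/`reuse` bookkeeping, a plausible sketch (an assumption, not the repo's actual code) uses `tf.get_variable`, so that a second construction under `reuse=True` returns the same variables:

import tensorflow as tf

# Assumed utils helpers: tf.get_variable respects variable_scope(reuse=True),
# which is what the self.reuse flag above relies on.
def weight_variable(shape, name):
    return tf.get_variable(
        name, shape, initializer=tf.truncated_normal_initializer(stddev=0.1))

def bias_variable(shape, name, value=0.1):
    return tf.get_variable(
        name, shape, initializer=tf.constant_initializer(value))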
Example #5
    def make_convnet(self):
        n_ffnet_inputs = self.n_ffnet_input
        n_ffnet_outputs = self.n_ffnet_output

        print("COVNET: Inputs: ", n_ffnet_inputs, " outputs: ", n_ffnet_outputs)

        with tf.name_scope('reshape'):
            x_image = tf.reshape(self.covnet_input, [-1, self.resolution[0], self.resolution[1], 1])

        with tf.name_scope('conv1'):
            W_conv1 = my_ops.weight_variable([5, 5, 1, 32])
            b_conv1 = my_ops.bias_variable([32])
            h_conv1 = tf.nn.relu(my_ops.conv2d(x_image, W_conv1) + b_conv1)

        with tf.name_scope('pool1'):
            h_pool1 = my_ops.max_pool_2x2(h_conv1)

        with tf.name_scope('conv2'):
            W_conv2 = my_ops.weight_variable([5, 5, 32, 64])
            b_conv2 = my_ops.bias_variable([64])
            h_conv2 = tf.nn.relu(my_ops.conv2d(h_pool1, W_conv2) + b_conv2)

        with tf.name_scope('pool2'):
            h_pool2 = my_ops.max_pool_2x2(h_conv2)

        with tf.name_scope('fc1'):
            W_fc1 = my_ops.weight_variable([int(self.resolution[0]/4) * int(self.resolution[1]/4) * 64, 64])
            b_fc1 = my_ops.bias_variable([64])

            h_pool2_flat = tf.reshape(h_pool2, [-1, int(self.resolution[0]/4) * int(self.resolution[1]/4) * 64])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

        # single output:
        with tf.name_scope('fc2'):
            W_fc2 = my_ops.weight_variable([64, 1])
            b_fc2 = my_ops.bias_variable([1])

        self.y_conv = tf.tanh(tf.matmul(h_fc1, W_fc2) + b_fc2)
        self.covloss = tf.squared_difference(self.y_conv, self.covnet_target)
        self.covnet_train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.covloss)
        self.covaccuracy = tf.reduce_mean(self.covloss)
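`my_ops.conv2d` and `my_ops.max_pool_2x2` are also left undefined. The flattened fc1 size of `(width/4) * (height/4) * 64` only works out if each pool halves both spatial dimensions, which the standard TF1 definitions below (assumed) provide:

import tensorflow as tf

# Assumed my_ops helpers, matching the shape arithmetic in fc1 above.
def conv2d(x, W):
    # Stride-1 SAME convolution preserves width and height.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 pooling with stride 2 halves each spatial dimension, so two pools
    # give the (width/4) * (height/4) factor used when flattening.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')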
Example #6
# Network Parameters
num_input = 28  # MNIST input size per timestep (each 28x28 image is read row by row)
timesteps = 28  # Number of timesteps (rows per image)
num_hidden_units = 128  # Number of hidden units of the RNN
n_classes = 10  # Number of classes, one class per digit

# Create the graph for the RNN model
# Placeholders for inputs (x) and outputs (y)
x = tf.placeholder(tf.float32, shape=[None, timesteps, num_input], name='X')
y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y')

# create weight matrix initialized randomly from N~(0, 0.01)
W = weight_variable(shape=[num_hidden_units, n_classes])

# create bias vector initialized as zero
b = bias_variable(shape=[n_classes])

output_logits = RNN(x, W, b, timesteps, num_hidden_units)
y_pred = tf.nn.softmax(output_logits)

# Model predictions
cls_prediction = tf.argmax(output_logits, axis=1, name='predictions')

# Define the loss function, optimizer, and accuracy
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_logits),
    name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   name='Adam-op').minimize(loss)
correct_prediction = tf.equal(tf.argmax(output_logits, 1),
                              tf.argmax(y, 1),
                              name='correct_pred')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                          name='accuracy')
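`RNN()` is called above but not defined in this snippet. A minimal sketch of a compatible TF1 implementation, assuming a basic RNN cell whose last output feeds the linear layer `W`, `b`:

import tensorflow as tf

# Assumed implementation of the RNN() referenced above.
def RNN(x, weights, biases, timesteps, num_hidden):
    # Unstack (batch, timesteps, num_input) into `timesteps` tensors of
    # shape (batch, num_input), as static_rnn expects.
    x = tf.unstack(x, timesteps, axis=1)
    cell = tf.nn.rnn_cell.BasicRNNCell(num_hidden)
    outputs, state = tf.nn.static_rnn(cell, x, dtype=tf.float32)
    # Classify from the last time step's hidden output.
    return tf.matmul(outputs[-1], weights) + biases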
Example #7
    def make_convnet(self):
        # Earlier (commented-out) version of this network:
        #         # self.resolution is [width, height]
        #         x_image = tf.reshape(self.covnet_input, [-1, self.resolution[0], self.resolution[1], 1])
        #
        #         n_features_maps = 20
        #         filter_width = int(2)
        #         filter_height = int(2)
        #
        #         W_conv1 = my_ops.weight_variable([filter_width, filter_height, 1, n_features_maps], 0.001)
        #         b_conv1 = my_ops.bias_variable([n_features_maps])
        #         h_conv1 = tf.nn.tanh(my_ops.conv2d(x_image, W_conv1) + b_conv1)
        #
        #         h_pool1 = my_ops.max_pool_2x2(h_conv1)
        # #        h_pool1 = h_conv1
        #
        #         W_fc1 = my_ops.weight_variable([int(self.resolution[0]/2) * int(self.resolution[1]/2) * n_features_maps, 20], 0.001)
        #         b_fc1 = my_ops.bias_variable([20])
        #
        #         h_pool1_flat = tf.reshape(h_pool1, [-1, int(self.resolution[0]/2) * int(self.resolution[1]/2) * n_features_maps])
        #         h_fc1 = tf.nn.tanh(tf.matmul(h_pool1_flat, W_fc1) + b_fc1)
        #
        #         # single output:
        #         W_fc2 = my_ops.weight_variable([20, 1], 0.01)
        #         b_fc2 = my_ops.bias_variable([1])
        #
        #         self.y_conv = tf.tanh(tf.matmul(h_fc1, W_fc2) + b_fc2)
        #
        #         self.covloss = tf.squared_difference(self.y_conv, self.covnet_target)
        #         self.covnet_train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.covloss)
        #         self.covaccuracy = tf.reduce_mean(self.covloss)

        with tf.name_scope('reshape'):
            x_image = tf.reshape(
                self.covnet_input,
                [-1, self.resolution[0], self.resolution[1], 1])

        # First convolutional layer - maps the grayscale input to 8 feature maps.
        with tf.name_scope('conv1'):
            W_conv1 = my_ops.weight_variable([5, 5, 1, 8], 0.1)
            b_conv1 = my_ops.bias_variable([8])
            h_conv1 = tf.nn.relu(my_ops.conv2d(x_image, W_conv1) + b_conv1)

        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            h_pool1 = my_ops.max_pool_2x2(h_conv1)

        # Second convolutional layer -- keeps the channel count at 8.
        with tf.name_scope('conv2'):
            W_conv2 = my_ops.weight_variable([5, 5, 8, 8], 0.1)
            b_conv2 = my_ops.bias_variable([8])
            h_conv2 = tf.nn.relu(my_ops.conv2d(h_pool1, W_conv2) + b_conv2)

        # Second pooling layer.
        with tf.name_scope('pool2'):
            h_pool2 = my_ops.max_pool_2x2(h_conv2)

        # Fully connected layer 1 -- after two rounds of 2x downsampling the
        # feature maps are (width/4) x (height/4) x 8 -- map this to 10 features.
        with tf.name_scope('fc1'):
            W_fc1 = my_ops.weight_variable([
                int(self.resolution[0] / 4) * int(self.resolution[1] / 4) * 8,
                10
            ], 0.001)
            b_fc1 = my_ops.bias_variable([10])

            h_pool2_flat = tf.reshape(h_pool2, [
                -1,
                int(self.resolution[0] / 4) * int(self.resolution[1] / 4) * 8
            ])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

        # Map the 10 features to a single tanh output.
        with tf.name_scope('fc2'):
            W_fc2 = my_ops.weight_variable([10, 1], 0.1)
            b_fc2 = my_ops.bias_variable([1])

            self.y_conv = tf.tanh(tf.matmul(h_fc1, W_fc2) + b_fc2)
            self.covloss = tf.squared_difference(self.y_conv,
                                                 self.covnet_target)
            self.covnet_train_step = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.covloss)
            self.covaccuracy = tf.reduce_mean(self.covloss)
Example #8
    def model(self, input):
        """
        Bakes the CNN architecture into a computational graph
        :param input: Tensor to contain the input image. May be a `tf.Variable` or `tf.placeholder`.
        :return: softmax outputs and output logits before softmax activation as 2-tuple
        """

        # (50, 100, 1) -> (50, 100, 64)
        with tf.variable_scope("conv0"):
            conv0_weights = weights_variable_xavier(
                [3, 3, self._input_channels, 64], name=CONV0_WEIGHTS)
            conv0_bias = bias_variable([64], value=0.1, name=CONV0_BIAS)
            conv0_z = conv2d(input, conv0_weights) + conv0_bias
            conv0_a = tf.nn.relu(conv0_z)

        # (50, 100, 64) -> (50, 100, 64)
        with tf.variable_scope("conv1"):
            conv1_weights = weights_variable_xavier([3, 3, 64, 64],
                                                    name=CONV1_WEIGHTS)
            conv1_bias = bias_variable([64], value=0.1, name=CONV1_BIAS)
            conv1_z = conv2d(conv0_a, conv1_weights) + conv1_bias
            conv1_a = tf.nn.relu(conv1_z)

        # (50, 100, 64) -> (25, 50, 64)
        # TODO does variable_scope make sense if pooling layers don't even have variables?
        with tf.variable_scope("pool0"):
            pool0 = tf.layers.max_pooling2d(conv1_a,
                                            pool_size=[2, 2],
                                            strides=2,
                                            padding="same")

        # (25, 50, 64) -> (25, 50, 128)
        with tf.variable_scope("conv2"):
            conv2_weights = weights_variable_xavier([3, 3, 64, 128],
                                                    name=CONV2_WEIGHTS)
            conv2_bias = bias_variable([128], value=0.1, name=CONV2_BIAS)
            conv2_z = conv2d(pool0, conv2_weights) + conv2_bias
            conv2_a = tf.nn.relu(conv2_z)

        # (25, 50, 128) -> (25, 50, 128)
        with tf.variable_scope("conv3"):
            conv3_weights = weights_variable_xavier([3, 3, 128, 128],
                                                    name=CONV3_WEIGHTS)
            conv3_bias = bias_variable([128], value=0.1, name=CONV3_BIAS)
            conv3_z = conv2d(conv2_a, conv3_weights) + conv3_bias
            conv3_a = tf.nn.relu(conv3_z)

        # (25, 50, 128) -> (25, 50, 128)
        with tf.variable_scope("pool1"):
            pool1 = tf.layers.max_pooling2d(conv3_a,
                                            pool_size=[2, 2],
                                            strides=1,
                                            padding="same")

        # (25, 50, 128) -> (25, 50, 256)
        with tf.variable_scope("conv4"):
            conv4_weights = weights_variable_xavier([3, 3, 128, 256],
                                                    name=CONV4_WEIGHTS)
            conv4_bias = bias_variable([256], value=0.1, name=CONV4_BIAS)
            conv4_z = conv2d(pool1, conv4_weights) + conv4_bias
            conv4_a = tf.nn.relu(conv4_z)

        # (25, 50, 256) -> (25, 50, 256)
        with tf.variable_scope("conv5"):
            conv5_weights = weights_variable_xavier([3, 3, 256, 256],
                                                    name=CONV5_WEIGHTS)
            conv5_bias = bias_variable([256], value=0.1, name=CONV5_BIAS)
            conv5_z = conv2d(conv4_a, conv5_weights) + conv5_bias
            conv5_a = tf.nn.relu(conv5_z)

        # (25, 50, 256) -> (13, 25, 256)
        with tf.variable_scope("pool2"):
            pool2 = tf.layers.max_pooling2d(conv5_a,
                                            pool_size=[2, 2],
                                            strides=2,
                                            padding="same")

        # (13, 25, 256) -> (13, 25, 512)
        with tf.variable_scope("conv6"):
            conv6_weights = weights_variable_xavier([3, 3, 256, 512],
                                                    name=CONV6_WEIGHTS)
            conv6_bias = bias_variable([512], value=0.1, name=CONV6_BIAS)
            conv6_z = conv2d(pool2, conv6_weights) + conv6_bias
            conv6_a = tf.nn.relu(conv6_z)

        # (13, 25, 512) -> (13, 25, 512)
        with tf.variable_scope("pool3"):
            pool3 = tf.layers.max_pooling2d(conv6_a,
                                            pool_size=[2, 2],
                                            strides=1,
                                            padding="same")

        # (13, 25, 512) -> (13, 25, 512)
        with tf.variable_scope("conv7"):
            conv7_weights = weights_variable_xavier([3, 3, 512, 512],
                                                    name=CONV7_WEIGHTS)
            conv7_bias = bias_variable([512], value=0.1, name=CONV7_BIAS)
            conv7_z = conv2d(pool3, conv7_weights) + conv7_bias
            conv7_a = tf.nn.relu(conv7_z)

        # (13, 25, 512) -> (7, 13, 512)
        with tf.variable_scope("pool4"):
            pool4 = tf.layers.max_pooling2d(conv7_a,
                                            pool_size=[2, 2],
                                            strides=2,
                                            padding="same")

        flatten = tf.reshape(pool4, [-1, 7 * 13 * 512])

        with tf.variable_scope("fc0"):
            fc0_weights = weights_variable_truncated_normal(
                [7 * 13 * 512, 1024], stddev=0.005, name=FC0_WEIGHTS)
            fc0_bias = bias_variable([1024], value=0.1, name=FC0_BIAS)
            fc0_z = tf.matmul(flatten, fc0_weights) + fc0_bias
            fc0_a = tf.nn.relu(fc0_z)
            dropout_0 = tf.layers.dropout(fc0_a, rate=self._drop_rate)

        with tf.variable_scope("fc1"):
            fc1_weights = weights_variable_truncated_normal([1024, 2048],
                                                            stddev=0.005,
                                                            name=FC1_WEIGHTS)
            fc1_bias = bias_variable([2048], value=0.1, name=FC1_BIAS)
            fc1_z = tf.matmul(dropout_0, fc1_weights) + fc1_bias
            fc1_a = tf.nn.relu(fc1_z)
            dropout_1 = tf.layers.dropout(fc1_a, rate=self._drop_rate)

        # Output layers
        with tf.variable_scope("char0"):
            char0_weights = weights_variable_xavier(
                [2048, self._num_distinct_chars + 1], name=FC_CHAR0_WEIGHTS)
            char0_bias = bias_variable([self._num_distinct_chars + 1],
                                       name=FC_CHAR0_BIAS)
            char0_logits = tf.matmul(dropout_1, char0_weights) + char0_bias
            char0_out = tf.nn.softmax(char0_logits)

        with tf.variable_scope("char1"):
            char1_weights = weights_variable_xavier(
                [2048, self._num_distinct_chars + 1], name=FC_CHAR1_WEIGHTS)
            char1_bias = bias_variable([self._num_distinct_chars + 1],
                                       name=FC_CHAR1_BIAS)
            char1_logits = tf.matmul(dropout_1, char1_weights) + char1_bias
            char1_out = tf.nn.softmax(char1_logits)

        with tf.variable_scope("char2"):
            char2_weights = weights_variable_xavier(
                [2048, self._num_distinct_chars + 1], name=FC_CHAR2_WEIGHTS)
            char2_bias = bias_variable([self._num_distinct_chars + 1],
                                       name=FC_CHAR2_BIAS)
            char2_logits = tf.matmul(dropout_1, char2_weights) + char2_bias
            char2_out = tf.nn.softmax(char2_logits)

        with tf.variable_scope("char3"):
            char3_weights = weights_variable_xavier(
                [2048, self._num_distinct_chars + 1], name=FC_CHAR3_WEIGHTS)
            char3_bias = bias_variable([self._num_distinct_chars + 1],
                                       name=FC_CHAR3_BIAS)
            char3_logits = tf.matmul(dropout_1, char3_weights) + char3_bias
            char3_out = tf.nn.softmax(char3_logits)

        with tf.variable_scope("char4"):
            char4_weights = weights_variable_xavier(
                [2048, self._num_distinct_chars + 1], name=FC_CHAR4_WEIGHTS)
            char4_bias = bias_variable([self._num_distinct_chars + 1],
                                       name=FC_CHAR4_BIAS)
            char4_logits = tf.matmul(dropout_1, char4_weights) + char4_bias
            char4_out = tf.nn.softmax(char4_logits)

        with tf.variable_scope("char5"):
            char5_weights = weights_variable_xavier(
                [2048, self._num_distinct_chars + 1], name=FC_CHAR5_WEIGHTS)
            char5_bias = bias_variable([self._num_distinct_chars + 1],
                                       name=FC_CHAR5_BIAS)
            char5_logits = tf.matmul(dropout_1, char5_weights) + char5_bias
            char5_out = tf.nn.softmax(char5_logits)

        with tf.variable_scope("char6"):
            char6_weights = weights_variable_xavier(
                [2048, self._num_distinct_chars + 1], name=FC_CHAR6_WEIGHTS)
            char6_bias = bias_variable([self._num_distinct_chars + 1],
                                       name=FC_CHAR6_BIAS)
            char6_logits = tf.matmul(dropout_1, char6_weights) + char6_bias
            char6_out = tf.nn.softmax(char6_logits)

        # Keep track of weight variables
        self._weight_vars[CONV0_WEIGHTS] = conv0_weights
        self._weight_vars[CONV1_WEIGHTS] = conv1_weights
        self._weight_vars[CONV2_WEIGHTS] = conv2_weights
        self._weight_vars[CONV3_WEIGHTS] = conv3_weights
        self._weight_vars[CONV4_WEIGHTS] = conv4_weights
        self._weight_vars[CONV5_WEIGHTS] = conv5_weights
        self._weight_vars[CONV6_WEIGHTS] = conv6_weights
        self._weight_vars[CONV7_WEIGHTS] = conv7_weights
        self._weight_vars[FC0_WEIGHTS] = fc0_weights
        self._weight_vars[FC1_WEIGHTS] = fc1_weights

        self._weight_vars[CONV0_BIAS] = conv0_bias
        self._weight_vars[CONV1_BIAS] = conv1_bias
        self._weight_vars[CONV2_BIAS] = conv2_bias
        self._weight_vars[CONV3_BIAS] = conv3_bias
        self._weight_vars[CONV4_BIAS] = conv4_bias
        self._weight_vars[CONV5_BIAS] = conv5_bias
        self._weight_vars[CONV6_BIAS] = conv6_bias
        self._weight_vars[CONV7_BIAS] = conv7_bias
        self._weight_vars[FC0_BIAS] = fc0_bias
        self._weight_vars[FC1_BIAS] = fc1_bias

        self._weight_vars[FC_CHAR0_WEIGHTS] = char0_weights
        self._weight_vars[FC_CHAR1_WEIGHTS] = char1_weights
        self._weight_vars[FC_CHAR2_WEIGHTS] = char2_weights
        self._weight_vars[FC_CHAR3_WEIGHTS] = char3_weights
        self._weight_vars[FC_CHAR4_WEIGHTS] = char4_weights
        self._weight_vars[FC_CHAR5_WEIGHTS] = char5_weights
        self._weight_vars[FC_CHAR6_WEIGHTS] = char6_weights

        self._weight_vars[FC_CHAR0_BIAS] = char0_bias
        self._weight_vars[FC_CHAR1_BIAS] = char1_bias
        self._weight_vars[FC_CHAR2_BIAS] = char2_bias
        self._weight_vars[FC_CHAR3_BIAS] = char3_bias
        self._weight_vars[FC_CHAR4_BIAS] = char4_bias
        self._weight_vars[FC_CHAR5_BIAS] = char5_bias
        self._weight_vars[FC_CHAR6_BIAS] = char6_bias

        # Combine output and output logits
        outputs = tf.stack([
            char0_out, char1_out, char2_out, char3_out, char4_out, char5_out,
            char6_out
        ],
                           axis=1)
        logits = tf.stack([
            char0_logits, char1_logits, char2_logits, char3_logits,
            char4_logits, char5_logits, char6_logits
        ],
                          axis=1)

        return outputs, logits
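A hypothetical sketch of how the returned `(outputs, logits)` pair might be trained: the shapes follow the layer comments above (seven character slots, `num_distinct_chars + 1` classes per slot), and the placeholder for `logits` plus the value of `num_distinct_chars` are illustrative assumptions, not part of the original class.

import tensorflow as tf

# Stand-in for the real graph output of model(); in practice `logits` would
# come from the method above rather than a placeholder.
num_distinct_chars = 36  # assumed config value
logits = tf.placeholder(tf.float32, [None, 7, num_distinct_chars + 1])
labels = tf.placeholder(tf.int32, [None, 7])  # one class id per character slot

# Per-slot cross-entropy on the pre-softmax logits, averaged over all slots.
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))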