Example #1
    def inference(self, input_x):
        with tf.variable_scope('fc'):
            weights3 = variable('weights', [2048 * 50],
                                tf.contrib.layers.xavier_initializer())
            biases3 = variable('biases', [50], tf.constant_initializer(0.0))
            logits = tf.matmul(input_x,
                               tf.reshape(weights3, [2048, 50])) + biases3
        return logits
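Note: every example on this page calls a variable helper that is not defined here. Judging from the call sites (a name, a flat shape, and an initializer), it is presumably a thin wrapper around tf.get_variable; a minimal sketch, assuming TF 1.x graph mode:

import tensorflow as tf  # TF 1.x assumed

def variable(name, shape, initializer):
    # Thin wrapper: create (or reuse) a variable in the current variable scope.
    return tf.get_variable(name, shape, dtype=tf.float32,
                           initializer=initializer)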
Example #2
    def inference(self, input_x):
        with tf.variable_scope('fc6'):
            weights = variable(
                'weights', [self.input_dim * self.num_classes],
                tf.initializers.truncated_normal(mean=0.0, stddev=0.01))
            biases = variable('biases', [self.num_classes],
                              tf.initializers.constant(value=0.01))

            logits = tf.matmul(
                input_x, tf.reshape(
                    weights, [self.input_dim, self.num_classes])) + biases

        return logits
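A pattern worth noting across all of these examples: weights are created as a flat 1-D vector and reshaped to matrix form at the call site, presumably so that the surrounding codebase can treat all trainable parameters as flat vectors. The two forms build the same matrix, as in this sketch (sizes borrowed from Example #1). One caveat: shape-dependent initializers such as Xavier/Glorot see the flat shape rather than the matrix shape, so the computed fan-in and fan-out differ from initializing the matrix directly.

import tensorflow as tf  # TF 1.x assumed

in_dim, out_dim = 2048, 50
w_flat = tf.get_variable('w_flat', [in_dim * out_dim],
                         initializer=tf.glorot_uniform_initializer())
w = tf.reshape(w_flat, [in_dim, out_dim])  # same values, matrix shape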
Example #3
    def inference(self, input_x):

        with tf.variable_scope('embedding_layer'):
            embedding_users = variable_with_weight_decay(
                "embedding_users", [self.num_users * self.embedding_size],
                stddev=1.0 / math.sqrt(float(self.embedding_size)),
                wd=self.weight_decay)
            embedding_items = variable_with_weight_decay(
                "embedding_items", [self.num_items * self.embedding_size],
                stddev=1.0 / math.sqrt(float(self.embedding_size)),
                wd=self.weight_decay)
            user_embedding = tf.nn.embedding_lookup(
                tf.reshape(embedding_users,
                           (self.num_users, self.embedding_size)),
                input_x[:, 0], name="user_embedding")
            item_embedding = tf.nn.embedding_lookup(
                tf.reshape(embedding_items,
                           (self.num_items, self.embedding_size)),
                input_x[:, 1], name="item_embedding")

            bias_users = variable("bias_users", [self.num_users],
                                  tf.constant_initializer(0.0))
            bias_items = variable("bias_items", [self.num_items],
                                  tf.constant_initializer(0.0))
            user_bias = tf.nn.embedding_lookup(
                tf.reshape(bias_users, (self.num_users, 1)),
                input_x[:, 0], name="user_bias")
            item_bias = tf.nn.embedding_lookup(
                tf.reshape(bias_items, (self.num_items, 1)),
                input_x[:, 1], name="item_bias")
            global_bias = variable("global_bias", [1],
                                   tf.constant_initializer(0.0))

        with tf.variable_scope('interaction_layer'):
            # predicted rating: embedding dot product plus the bias terms
            rating = tf.squeeze(
                tf.reduce_sum(user_embedding * item_embedding,
                              axis=1, keepdims=True) +
                user_bias + item_bias + global_bias,
                name="rating_squeeze")

        return rating
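The variable_with_weight_decay helper used here (and in the remaining examples) is also not defined on this page. Its signature matches the helper from the classic TensorFlow CIFAR-10 tutorial, so a sketch under that assumption: create a truncated-normal variable and, when wd is set, register an L2 penalty in a 'losses' collection that the training loss presumably sums over.

import tensorflow as tf  # TF 1.x assumed

def variable_with_weight_decay(name, shape, stddev, wd):
    var = tf.get_variable(
        name, shape, dtype=tf.float32,
        initializer=tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None and wd != 0:
        # L2 weight decay, collected for inclusion in the total loss.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var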
Example #4
    def conv1d_softplus(self, input_x, conv_patch_size, input_channels,
                        output_channels, stride, wd):
        weights = variable_with_weight_decay(
            'weights', [conv_patch_size * input_channels * output_channels],
            stddev=0.05,
            wd=wd)
        biases = variable('biases', [output_channels],
                          tf.constant_initializer(0.0))
        weights_reshaped = tf.reshape(
            weights, [conv_patch_size, input_channels, output_channels])
        # Note: despite the "softplus" in the method name, this applies ReLU.
        hidden = tf.nn.relu(conv1d(input_x, weights_reshaped, stride) + biases)

        return hidden
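conv1d is another helper assumed by this example; from its use it is presumably a thin wrapper over tf.nn.conv1d. A sketch; the SAME padding is a guess, though it is consistent with the width arithmetic in Example #10 below:

import tensorflow as tf  # TF 1.x assumed

def conv1d(x, w, stride):
    # x: [batch, width, in_channels]; w: [patch, in_channels, out_channels]
    return tf.nn.conv1d(x, w, stride=stride, padding='SAME')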
Example #5
    def fnn_layer(self, hidden_input, output_dim):
        input_dim = hidden_input.get_shape()[1].value
        print("input dim for hidden layer: %s" % input_dim)
        weights = variable_with_weight_decay(
            'weights',
            [input_dim * output_dim],
            stddev=1.0 / math.sqrt(float(input_dim)),
            # wd=0
            wd=self.weight_decay)
        bias = variable('biases', [output_dim], tf.constant_initializer(0.0))
        hidden_output = tf.matmul(
            hidden_input,
            tf.reshape(weights, (int(input_dim), int(output_dim)))) + bias
        return hidden_output
Example #6
    def conv2d_softplus(self, input_x, conv_patch_size, input_channels,
                        output_channels, stride):
        weights = variable_with_weight_decay(
            'weights', [
                conv_patch_size * conv_patch_size * input_channels *
                output_channels
            ],
            stddev=2.0 / math.sqrt(
                float(conv_patch_size * conv_patch_size * input_channels)),
            wd=self.weight_decay)
        biases = variable('biases', [output_channels],
                          tf.constant_initializer(0.0))
        weights_reshaped = tf.reshape(weights, [
            conv_patch_size, conv_patch_size, input_channels, output_channels
        ])
        # Note: despite the "softplus" in the method name, this applies tanh.
        hidden = tf.nn.tanh(conv2d(input_x, weights_reshaped, stride) + biases)

        return hidden
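Likewise, conv2d is presumably a thin wrapper over tf.nn.conv2d; a sketch under the same SAME-padding assumption:

import tensorflow as tf  # TF 1.x assumed

def conv2d(x, w, stride):
    # x: [batch, height, width, in_ch]; w: [patch, patch, in_ch, out_ch]
    return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME')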
Example #7
    def inference(self, input_x):        
        # first fc layer
        with tf.variable_scope('fc1'):
            weights1 = variable(
                'weights', 
                [self.input_dim * self.num_hidden_units],
                tf.contrib.layers.xavier_initializer())            
            biases1 = variable(
                'biases',
                [self.num_hidden_units],
                tf.constant_initializer(0.0))

            hidden = tf.matmul(
                input_x,
                tf.reshape(weights1,
                           [self.input_dim, self.num_hidden_units])) + biases1
            hidden = tf.nn.relu(hidden)
        # second fc layer 
        with tf.variable_scope('fc2'):
            weights2 = variable(
                'weights', 
                [self.num_hidden_units * self.num_hidden_units],
                tf.contrib.layers.xavier_initializer())            
            biases2 = variable(
                'biases',
                [self.num_hidden_units],
                tf.constant_initializer(0.0))

            fc2_out = tf.matmul(
                hidden,
                tf.reshape(weights2, [self.num_hidden_units,
                                      self.num_hidden_units])) + biases2
            self.fc2_out = tf.nn.relu(fc2_out)

        # third fc layer
        with tf.variable_scope('fc3'):
            self.weights3 = variable(
                'weights', 
                [self.num_hidden_units * self.num_classes],
                tf.contrib.layers.xavier_initializer())            
            self.biases3 = variable(
                'biases',
                [self.num_classes],
                tf.constant_initializer(0.0))

            logits = tf.matmul(
                self.fc2_out,
                tf.reshape(self.weights3, [self.num_hidden_units,
                                           self.num_classes])) + self.biases3

        return logits
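A portability note: tf.contrib was removed in TensorFlow 2, so the tf.contrib.layers.xavier_initializer() calls in Examples #7 and #8 are TF 1.x only. The equivalent Glorot-uniform initializers, for reference:

# TF 1.x core API (no contrib):
initializer = tf.glorot_uniform_initializer()
# TF 2.x equivalent:
# initializer = tf.keras.initializers.GlorotUniform()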
Example #8
    def inference(self, input_x):
        if self.idx == 31:
            # first fc layer
            with tf.variable_scope('fc1'):
                weights1 = variable(
                    'weights', 
                    [self.input_dim * 4096],
                    tf.contrib.layers.xavier_initializer())            
                biases1 = variable(
                    'biases',
                    [4096],
                    tf.constant_initializer(0.0))

                hidden = tf.matmul(input_x, tf.reshape(weights1, [self.input_dim, 4096])) + biases1
                hidden = tf.nn.relu(hidden)

            # dropout (caution: tf.layers.dropout defaults to training=False,
            # so without a training flag this layer is an identity op)
            with tf.variable_scope('do1'):
                hidden = tf.layers.dropout(hidden, rate=0.5)

            # second fc layer 
            with tf.variable_scope('fc2'):
                weights2 = variable(
                    'weights', 
                    [4096 * 4096],
                    tf.contrib.layers.xavier_initializer())            
                biases2 = variable(
                    'biases',
                    [4096],
                    tf.constant_initializer(0.0))

                hidden = tf.matmul(hidden, tf.reshape(weights2, [4096, 4096])) + biases2
                hidden = tf.nn.relu(hidden)

            # dropout (same caveat: no training flag is passed)
            with tf.variable_scope('do2'):
                hidden = tf.layers.dropout(hidden, rate=0.5)

            # last fc layer
            with tf.variable_scope('fc3'):
                weights3 = variable(
                    'weights', 
                    [4096 * 10],
                    tf.contrib.layers.xavier_initializer())            
                biases3 = variable(
                    'biases',
                    [10],
                    tf.constant_initializer(0.0))

                logits = tf.matmul(hidden, tf.reshape(weights3, [4096, 10])) + biases3

        elif self.idx == 32:
            # first fc layer 
            with tf.variable_scope('fc1'):
                weights2 = variable(
                    'weights', 
                    [4096 * 4096],
                    tf.contrib.layers.xavier_initializer())            
                biases2 = variable(
                    'biases',
                    [4096],
                    tf.constant_initializer(0.0))

                hidden = tf.matmul(input_x, tf.reshape(weights2, [4096, 4096])) + biases2
                hidden = tf.nn.relu(hidden)

            # dropout (same caveat: no training flag is passed)
            with tf.variable_scope('do1'):
                hidden = tf.layers.dropout(hidden, rate=0.5)

            # last fc layer
            with tf.variable_scope('fc2'):
                weights3 = variable(
                    'weights', 
                    [4096 * 10],
                    tf.contrib.layers.xavier_initializer())            
                biases3 = variable(
                    'biases',
                    [10],
                    tf.constant_initializer(0.0))

                logits = tf.matmul(hidden, tf.reshape(weights3, [4096, 10])) + biases3

        elif self.idx == 34:
            # otherwise just the last layer
            with tf.variable_scope('fc'):
                weights3 = variable(
                    'weights', 
                    [4096 * 10],
                    tf.contrib.layers.xavier_initializer())            
                biases3 = variable(
                    'biases',
                    [10],
                    tf.constant_initializer(0.0))

                logits = tf.matmul(input_x, tf.reshape(weights3, [4096, 10])) + biases3

        else:
            # Guard: without this branch, `logits` is unbound for any other
            # idx value and the return below raises UnboundLocalError.
            raise ValueError("unsupported idx: %r" % self.idx)

        return logits
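As flagged in the comments above, tf.layers.dropout only drops units when training=True. A minimal TF 1.x sketch of how the dropout calls in this example could be wired up; the is_training placeholder is hypothetical and not part of the original code (hidden stands for the activation from the preceding fc layer):

import tensorflow as tf  # TF 1.x assumed

# Hypothetical training switch; defaults to inference behavior.
is_training = tf.placeholder_with_default(False, shape=(), name='is_training')

with tf.variable_scope('do1'):
    hidden = tf.layers.dropout(hidden, rate=0.5, training=is_training)

# Feed {is_training: True} on training steps; evaluation uses the default.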
Example #9
    def inference(self, input_x):

        input_reshaped = tf.reshape(
            input_x,
            [-1, self.input_side, self.input_side, self.input_channels])

        # Hidden 1
        with tf.variable_scope('h1_a'):
            h1_a = self.conv2d_softplus(input_reshaped,
                                        self.conv_patch_size,
                                        self.input_channels,
                                        self.hidden1_units,
                                        stride=1)

        with tf.variable_scope('h1_c'):
            h1_c = self.conv2d_softplus(h1_a,
                                        self.conv_patch_size,
                                        self.hidden1_units,
                                        self.hidden1_units,
                                        stride=2)

        # Hidden 2
        with tf.variable_scope('h2_a'):
            h2_a = self.conv2d_softplus(h1_c,
                                        self.conv_patch_size,
                                        self.hidden1_units,
                                        self.hidden2_units,
                                        stride=1)

        with tf.variable_scope('h2_c'):
            h2_c = self.conv2d_softplus(h2_a,
                                        self.conv_patch_size,
                                        self.hidden2_units,
                                        self.hidden2_units,
                                        stride=2)

        # Shared layers / hidden 3
        with tf.variable_scope('h3_a'):
            h3_a = self.conv2d_softplus(h2_c,
                                        self.conv_patch_size,
                                        self.hidden2_units,
                                        self.hidden3_units,
                                        stride=1)

        last_layer_units = 10
        with tf.variable_scope('h3_c'):
            h3_c = self.conv2d_softplus(h3_a,
                                        1,
                                        self.hidden3_units,
                                        last_layer_units,
                                        stride=1)

        # global average pooling over the spatial dimensions
        h3_d = tf.reduce_mean(h3_c, axis=[1, 2])

        with tf.variable_scope('softmax_linear'):

            weights = variable_with_weight_decay(
                'weights', [last_layer_units * self.num_classes],
                stddev=1.0 / math.sqrt(float(last_layer_units)),
                wd=self.weight_decay)
            biases = variable('biases', [self.num_classes],
                              tf.constant_initializer(0.0))

            logits = tf.matmul(
                h3_d, tf.reshape(
                    weights, [last_layer_units, self.num_classes])) + biases

        return logits
Example #10
    def inference(self, input_x):

        input_reshaped = tf.reshape(input_x,
                                    [-1, self.input_side, self.input_channels])

        # Hidden 1
        with tf.variable_scope('h1_a'):
            h1_a = self.conv1d_softplus(input_reshaped,
                                        self.conv_patch_size,
                                        self.input_channels,
                                        self.hidden1_units,
                                        stride=1,
                                        wd=0.001)

        with tf.variable_scope('h1_c'):
            h1_c = self.conv1d_softplus(h1_a,
                                        self.conv_patch_size,
                                        self.hidden1_units,
                                        self.hidden2_units,
                                        stride=1,
                                        wd=0.001)

        # Hidden 2
        with tf.variable_scope('h2_a'):
            h2_a = self.conv1d_softplus(h1_c,
                                        self.conv_patch_size,
                                        self.hidden2_units,
                                        self.hidden3_units,
                                        stride=1,
                                        wd=0.001)

        with tf.variable_scope('h2_c'):
            h2_c = self.conv1d_softplus(h2_a,
                                        self.conv_patch_size,
                                        self.hidden3_units,
                                        self.hidden3_units,
                                        stride=1,
                                        wd=0.001)
            #h2_c = tf.nn.dropout(h2_c, rate = 0.5)

        last_layer_units = 128

        # fully connected layer
        with tf.variable_scope('h3_a'):
            dim = h2_c.get_shape()[1].value

            # Note: the hard-coded 256 must equal the channel count of h2_c
            # (i.e. hidden3_units); the reshape below makes the same assumption.
            weights = variable_with_weight_decay(
                'weights', [dim * 256 * last_layer_units],
                stddev=1.0 / math.sqrt(float(last_layer_units)),
                wd=0.001)
            biases = variable('biases', [last_layer_units],
                              tf.constant_initializer(0.0))

            h3_a = tf.nn.relu(
                tf.matmul(tf.reshape(h2_c, [-1, dim * 256]),
                          tf.reshape(weights, [dim * 256, last_layer_units])) +
                biases)

        with tf.variable_scope('softmax_linear'):

            weights = variable_with_weight_decay(
                'weights', [last_layer_units * self.num_classes],
                stddev=1.0 / math.sqrt(float(last_layer_units)),
                wd=0.0)
            biases = variable('biases', [self.num_classes],
                              tf.constant_initializer(0.0))

            logits = tf.matmul(
                h3_a, tf.reshape(
                    weights, [last_layer_units, self.num_classes])) + biases

        return logits