Example #1
    def inference(self, input):
        # Softmax_linear
        with tf.variable_scope('softmax_linear'):

            # We regularize the bias to keep it in line with sklearn's
            # liblinear implementation
            if self.use_bias:
                weights = variable_with_weight_decay(
                    'weights', [self.input_dim + 1],
                    stddev=5.0 / math.sqrt(float(self.input_dim)),
                    wd=self.weight_decay)
                # biases = variable(
                #     'biases',
                #     [1],
                #     tf.constant_initializer(0.0))

                logits = tf.matmul(
                    tf.concat([input, tf.ones([tf.shape(input)[0], 1])],
                              axis=1), tf.reshape(weights,
                                                  [-1, 1]))  # + biases

            else:
                weights = variable_with_weight_decay(
                    'weights', [self.input_dim],
                    stddev=5.0 / math.sqrt(float(self.input_dim)),
                    wd=self.weight_decay)

                logits = tf.matmul(input, tf.reshape(weights, [-1, 1]))

        self.weights = weights
        return logits
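Every example on this page relies on the same two helpers, `variable` and `variable_with_weight_decay`, which are not shown here. Below is a minimal sketch of what they might look like, assuming the common TF1 pattern of truncated-normal initialization plus an L2 penalty added to a 'losses' collection; the collection name and the initializer are assumptions, not taken from the original source.

import tensorflow as tf


def variable(name, shape, initializer):
    # Create (or reuse, under variable_scope reuse) a float32 variable.
    return tf.get_variable(name, shape, initializer=initializer,
                           dtype=tf.float32)


def variable_with_weight_decay(name, shape, stddev, wd):
    # Truncated-normal initialization; if wd is set, add wd * ||var||^2 / 2
    # to a 'losses' collection so it can later be summed into the total loss.
    var = variable(name, shape,
                   tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None and wd != 0:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var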
Example #2
 def inference(self, input_x):
     with tf.variable_scope('embedding_layer'):
         with tf.variable_scope('mlp'):
             embedding_users_mlp = variable_with_weight_decay(
                 "embedding_users", [self.num_users * self.embedding_size],
                 stddev=1.0 / math.sqrt(float(self.embedding_size)),
                 wd=self.weight_decay)
             embedding_items_mlp = variable_with_weight_decay(
                 "embedding_items", [self.num_items * self.embedding_size],
                 stddev=1.0 / math.sqrt(float(self.embedding_size)),
                 wd=self.weight_decay)
             user_embedding_mlp = tf.nn.embedding_lookup(
                 tf.reshape(embedding_users_mlp,
                            (self.num_users, self.embedding_size)),
                 input_x[:, 0],
                 name="user_embedding")
             item_embedding_mlp = tf.nn.embedding_lookup(
                 tf.reshape(embedding_items_mlp,
                            (self.num_items, self.embedding_size)),
                 input_x[:, 1],
                 name="item_embedding")
             hidden_input_mlp = tf.concat(
                 [user_embedding_mlp, item_embedding_mlp],
                 axis=1,
                 name='mlp_embedding_concat')
         with tf.variable_scope('gmf'):
             embedding_users_gmf = variable_with_weight_decay(
                 "embedding_users", [self.num_users * self.embedding_size],
                 stddev=1.0 / math.sqrt(float(self.embedding_size)),
                 wd=self.weight_decay)
             embedding_items_gmf = variable_with_weight_decay(
                 "embedding_items", [self.num_items * self.embedding_size],
                 stddev=1.0 / math.sqrt(float(self.embedding_size)),
                 wd=self.weight_decay)
             user_embedding_gmf = tf.nn.embedding_lookup(
                 tf.reshape(embedding_users_gmf,
                            (self.num_users, self.embedding_size)),
                 input_x[:, 0],
                 name="user_embedding")
             item_embedding_gmf = tf.nn.embedding_lookup(
                 tf.reshape(embedding_items_gmf,
                            (self.num_items, self.embedding_size)),
                 input_x[:, 1],
                 name="item_embedding")
             h_gmf = user_embedding_gmf * item_embedding_gmf
     with tf.variable_scope('h1'):
         h1_o = tf.nn.relu(
             self.fnn_layer(hidden_input_mlp, self.embedding_size),
             'hidden_output')
     with tf.variable_scope('h2'):
         h2_o = tf.nn.relu(self.fnn_layer(h1_o, self.embedding_size // 2),
                           'hidden_output')
         h2_concat = tf.concat([h2_o, h_gmf], axis=1, name='hidden_concat')
     with tf.variable_scope('h3'):
         rating = tf.squeeze(self.fnn_layer(h2_concat, 1), name='rating')
     return rating
Example #3
    def inference(self, input):
        reshaped_input = tf.reshape(
            input, [-1, self.img_side, self.img_side, self.num_channels])
        if self.use_InceptionResNet:
            self.inception_model = InceptionResNetV2(
                include_top=False,
                weights='imagenet',
                input_tensor=reshaped_input)
        else:
            self.inception_model = InceptionV3(include_top=False,
                                               weights='imagenet',
                                               input_tensor=reshaped_input)

        raw_inception_features = self.inception_model.output

        pooled_inception_features = AveragePooling2D(
            (8, 8), strides=(8, 8), name='avg_pool')(raw_inception_features)
        self.inception_features = Flatten(
            name='flatten')(pooled_inception_features)

        with tf.variable_scope('softmax_linear'):
            # In the binary case, the logistic-regression weights have shape
            # [num_features, 1], producing a single logit column; a zeros
            # column is prepended so the softmax layer receives proper
            # two-class logits.
            if self.num_classes == 2:
                weights = variable_with_weight_decay(
                    'weights', [self.num_features],
                    stddev=1.0 / math.sqrt(float(self.num_features)),
                    wd=self.weight_decay)

                logits = tf.matmul(self.inception_features,
                                   tf.reshape(weights, [-1, 1]))
                zeros = tf.reshape(tf.zeros_like(logits)[:, 0], [-1, 1])
                logits = tf.concat([zeros, logits], 1)
            # In the multi-class case, the logits are simply the product of
            # the weight matrix and the latent features.
            else:
                weights = variable_with_weight_decay(
                    'weights', [self.num_features * self.num_classes],
                    stddev=1.0 / math.sqrt(float(self.num_features)),
                    wd=self.weight_decay)

                logits = tf.matmul(self.inception_features,
                                   tf.reshape(weights, [-1, self.num_classes]))

        self.weights = weights
        return logits
Example #4
    def inference(self, input):
        reshaped_input = tf.reshape(
            input, [-1, self.img_side, self.img_side, self.num_channels])
        self.inception_model = InceptionV3(include_top=False,
                                           weights='imagenet',
                                           input_tensor=reshaped_input)

        raw_inception_features = self.inception_model.output

        pooled_inception_features = AveragePooling2D(
            (8, 8), strides=(8, 8), name='avg_pool')(raw_inception_features)
        self.inception_features = Flatten(
            name='flatten')(pooled_inception_features)

        with tf.variable_scope('softmax_linear'):
            weights = variable_with_weight_decay(
                'weights', [self.num_features],
                stddev=1.0 / math.sqrt(float(self.num_features)),
                wd=self.weight_decay)

            logits = tf.matmul(self.inception_features,
                               tf.reshape(weights, [-1, 1]))
            zeros = tf.zeros_like(logits)
            logits_with_zeros = tf.concat([zeros, logits], 1)

        self.weights = weights

        return logits_with_zeros
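Examples #3, #4, and #8 all use the same trick for binary classification: a single logit column is learned and a column of zeros is concatenated so the result can be fed to a two-class softmax. A small illustrative check of the equivalence follows; the tensors here are made up purely for the demonstration.

import tensorflow as tf

# With a single logit column z, softmax([0, z]) = [1 - sigmoid(z), sigmoid(z)],
# so the two-class softmax over [zeros, logits] reproduces ordinary binary
# logistic regression.
z = tf.constant([[0.3], [-1.2]])
two_class_logits = tf.concat([tf.zeros_like(z), z], axis=1)  # shape [batch, 2]
softmax_probs = tf.nn.softmax(two_class_logits)  # column 1 equals sigmoid(z)
sigmoid_probs = tf.sigmoid(z)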
Example #5
    def inference(self, input_x):

        with tf.variable_scope('embedding_layer'):
            embedding_users = variable_with_weight_decay(
                "embedding_users", [self.num_users * self.embedding_size],
                stddev=1.0 / math.sqrt(float(self.embedding_size)),
                wd=self.weight_decay)
            embedding_items = variable_with_weight_decay(
                "embedding_items", [self.num_items * self.embedding_size],
                stddev=1.0 / math.sqrt(float(self.embedding_size)),
                wd=self.weight_decay)
            user_embedding = tf.nn.embedding_lookup(tf.reshape(
                embedding_users, (self.num_users, self.embedding_size)),
                                                    input_x[:, 0],
                                                    name="user_embedding")
            item_embedding = tf.nn.embedding_lookup(tf.reshape(
                embedding_items, (self.num_items, self.embedding_size)),
                                                    input_x[:, 1],
                                                    name="item_embedding")

            bias_users = variable("bias_users", [self.num_users],
                                  tf.constant_initializer(0.0))
            bias_items = variable("bias_items", [self.num_items],
                                  tf.constant_initializer(0.0))
            user_bias = tf.nn.embedding_lookup(tf.reshape(
                bias_users, (self.num_users, 1)),
                                               input_x[:, 0],
                                               name="user_bias")
            item_bias = tf.nn.embedding_lookup(tf.reshape(
                bias_items, (self.num_items, 1)),
                                               input_x[:, 1],
                                               name="item_bias")
            global_bias = variable("global_bias", [1],
                                   tf.constant_initializer(0.0))

        with tf.variable_scope('interaction_layer'):
            rating = tf.squeeze(tf.reduce_sum(
                user_embedding * item_embedding, axis=1, keep_dims=True) +
                                user_bias + item_bias + global_bias,
                                name="rating_squeeze")

        return rating
Example #6
    def conv1d_softplus(self, input_x, conv_patch_size, input_channels,
                        output_channels, stride, wd):
        weights = variable_with_weight_decay(
            'weights', [conv_patch_size * input_channels * output_channels],
            stddev=0.05,
            wd=wd)
        biases = variable('biases', [output_channels],
                          tf.constant_initializer(0.0))
        weights_reshaped = tf.reshape(
            weights, [conv_patch_size, input_channels, output_channels])
        # Note: despite the "softplus" in the name, this layer applies ReLU.
        hidden = tf.nn.relu(conv1d(input_x, weights_reshaped, stride) + biases)

        return hidden
Example #7
 def fnn_layer(self, hidden_input, output_dim):
     input_dim = hidden_input.get_shape()[1].value
     print("input dim for hidden layer: %s" % input_dim)
     weights = variable_with_weight_decay(
         'weights',
         [input_dim * output_dim],
         stddev=1.0 / math.sqrt(float(input_dim)),
         # wd=0
         wd=self.weight_decay)
     bias = variable('biases', [output_dim], tf.constant_initializer(0.0))
     hidden_output = tf.matmul(
         hidden_input, tf.reshape(weights,
                                  (int(input_dim), int(output_dim)))) + bias
     return hidden_output
Example #8
    def inference(self, input):                
        with tf.variable_scope('softmax_linear'):
            weights = variable_with_weight_decay(
                'weights', 
                [self.input_dim],
                stddev=1.0 / math.sqrt(float(self.input_dim)),
                wd=self.weight_decay)            

            logits = tf.matmul(input, tf.reshape(weights, [self.input_dim, 1])) # + biases
            zeros = tf.zeros_like(logits)            
            logits_with_zeros = tf.concat([zeros, logits], 1)

        self.weights = weights

        return logits_with_zeros
Example #9
    def inference(self, input):        
        with tf.variable_scope('softmax_linear'):
            weights = variable_with_weight_decay(
                'weights', 
                [self.input_dim * self.num_classes],
                stddev=1.0 / math.sqrt(float(self.input_dim)),
                wd=self.weight_decay)            
            logits = tf.matmul(input, tf.reshape(weights, [self.input_dim, self.num_classes]))
            # biases = variable(
            #     'biases',
            #     [self.num_classes],
            #     tf.constant_initializer(0.0))
            # logits = tf.matmul(input, tf.reshape(weights, [self.input_dim, self.num_classes])) + biases

        self.weights = weights
        # self.biases = biases

        return logits
Example #10
    def conv2d_softplus(self, input_x, conv_patch_size, input_channels,
                        output_channels, stride):
        weights = variable_with_weight_decay(
            'weights', [
                conv_patch_size * conv_patch_size * input_channels *
                output_channels
            ],
            stddev=2.0 / math.sqrt(
                float(conv_patch_size * conv_patch_size * input_channels)),
            wd=self.weight_decay)
        biases = variable('biases', [output_channels],
                          tf.constant_initializer(0.0))
        weights_reshaped = tf.reshape(weights, [
            conv_patch_size, conv_patch_size, input_channels, output_channels
        ])
        # Note: despite the "softplus" in the name, this layer applies tanh.
        hidden = tf.nn.tanh(conv2d(input_x, weights_reshaped, stride) + biases)

        return hidden
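The convolutional examples above and below call `conv1d` and `conv2d` wrappers that are not shown on this page. A plausible minimal sketch, assuming SAME padding and the standard tf.nn ops; the padding choice is an assumption.

import tensorflow as tf


def conv1d(x, w, stride):
    # x: [batch, width, in_channels]; w: [patch, in_channels, out_channels]
    return tf.nn.conv1d(x, w, stride=stride, padding='SAME')


def conv2d(x, w, stride):
    # x: [batch, height, width, in_channels]; w: [patch, patch, in, out]
    return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME')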
Example #11
    def inference(self, input_x):

        input_reshaped = tf.reshape(
            input_x,
            [-1, self.input_side, self.input_side, self.input_channels])

        # Hidden 1
        with tf.variable_scope('h1_a'):
            h1_a = self.conv2d_softplus(input_reshaped,
                                        self.conv_patch_size,
                                        self.input_channels,
                                        self.hidden1_units,
                                        stride=1)

        with tf.variable_scope('h1_c'):
            h1_c = self.conv2d_softplus(h1_a,
                                        self.conv_patch_size,
                                        self.hidden1_units,
                                        self.hidden1_units,
                                        stride=2)

        # Hidden 2
        with tf.variable_scope('h2_a'):
            h2_a = self.conv2d_softplus(h1_c,
                                        self.conv_patch_size,
                                        self.hidden1_units,
                                        self.hidden2_units,
                                        stride=1)

        with tf.variable_scope('h2_c'):
            h2_c = self.conv2d_softplus(h2_a,
                                        self.conv_patch_size,
                                        self.hidden2_units,
                                        self.hidden2_units,
                                        stride=2)

        # Shared layers / hidden 3
        with tf.variable_scope('h3_a'):
            h3_a = self.conv2d_softplus(h2_c,
                                        self.conv_patch_size,
                                        self.hidden2_units,
                                        self.hidden3_units,
                                        stride=1)

        last_layer_units = 10
        with tf.variable_scope('h3_c'):
            h3_c = self.conv2d_softplus(h3_a,
                                        1,
                                        self.hidden3_units,
                                        last_layer_units,
                                        stride=1)

        h3_d = tf.reduce_mean(h3_c, axis=[1, 2])

        with tf.variable_scope('softmax_linear'):

            weights = variable_with_weight_decay(
                'weights', [last_layer_units * self.num_classes],
                stddev=1.0 / math.sqrt(float(last_layer_units)),
                wd=self.weight_decay)
            biases = variable('biases', [self.num_classes],
                              tf.constant_initializer(0.0))

            logits = tf.matmul(
                h3_d, tf.reshape(
                    weights, [last_layer_units, self.num_classes])) + biases

        return logits
Example #12
    def inference(self, input_x):

        input_reshaped = tf.reshape(input_x,
                                    [-1, self.input_side, self.input_channels])

        # Hidden 1
        with tf.variable_scope('h1_a'):
            h1_a = self.conv1d_softplus(input_reshaped,
                                        self.conv_patch_size,
                                        self.input_channels,
                                        self.hidden1_units,
                                        stride=1,
                                        wd=0.001)

        with tf.variable_scope('h1_c'):
            h1_c = self.conv1d_softplus(h1_a,
                                        self.conv_patch_size,
                                        self.hidden1_units,
                                        self.hidden2_units,
                                        stride=1,
                                        wd=0.001)

        # Hidden 2
        with tf.variable_scope('h2_a'):
            h2_a = self.conv1d_softplus(h1_c,
                                        self.conv_patch_size,
                                        self.hidden2_units,
                                        self.hidden3_units,
                                        stride=1,
                                        wd=0.001)

        with tf.variable_scope('h2_c'):
            h2_c = self.conv1d_softplus(h2_a,
                                        self.conv_patch_size,
                                        self.hidden3_units,
                                        self.hidden3_units,
                                        stride=1,
                                        wd=0.001)
            #h2_c = tf.nn.dropout(h2_c, rate = 0.5)

        last_layer_units = 128

        # fully connected layer
        with tf.variable_scope('h3_a'):
            dim = h2_c.get_shape()[1].value

            # The hard-coded 256 assumes the channel width of h2_c
            # (hidden3_units) is 256, so the flattened size is dim * 256.
            weights = variable_with_weight_decay(
                'weights', [dim * 256 * last_layer_units],
                stddev=1.0 / math.sqrt(float(last_layer_units)),
                wd=0.001)
            biases = variable('biases', [last_layer_units],
                              tf.constant_initializer(0.0))

            h3_a = tf.nn.relu(
                tf.matmul(tf.reshape(h2_c, [-1, dim * 256]),
                          tf.reshape(weights, [dim * 256, last_layer_units])) +
                biases)

        with tf.variable_scope('softmax_linear'):

            weights = variable_with_weight_decay(
                'weights', [last_layer_units * self.num_classes],
                stddev=1.0 / math.sqrt(float(last_layer_units)),
                wd=0.0)
            biases = variable('biases', [self.num_classes],
                              tf.constant_initializer(0.0))

            logits = tf.matmul(
                h3_a, tf.reshape(
                    weights, [last_layer_units, self.num_classes])) + biases

        return logits