Example #1
0
    def inference(self, input):
        """Build the inference graph: InceptionV3 features + a binary head.

        Reshapes the flat `input` batch into images, runs them through an
        ImageNet-pretrained InceptionV3 (top removed), average-pools and
        flattens the feature map, then applies a single learned weight
        column. A zero column is prepended so the returned logits have
        two columns, matching a two-class softmax layout.
        """
        images = tf.reshape(
            input,
            [-1, self.img_side, self.img_side, self.num_channels])
        self.inception_model = InceptionV3(
            include_top=False, weights='imagenet', input_tensor=images)

        # Collapse the 8x8 spatial map into one feature vector per image.
        features = self.inception_model.output
        features = AveragePooling2D(
            (8, 8), strides=(8, 8), name='avg_pool')(features)
        self.inception_features = Flatten(name='flatten')(features)

        with tf.variable_scope('softmax_linear'):
            # One weight per feature: a binary logistic-regression head.
            weights = variable_with_weight_decay(
                'weights', [self.num_features],
                stddev=1.0 / math.sqrt(float(self.num_features)),
                wd=self.weight_decay)

            positive_logits = tf.matmul(
                self.inception_features, tf.reshape(weights, [-1, 1]))
            # Prepend a zero column so downstream softmax sees two classes.
            two_class_logits = tf.concat(
                [tf.zeros_like(positive_logits), positive_logits], 1)

        self.weights = weights

        return two_class_logits
    def inference(self, input):
        """Build the inference graph: Inception features + linear softmax head.

        Picks InceptionResNetV2 or InceptionV3 (both ImageNet-pretrained,
        top removed) as the feature extractor depending on
        `self.use_InceptionResNet`, pools and flattens the features, and
        attaches a linear classifier. In the binary case a single weight
        column is learned and a zero column is prepended so the logits
        still have two columns for the softmax; otherwise a full
        num_features x num_classes weight matrix is used.
        """
        images = tf.reshape(
            input,
            [-1, self.img_side, self.img_side, self.num_channels])

        # Select the pretrained backbone; both are built the same way.
        backbone = (InceptionResNetV2 if self.use_InceptionResNet
                    else InceptionV3)
        self.inception_model = backbone(
            include_top=False, weights='imagenet', input_tensor=images)

        # Collapse the 8x8 spatial map into one feature vector per image.
        features = self.inception_model.output
        features = AveragePooling2D(
            (8, 8), strides=(8, 8), name='avg_pool')(features)
        self.inception_features = Flatten(name='flatten')(features)

        with tf.variable_scope('softmax_linear'):
            if self.num_classes == 2:
                # Binary case: logistic regression only needs an
                # input_dim x 1 weight column. Prepend a zero column so
                # the softmax layer still receives two logits per example.
                weights = variable_with_weight_decay(
                    'weights', [self.num_features],
                    stddev=1.0 / math.sqrt(float(self.num_features)),
                    wd=self.weight_decay)

                logits = tf.matmul(
                    self.inception_features, tf.reshape(weights, [-1, 1]))
                zero_col = tf.reshape(
                    tf.zeros_like(logits)[:, 0], [-1, 1])
                logits = tf.concat([zero_col, logits], 1)
            else:
                # Multi-class case: logits are the plain product of the
                # features with the full weight matrix.
                weights = variable_with_weight_decay(
                    'weights', [self.num_features * self.num_classes],
                    stddev=1.0 / math.sqrt(float(self.num_features)),
                    wd=self.weight_decay)

                logits = tf.matmul(
                    self.inception_features,
                    tf.reshape(weights, [-1, self.num_classes]))

        self.weights = weights
        return logits