Example #1
import cv2
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib  # StyleGAN's TensorFlow utility module


def get_features(imgs, c):
    # Initialize the TensorFlow session once, before the per-image loop.
    tflib.init_tf()
    all_images = []
    for image_input in imgs:
        image = cv2.imread(str(image_input))  # note: OpenCV reads images in BGR channel order
        # Scale pixel values to the [0, 1] range.
        image = cv2.normalize(image,
                              None,
                              alpha=0,
                              beta=1,
                              norm_type=cv2.NORM_MINMAX,
                              dtype=cv2.CV_32F)
        # Resize to the 256x256 input expected by the classifier.
        image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)
        # HWC -> CHW, then add a leading batch dimension.
        image = np.expand_dims(image.transpose((2, 0, 1)), axis=0)
        all_images.append(image)
    # Stack the per-image arrays into a single batch tensor.
    concat_imgs = tf.concat(all_images, 0, name='concat')

    logits = c.get_output_for(concat_imgs,
                              None,
                              is_validation=True,
                              randomize_noise=True)
    # Turn the single logit into two-class probabilities via softmax over [logit, -logit].
    predictions = [tf.nn.softmax(tf.concat([logits, -logits], axis=1))]
    result = tflib.run(predictions)[0].tolist()

    # return logits
    return result[0]  # two-class probabilities for the first image in the batch
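A hypothetical call site for the function above, sketched under the assumption that the classifier is one of StyleGAN's 256x256 attribute-classifier pickles and that dnnlib is importable; the file name and image paths below are placeholders, not values from the original code.

# Sketch only: the pickle filename and image paths are assumptions.
import pickle
import dnnlib.tflib as tflib

tflib.init_tf()
with open('celebahq-classifier-00-male.pkl', 'rb') as f:   # assumed local copy of a classifier pickle
    classifier = pickle.load(f, encoding='latin1')

probs = get_features(['face_0001.png', 'face_0002.png'], classifier)
print(probs)  # two-class softmax output for the first image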
Example #2
import cv2
import numpy as np
import dnnlib.tflib as tflib  # StyleGAN's TensorFlow utility module


def get_features(imgs, predictions, x):
    # tflib.init_tf()
    all_images = []
    for image_input in imgs:
        image = cv2.imread(str(image_input))
        # Convert BGR -> RGB and standardize each channel to zero mean, unit variance.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image - np.mean(image, axis=(0, 1))
        image = image / np.std(image, axis=(0, 1))
        # Resize to the 256x256 input expected by the classifier.
        image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)
        # Keep only the top half of the image (zero out the bottom 128 rows).
        image[128:, :, :] = 0
        # To keep only the bottom half instead, use:
        # image[:128, :, :] = 0
        # HWC -> CHW, then add a leading batch dimension.
        image = np.expand_dims(image.transpose((2, 0, 1)), axis=0)
        all_images.append(image)
    # Stack the per-image arrays into a single NCHW batch.
    concat_imgs = np.concatenate(all_images)

    result = tflib.run(predictions, feed_dict={x: concat_imgs})[0]

    # return logits
    # print(result)
    return result[:, 1].tolist()
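Example #2 expects the caller to build the placeholder and prediction graph itself. Below is a minimal sketch of that wiring, assuming `classifier` is an attribute-classifier network loaded as in Example #1; the placeholder name, batch layout, and image paths are assumptions rather than values from the original code.

import tensorflow as tf

# NCHW float input matching the preprocessing done inside get_features().
x = tf.placeholder(tf.float32, shape=[None, 3, 256, 256], name='classifier_input')
logits = classifier.get_output_for(x, None, is_validation=True)
# Wrap in a list so that tflib.run(predictions)[0] yields the [N, 2] probability array.
predictions = [tf.nn.softmax(tf.concat([logits, -logits], axis=1))]

scores = get_features(['face_0001.png', 'face_0002.png'], predictions, x)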
Example #3
    def get_random_labels_np(self, minibatch_size):  # => labels
        self.configure(minibatch_size)
        if self._tf_labels_np is None:
            self._tf_labels_np = self.get_random_labels_tf(minibatch_size)
        return tflib.run(self._tf_labels_np)
Example #4
    def get_minibatch_np(self, minibatch_size, lod=0):  # => images, labels
        self.configure(minibatch_size, lod)
        if self._tf_minibatch_np is None:
            self._tf_minibatch_np = self.get_minibatch_tf()
        return tflib.run(self._tf_minibatch_np)
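Examples #3 and #4 share one design: the TensorFlow expression is built lazily on first use, cached on the instance, and then evaluated with tflib.run() on every call, so repeated sampling does not keep adding nodes to the graph. A hypothetical call site, assuming `dataset` is an instance of the dataset class these methods belong to (the variable name and minibatch size are placeholders):

labels = dataset.get_random_labels_np(minibatch_size=8)             # label array for 8 random samples
images, labels = dataset.get_minibatch_np(minibatch_size=8, lod=0)  # one minibatch at full resolution (lod=0)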
Example #5
    def _evaluate(self, Gs, num_gpus):
        minibatch_size = num_gpus * self.minibatch_per_gpu

        # Construct TensorFlow graph for each GPU.
        result_expr = []
        for gpu_idx in range(num_gpus):
            with tf.device('/gpu:%d' % gpu_idx):
                Gs_clone = Gs.clone()

                # Generate images.
                latents = tf.random_normal([self.minibatch_per_gpu] +
                                           Gs_clone.input_shape[1:])
                dlatents = Gs_clone.components.mapping.get_output_for(
                    latents, None, is_validation=True)
                images = Gs_clone.components.synthesis.get_output_for(
                    dlatents, is_validation=True, randomize_noise=True)

                # Downsample to 256x256. The attribute classifiers were built for 256x256.
                if images.shape[2] > 256:
                    factor = images.shape[2] // 256
                    images = tf.reshape(images, [
                        -1, images.shape[1], images.shape[2] // factor, factor,
                        images.shape[3] // factor, factor
                    ])
                    images = tf.reduce_mean(images, axis=[3, 5])

                # Run classifier for each attribute.
                result_dict = dict(latents=latents, dlatents=dlatents[:, -1])
                for attrib_idx in self.attrib_indices:
                    classifier = misc.load_pkl(classifier_urls[attrib_idx])
                    logits = classifier.get_output_for(images, None)
                    predictions = tf.nn.softmax(
                        tf.concat([logits, -logits], axis=1))
                    result_dict[attrib_idx] = predictions
                result_expr.append(result_dict)

        # Sampling loop.
        results = []
        for _ in range(0, self.num_samples, minibatch_size):
            results += tflib.run(result_expr)
        results = {
            key: np.concatenate([value[key] for value in results], axis=0)
            for key in results[0].keys()
        }

        # Calculate conditional entropy for each attribute.
        conditional_entropies = defaultdict(list)
        for attrib_idx in self.attrib_indices:
            # Prune the least confident samples.
            pruned_indices = list(range(self.num_samples))
            pruned_indices = sorted(
                pruned_indices, key=lambda i: -np.max(results[attrib_idx][i]))
            pruned_indices = pruned_indices[:self.num_keep]

            # Fit SVM to the remaining samples.
            svm_targets = np.argmax(results[attrib_idx][pruned_indices],
                                    axis=1)
            for space in ['latents', 'dlatents']:
                svm_inputs = results[space][pruned_indices]
                try:
                    svm = sklearn.svm.LinearSVC()
                    svm.fit(svm_inputs, svm_targets)
                    svm.score(svm_inputs, svm_targets)
                    svm_outputs = svm.predict(svm_inputs)
                except Exception:
                    svm_outputs = svm_targets  # assume perfect prediction

                # Calculate conditional entropy.
                p = [[
                    np.mean([
                        case == (row, col)
                        for case in zip(svm_outputs, svm_targets)
                    ]) for col in (0, 1)
                ] for row in (0, 1)]
                conditional_entropies[space].append(conditional_entropy(p))
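The loop above ends by handing a 2x2 joint-probability table p to a conditional_entropy() helper defined elsewhere in the metric module. Below is a plausible sketch of what that helper computes, the conditional entropy H(Y|X) in bits, where X is the SVM prediction and Y is the classifier target; the repository's actual implementation may differ in its details.

import numpy as np

def conditional_entropy(p):
    # p[x][y] = fraction of samples with SVM output x and classifier target y.
    p = np.asarray(p, dtype=np.float64)
    h = 0.0
    for x in range(2):
        p_x = p[x][0] + p[x][1]                            # marginal P(X = x)
        for y in range(2):
            if p[x][y] > 0.0:
                h += p[x][y] * np.log2(p_x / p[x][y])      # -P(x, y) * log2 P(y | x)
    return h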