Example #2
    def evaluate_embeddings(self, algo, global_step, datasets):
        """Labeled evaluation."""
        fractions = CONFIG.EVAL.CLASSIFICATION_FRACTIONS

        train_embs = datasets['train_dataset']['embs']
        val_embs = datasets['val_dataset']['embs']
        num_classes = DATASET_TO_NUM_CLASSES[datasets['name']]

        if not train_embs or not val_embs:
            logging.warning(
                'All embeddings are NaN. Something is wrong with the model.')
            return 1.0

        val_labels = get_targets_from_labels(datasets['val_dataset']['labels'],
                                             num_classes)

        num_samples = len(datasets['train_dataset']['embs'])

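        # Train and evaluate a classifier using only the first
        # `fraction_used` portion of the training embeddings.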
        def worker(fraction_used):
            num_samples_used = max(1, int(fraction_used * num_samples))
            train_embs = datasets['train_dataset']['embs'][:num_samples_used]
            train_labels = get_targets_from_labels(
                datasets['train_dataset']['labels'][:num_samples_used],
                num_classes)
            return fit_model(train_embs, train_labels, val_embs, val_labels,
                             global_step, num_classes,
                             '%s_%s' % (datasets['name'], str(fraction_used)))

        val_scores = []
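        # Fit one classifier per fraction in parallel; each thread trains on
        # a prefix of the training data.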
        with cf.ThreadPoolExecutor(max_workers=len(fractions)) as executor:
            results = executor.map(worker, fractions)
            for (fraction, (train_score,
                            val_score)) in zip(fractions, results):
                prefix = '%s_%s' % (datasets['name'], str(fraction))
                logging.info(
                    '[Global step: {}] Event Completion {} Fraction Train '
                    'Score: {:.3f},'.format(global_step.numpy(), prefix,
                                            train_score))
                logging.info(
                    '[Global step: {}] Event Completion {} Fraction Val '
                    'Score: {:.3f},'.format(global_step.numpy(), prefix,
                                            val_score))
                tf.summary.scalar('event_completion/train_%s_score' % prefix,
                                  train_score,
                                  step=global_step)
                tf.summary.scalar('event_completion/val_%s_score' % prefix,
                                  val_score,
                                  step=global_step)
                val_scores.append(val_score)

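        # Return the validation score for the last fraction in the list.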
        return val_scores[-1]
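The snippet below is a minimal, self-contained sketch of the same fraction-based evaluation pattern: a worker trains on a growing prefix of the training data, and one thread runs per fraction. The nearest-centroid classifier, the evaluate_fractions helper, and the random toy data are illustrative stand-ins, not the fit_model or datasets used above.

import concurrent.futures as cf

import numpy as np


def nearest_centroid_accuracy(train_x, train_y, val_x, val_y, num_classes):
    """Toy classifier: one centroid per class, predict the nearest one."""
    centroids = np.stack(
        [train_x[train_y == c].mean(axis=0) for c in range(num_classes)])
    dists = np.linalg.norm(val_x[:, None, :] - centroids[None, :, :], axis=-1)
    preds = dists.argmin(axis=1)
    return float((preds == val_y).mean())


def evaluate_fractions(train_x, train_y, val_x, val_y, num_classes,
                       fractions=(0.1, 0.5, 1.0)):
    """Train on growing prefixes of the data, one thread per fraction."""
    num_samples = len(train_x)

    def worker(fraction):
        # Mirror the max(1, ...) guard above so tiny fractions still train.
        n = max(1, int(fraction * num_samples))
        return nearest_centroid_accuracy(train_x[:n], train_y[:n],
                                         val_x, val_y, num_classes)

    with cf.ThreadPoolExecutor(max_workers=len(fractions)) as executor:
        return dict(zip(fractions, executor.map(worker, fractions)))


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    num_classes = 3
    train_y = rng.integers(0, num_classes, size=300)
    val_y = rng.integers(0, num_classes, size=100)
    # Shift each sample by its label so the toy task is learnable.
    train_x = rng.normal(size=(300, 16)) + train_y[:, None]
    val_x = rng.normal(size=(100, 16)) + val_y[:, None]
    print(evaluate_fractions(train_x, train_y, val_x, val_y, num_classes))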