Example #1
    def _prepare_augmented_batch(self, tensors, batch_size, image_size=None):
        if self.subset == "train":
            assert batch_size == 1, "only implemented for 1 so far"
            augmentor_strs = self.config.unicode_list("augmentors_train", [])
            augmentors = parse_augmentors(augmentor_strs, self.void_label())
            for augmentor in augmentors:
                tensors = augmentor.apply(tensors)
            if image_size is None:
                image_size = self.image_size
            tensors = assemble_input_tensors(tensors, image_size)

            # batchify
            keys = list(tensors.keys())
            tensors = {k: tf.expand_dims(tensors[k], axis=0) for k in keys}
        else:
            augmentor_strs = self.config.unicode_list("augmentors_val", [])
            assert "scale" not in augmentor_strs, "scale augmentation during test time not implemented yet"
            assert "translation" not in augmentor_strs, "translation augmentation during test time not implemented yet"
            augmentors = parse_augmentors(augmentor_strs, self.void_label())
            tensors = [tensors for _ in range(batch_size)]
            tensors = [apply_augmentors(t, augmentors) for t in tensors]
            tensors = [assemble_input_tensors(t) for t in tensors]
            # batchify
            keys = tensors[0].keys()
            tensors = {
                k: tf.stack([t[k] for t in tensors], axis=0)
                for k in keys
            }
        return tensors
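
Examples #1, #2, #5, and #6 pass the dataset's void label to parse_augmentors, while #3 and #4 pass the whole config object. A minimal, self-contained sketch of what the first variant of such a factory could look like; the FlipAugmentor class, its numpy-based implementation, and the registry of names are illustrative assumptions, not the actual code behind these examples:

    import random

    class FlipAugmentor(object):
        # Illustrative augmentor: mirrors every array in the example
        # left-right with probability 0.5, using one shared decision so
        # image and label stay aligned.
        def apply(self, tensors):
            if random.random() < 0.5:
                tensors = {k: v[:, ::-1] if getattr(v, "ndim", 0) >= 2 else v
                           for k, v in tensors.items()}
            return tensors

    def parse_augmentors(augmentor_strs, void_label):
        # Map names from the config list to augmentor instances; void_label
        # would be forwarded to augmentors that need to pad or mask labels.
        available = {"flip": lambda: FlipAugmentor()}
        augmentors = []
        for name in augmentor_strs:
            assert name in available, "unknown augmentor: %s" % name
            augmentors.append(available[name]())
        return augmentors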
Example #2
 def _parse_augmentors_and_shuffle(self):
     if self.subset == "train":
         shuffle = True
         augmentor_strs = self.config.unicode_list("augmentors_train", [])
         augmentors = parse_augmentors(augmentor_strs, self.void_label())
         if len(augmentors) == 0:
             print("warning, no data augmentors used on train", file=log.v1)
     else:
         shuffle = False
         augmentor_strs = self.config.unicode_list("augmentors_val", [])
         augmentors = parse_augmentors(augmentor_strs, self.void_label())
     return augmentors, shuffle
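
The augmentor names come straight from the experiment config. A hypothetical config fragment that the unicode_list("augmentors_train", []) and unicode_list("augmentors_val", []) calls above would pick up ("scale" and "translation" appear in the asserts of Examples #1 and #5; the concrete config format is an assumption):

    "augmentors_train": ["flip", "scale", "translation"],
    "augmentors_val": [],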
Example #3
 def jointly_augment_examples_after_resize(self, tensors_batch):
     augmentors_str = self.config.string_list("augmentors_" + self.subset,
                                              [])
     augmentors = parse_augmentors(augmentors_str, self.config)
     for aug in augmentors:
         tensors_batch = aug.batch_apply_after_resize(tensors_batch)
     return tensors_batch
Example #4
 def augment_example_after_resize(self, tensors):
     augmentors_str = self.config.string_list("augmentors_" + self.subset,
                                              [])
     augmentors = parse_augmentors(augmentors_str, self.config)
     for aug in augmentors:
         tensors = aug.apply_after_resize(tensors)
     return tensors
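
Examples #3 and #4 rely on a second pair of hooks, apply_after_resize and batch_apply_after_resize, for augmentations that must run after every example has been resized to a common shape. A sketch of a flip augmentor implementing both hooks with TF1-style ops; the class name and the HWC/NHWC tensor layouts are assumptions:

    import tensorflow as tf

    class FlipAfterResizeAugmentor(object):
        # Hypothetical augmentor; assumes each example is a dict of HWC
        # tensors and each batch a dict of NHWC tensors.
        def apply_after_resize(self, tensors):
            flip = tf.random_uniform([]) < 0.5
            # One shared random decision so image and label stay aligned
            # (width is axis 1 for HWC).
            return {k: tf.cond(flip,
                               lambda v=v: tf.reverse(v, axis=[1]),
                               lambda v=v: v)
                    for k, v in tensors.items()}

        def batch_apply_after_resize(self, tensors_batch):
            flip = tf.random_uniform([]) < 0.5
            # Same idea for the whole batch (width is axis 2 for NHWC).
            return {k: tf.cond(flip,
                               lambda v=v: tf.reverse(v, axis=[2]),
                               lambda v=v: v)
                    for k, v in tensors_batch.items()}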
Example #5
    def _prepare_augmented_batch(self, tensors, batch_size, image_size=None):
        if self.subset == "train":
            assert batch_size == 1, "only implemented for 1 so far"
            augmentor_strs = self.config.unicode_list("augmentors_train", [])
            augmentors = parse_augmentors(augmentor_strs, self.void_label())
            for augmentor in augmentors:
                tensors = augmentor.apply(tensors)
            if image_size is None:
                image_size = self.image_size
            tensors = assemble_input_tensors(tensors, image_size)

            # batchify
            keys = list(tensors.keys())
            tensors = {k: tf.expand_dims(tensors[k], axis=0) for k in keys}

            # for testing
            # from datasets.Util.Normalization import unnormalize
            # summ0 = tf.summary.image("inputs", unnormalize(tensors["inputs"][:, :, :, :3]))
            # summ1 = tf.summary.image("labels", tensors["labels"] * 255)
            # summ2 = tf.summary.image("old_labels", tensors["inputs"][:, :, :, 3:4])
            # flow0 = tensors["inputs"][:, :, :, 4:5]
            # flow0 -= tf.reduce_min(flow0)
            # flow0 /= tf.reduce_max(flow0)
            # flow1 = tensors["inputs"][:, :, :, 5:6]
            # flow1 -= tf.reduce_min(flow1)
            # flow1 /= tf.reduce_max(flow1)
            # summ3 = tf.summary.image("flow_future_0", flow0)
            # summ4 = tf.summary.image("flow_future_1", flow1)
            # self.summaries += [summ0, summ1, summ2, summ3, summ4]
        else:
            augmentor_strs = self.config.unicode_list("augmentors_val", [])
            assert "scale" not in augmentor_strs, "scale augmentation during test time not implemented yet"
            assert "translation" not in augmentor_strs, "translation augmentation during test time not implemented yet"
            augmentors = parse_augmentors(augmentor_strs, self.void_label())
            tensors = [tensors for _ in range(batch_size)]
            tensors = [apply_augmentors(t, augmentors) for t in tensors]
            tensors = [assemble_input_tensors(t) for t in tensors]
            # batchify
            keys = list(tensors[0].keys())
            tensors = {
                k: tf.stack([t[k] for t in tensors], axis=0)
                for k in keys
            }
        return tensors
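
The commented-out block in the train branch above visualizes the assembled inputs in TensorBoard. The unnormalize helper it imports is not shown here; a plausible sketch, assuming the inputs were normalized with fixed per-channel mean and std (the constants below are placeholders, not the dataset's real values):

    # Hypothetical inverse of a per-channel mean/std normalization.
    RGB_MEAN = [0.485, 0.456, 0.406]
    RGB_STD = [0.229, 0.224, 0.225]

    def unnormalize(img):
        # img: NHWC tensor with 3 channels in the last dimension
        return img * RGB_STD + RGB_MEAN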
Example #6
    def __init__(self, config, subset, coord):
        super(CUHK03Dataset, self).__init__(subset)
        assert subset in ("train", "valid"), subset
        self.ignore_classes = None
        self.config = config
        self.subset = subset
        self.coord = coord
        self.data_dir = config.unicode("data_dir", CUHK03_DEFAULT_PATH)
        self.model = config.unicode("model", "")
        self.train_folder = config.unicode("train_folder", "train/")
        self.epoch_length = config.int("epoch_length", 1000)
        self.n_classes = config.int("num_classes", None)
        self.input_size = config.int_list("input_size", DEFAULT_INPUT_SIZE)
        self.input_size = tuple(self.input_size)
        self.batching_mode = config.unicode("batching_mode", "single")
        assert self.batching_mode in ("single", "pair",
                                      "group"), self.batching_mode
        self.validation_mode = config.unicode("validation_mode", "embedding")
        assert self.validation_mode in ("embedding",
                                        "similarity"), self.validation_mode
        self.group_size = config.int("group_size", 4)
        self.pair_ratio = config.float("pair_ratio", 1.0)
        augmentor_strings = self.config.unicode_list("augmentors_train", [])
        self.augmentors = parse_augmentors(augmentor_strings,
                                           self.void_label())

        # file names are assumed to look like "<person_id>_<index>.<ext>",
        # e.g. "0007_03.png"; count how many images each identity has
        self.train_names = sorted(os.listdir(self.data_dir +
                                             self.train_folder))
        train_val = numpy.array([(int(r.split('_')[0]),
                                  int(r.split('_')[1].split('.')[0]))
                                 for r in self.train_names])
        train_id_list, train_counts = numpy.unique(train_val[:, 0],
                                                   return_counts=True)
        self.train_counts = tf.constant(train_counts.astype(numpy.int32))
        self.num_train_id = train_id_list.shape[0]

        self.num_test_id = 6

        self.idx_placeholder = tf.placeholder(tf.int32, (4, ), "idx")
        self.test_case = tf.placeholder(tf.string)
        self.use_end_network = tf.placeholder(tf.bool)
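
Example #6 reads every setting from the config object. A hypothetical set of entries matching the keys and defaults read in the constructor (the key names come from the code above; all values below are made up for illustration):

    "data_dir": "/path/to/cuhk03/",
    "train_folder": "train/",
    "epoch_length": 1000,
    "num_classes": 100,
    "input_size": [256, 128],
    "batching_mode": "group",
    "group_size": 4,
    "validation_mode": "embedding",
    "pair_ratio": 1.0,
    "augmentors_train": ["flip"],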