def __init__(self, config, mode="all"): assert mode in ["train", "validation", "all" ], f"Should be train, validation or all, got {mode}" self.config = config self.sequence_length = 30 self.sc = SequenceDataset(MPII_Sequence(config), self.sequence_length, step=config["sequence_step_size"]) self.train = int(config["train_size"] * len(self.sc)) self.test = 1 - self.train self.sigma = config["sigma"] self.augmentation = config["augmentation"] self.aug_factor = 0.5 self.resize = iaa.Resize(self.config["resize_to"]) if self.augmentation: self.seq = iaa.Sequential( [ # iaa.Sometimes(self.aug_factor, iaa.AdditiveGaussianNoise(scale=0.05 * 255)), # iaa.Sometimes(self.aug_factor, iaa.SaltAndPepper(0.01, per_channel=False)), # iaa.Sometimes(self.aug_factor, iaa.CoarseDropout(0.01, size_percent=0.5)), iaa.Fliplr(self.aug_factor), iaa.Flipud(self.aug_factor), # iaa.Sometimes(self.aug_factor, iaa.GaussianBlur(sigma=(0, 3.0))), # iaa.LinearContrast((0.75, 1.5)), # Convert each image to grayscale and then overlay the # result with the original with random alpha. I.e. remove # colors with varying strengths. # iaa.Grayscale(alpha=(0.0, 1.0)), ], random_order=True) if mode != "all": # split_indices = np.arange(self.train) if mode == "train" else np.arange(self.train + 1, len(self.sc)) dset_indices = np.arange(len(self.sc)) train_indices, test_indices = sklearn.model_selection.train_test_split( dset_indices, train_size=float(config["train_size"]), random_state=int(config["random_state"])) if mode == "train": self.data = SubDataset(self.sc, train_indices) else: self.data = SubDataset(self.sc, test_indices) else: self.data = self.sc
def __init__(self, config, mode="all"): assert mode in ["train", "validation", "all" ], f"Should be train, validation or all, got {mode}" self.config = config self.sequence_length = 2 # if config.get("sequence_length", False) == False else config["sequence_length"] # self.sc = Animal_Sequence(config) self.sc = MPII_Sequence(config) # works if dataroot like "VOC2011/cats_meta" # TODO PROBABLY NOT CORRECT HERE self.animal = config["dataroot"].split("/")[1].split("_")[0] self.train = int(config["train_size"] * len(self.sc)) self.test = 1 - self.train self.sigma = config["sigma"] self.augmentation = config["augmentation"] self.logger = get_logger(self) self.resize = iaa.Resize(self.config["resize_to"]) self.aug_factor = 0.5 self.seq = iaa.Sequential([ iaa.Sometimes(self.aug_factor + 0.2, iaa.Fliplr()), iaa.Sometimes(self.aug_factor, iaa.Flipud()), ], random_order=True) if mode != "all": # split_indices = np.arange(self.train) if mode == "train" else np.arange(self.train + 1, len(self.sc)) dset_indices = np.arange(len(self.sc)) train_indices, test_indices = sklearn.model_selection.train_test_split( dset_indices, train_size=float(config["train_size"]), random_state=int(config["random_state"])) if mode == "train": self.data = SubDataset(self.sc, train_indices) else: self.data = SubDataset(self.sc, test_indices) else: self.data = self.sc
def __init__(self, config, mode="all"): #super().__init__(config) assert mode in ["train", "validation", "all"], f"Should be train, validatiopn or all, got {mode}" self.sc = SingleCats(config) self.train = int(0.8 * len(self.sc)) self.test = 1 - self.train self.sigma = self.sc.config["sigma"] if mode != "all": # TODO Better split e.g. split per video! split_indices = np.arange(self.train) if mode == "train" else np.arange(self.train+1, len(self.sc)) self.data = SubDataset(self.sc, split_indices) else: self.data = self.sc
def test_sub():
    D = DebugDataset(10)
    I = np.array([9, 1, 2, 4, 3, 5, 7, 6, 8, 0])
    S = SubDataset(D, I)

    ref0 = {"val": 9, "other": 9, "index_": 0}
    ref2 = {"val": 2, "other": 2, "index_": 2}
    ref6 = {"val": 7, "other": 7, "index_": 6}

    assert S[0] == ref0
    assert S[2] == ref2
    assert S[6] == ref6
    assert all(S.labels["label1"] == I)

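# Minimal sketch (an assumption, not the project's SubDataset) of the behaviour
# the test above relies on: indexing goes through `subindices`, "index_" is
# rewritten to the position inside the subset, and labels are re-indexed.
# `DebugDataset(10)` is assumed to return {"val": i, "other": i, "index_": i}
# with labels {"label1": np.arange(10)}.
import numpy as np

class TinySubDataset:
    def __init__(self, data, subindices):
        self.data = data
        self.subindices = np.asarray(subindices)

    def __len__(self):
        return len(self.subindices)

    def __getitem__(self, i):
        example = dict(self.data[self.subindices[i]])
        example["index_"] = i  # position inside the subset, as asserted above
        return example

    @property
    def labels(self):
        # Re-index every label array by the chosen subindices.
        return {k: np.asarray(v)[self.subindices]
                for k, v in self.data.labels.items()}
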
def __init__(self, config, mode="all"): assert mode in ["train", "validation", "all"], f"Should be train, validation or all, got {mode}" self.config = config self.sc = AnimalVOC2011(config) self.train = int(config["train_size"] * len(self.sc)) self.test = 1 - self.train self.sigma = config["sigma"] self.augmentation = config["augmentation"] self.aug_factor = 0.5 self.resize = iaa.Resize(self.config["resize_to"]) if self.augmentation: self.seq = iaa.Sequential([ # iaa.Sometimes(self.aug_factor, iaa.AdditiveGaussianNoise(scale=0.05 * 255)), iaa.Sometimes(self.aug_factor, iaa.SaltAndPepper(0.01, per_channel=False)), iaa.Sometimes(self.aug_factor, iaa.CoarseDropout(0.01, size_percent=0.5)), iaa.Fliplr(self.aug_factor), iaa.Flipud(self.aug_factor), iaa.Sometimes(self.aug_factor, iaa.GaussianBlur(sigma=(0, 3.0))), iaa.LinearContrast((0.75, 1.5)), # Convert each image to grayscale and then overlay the # result with the original with random alpha. I.e. remove # colors with varying strengths. iaa.Grayscale(alpha=(0.0, 1.0)), ], random_order=True) self.joints = [ [2, 0], # Nose - L_Eye [2, 1], # Nose - R_Eye [0, 3], # L_Eye - L_EarBase [1, 4], # R_Eye - R_EarBase [2, 8], # Nose - Throat [8, 9], # Throat - L_F_Elbow [8, 5], # Throat - R_F_Elbow [9, 16], # L_F_Elbow - L_F_Knee [16, 6], # L_F_Knee - L_F_Paw [5, 17], # R_F_Elbow - R_F_Knee [17, 7], # R_F_Knee - R_F_Paw [14, 18], # L_B_Elbow - L_B_Knee [18, 13], # L_B_Knee - L_B_Paw [15, 19], # R_B_Elbow - R_B_Knee [19, 13], # R_B_Knee - R_B_Paw [10, 11], # Withers - TailBase ] if self.config.get("min_kps", 0) >= 0: good_indices = [] for i, entry in enumerate(self.sc): if len(entry.get("labels_").get("kps")[:, 0].nonzero()[0]) >= self.config.get("min_kps", 0): good_indices.append(i) self.sc = SubDataset(self.sc, good_indices).data if mode != "all": # split_indices = np.arange(self.train) if mode == "train" else np.arange(self.train + 1, len(self.sc)) dset_indices = np.arange(len(self.sc)) train_indices, test_indices = sklearn.model_selection.train_test_split(dset_indices, train_size=float( config["train_size"]), random_state=int( config["random_state"])) if mode == "train": self.data = SubDataset(self.sc, train_indices) else: self.data = SubDataset(self.sc, test_indices) else: self.data = self.sc
def labels(data, i):
    return {"fid": i}

D = ExtraLabelsDataset(D, labels)
print("D")
for k, v in D.labels.items():
    print(k)
    print(np.shape(v))

S = SequenceDataset(D, 2)
print("S")
for k, v in S.labels.items():
    print(k)
    print(np.shape(v))

S = SubDataset(S, [2, 5, 10])
print("Sub")
for k, v in S.labels.items():
    print(k)
    print(np.shape(v))

U = UnSequenceDataset(S)
print("U")
for k, v in U.labels.items():
    print(k)
    print(np.shape(v))

print(len(S))
print(U.seq_len)
print(len(U))

def __init__(self, config):
    self.P = Prjoti_VUNet(config)
    # Keep only the last 10% of the examples.
    self.data = SubDataset(self.P, np.arange(int(0.9 * len(self.P)), len(self.P)))

def __init__(self, dataset, length, step=1, fid_key="fid", strategy="raise"):
    """
    Parameters
    ----------
    dataset : DatasetMixin
        Dataset from which single frame examples are taken.
    length : int
        Length of the returned sequences in frames.
    step : int
        Step between returned frames. Must be `>= 1`.
    fid_key : str
        Key in labels, at which the frame indices can be found.
    strategy : str
        How to handle bad sequences, i.e. sequences starting with a
        :attr:`fid_key` > 0.
        - ``raise``: Raise a ``ValueError``
        - ``remove``: remove the sequence
        - ``reset``: reset the frame ids of the sequence so that it starts at 0

    This dataset will have `len(dataset) - length * step` examples.
    """
    self.step = step
    self.length = length

    frame_ids = np.array(dataset.labels[fid_key])
    if frame_ids.ndim != 1 or len(frame_ids) != len(dataset):
        raise ValueError(
            "Frame ids must be supplied as a sequence of "
            "scalars with the same length as the dataset! Here we "
            "have np.shape(dataset.labels[{}]) = {}`.".format(
                fid_key, np.shape(frame_ids)))
    if not np.issubdtype(frame_ids.dtype, np.integer):
        raise TypeError(
            "Frame ids must be supplied as ints, but are {}".format(
                frame_ids.dtype))

    # Gradient
    diffs = frame_ids[1:] - frame_ids[:-1]
    # All indices where the fid is not monotonically growing
    idxs = np.array([0] + list(np.where(diffs != 1)[0] + 1))
    # Values at these indices
    start_fids = frame_ids[idxs]

    # Bad starts
    badboys = start_fids != 0
    if np.any(badboys):
        n = sum(badboys)
        i_s = "" if n == 1 else "s"
        areis = "is" if n == 1 else "are"
        id_s = "ex" if n == 1 else "ices"
        if strategy == "raise":
            raise ValueError(
                "Frame id sequences must always start with 0. "
                "There {} {} sequence{} starting with the following id{}: "
                "{} at ind{} {} in the dataset.".format(
                    areis, n, i_s, i_s, start_fids[badboys], id_s,
                    idxs[badboys]))
        elif strategy == "remove":
            idxs_stop = np.array(list(idxs[1:]) + [None])
            starts = idxs[badboys]
            stops = idxs_stop[badboys]

            bad_seq_mask = np.ones(len(dataset), dtype=bool)
            for bad_start_idx, bad_stop_idx in zip(starts, stops):
                bad_seq_mask[bad_start_idx:bad_stop_idx] = False

            good_seq_idxs = np.arange(len(dataset))[bad_seq_mask]
            dataset = SubDataset(dataset, good_seq_idxs)
            frame_ids = dataset.labels[fid_key]
        elif strategy == "reset":
            frame_ids = np.copy(frame_ids)  # Don't try to override
            idxs_stop = np.array(list(idxs[1:]) + [None])
            starts = idxs[badboys]
            stops = idxs_stop[badboys]
            vals = start_fids[badboys]
            for val, bad_sa_idx, bad_so_idx in zip(vals, starts, stops):
                frame_ids[bad_sa_idx:bad_so_idx] = (
                    frame_ids[bad_sa_idx:bad_so_idx] - val)
            dataset.labels[fid_key] = frame_ids
            frame_ids = dataset.labels[fid_key]
        else:
            raise ValueError("Strategy of SequenceDataset must be one of "
                             "`raise`, `remove` or `reset` but is "
                             "{}".format(strategy))

    top_indeces = np.where(np.array(frame_ids) >= (length * step - 1))[0]

    all_subdatasets = []
    base_indices = []
    for i in range(length * step):
        indeces = top_indeces - i
        base_indices += [indeces]
        subdset = SubDataset(dataset, indeces)
        all_subdatasets += [subdset]

    all_subdatasets = all_subdatasets[::-1]

    self.data = ExampleConcatenatedDataset(*all_subdatasets)
    self.data.set_example_pars(step=self.step)
    self.base_indices = np.array(base_indices).transpose(1, 0)[:, ::-1]

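# Hedged usage sketch: `frames` stands for any DatasetMixin-style dataset whose
# labels carry per-clip frame ids under "fid", e.g.
#     frames.labels["fid"] = [0, 1, 2, 3, 0, 1, 2, 3]   # two clips of 4 frames
# With length=3 and step=1 the dataset yields overlapping 3-frame examples that
# never cross a clip boundary:
#     seq_dset = SequenceDataset(frames, length=3, step=1, fid_key="fid")
#     example = seq_dset[0]   # built from 3 consecutive frames of one clip
# If a clip's ids start above 0 (say [5, 6, 7]), strategy="remove" drops that
# clip, while strategy="reset" shifts its ids down so it starts at 0 again.
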
def __init__(self, config, mode="all"): assert mode in ["train", "validation", "all"], f"Should be train, validation or all, got {mode}" self.config = config self.sequence_length = 2 # if config.get("sequence_length", False) == False else config["sequence_length"] self.sc = SequenceDataset(Animal_Sequence(config), self.sequence_length, step=config["sequence_step_size"]) # works if dataroot like "VOC2011/cats_meta" # TODO PROBABLY NOT CORRECT HERE self.animal = config["dataroot"].split("/")[1].split("_")[0] self.train = int(config["train_size"] * len(self.sc)) self.test = 1 - self.train self.sigma = config["sigma"] self.augmentation = config["augmentation"] self.aug_factor = 0.5 self.logger = get_logger(self) self.resize = iaa.Resize(self.config["resize_to"]) if self.augmentation: self.seq = iaa.Sequential([ iaa.Sometimes(self.aug_factor, iaa.AdditiveGaussianNoise(scale=0.05 * 255)), iaa.Sometimes(self.aug_factor, iaa.SaltAndPepper(0.01, per_channel=False)), iaa.Sometimes(self.aug_factor, iaa.CoarseDropout(0.01, size_percent=0.5)), iaa.Fliplr(self.aug_factor), iaa.Flipud(self.aug_factor), iaa.Sometimes(self.aug_factor, iaa.GaussianBlur(sigma=(0, 3.0))), iaa.LinearContrast((0.75, 1.5)), # Convert each image to grayscale and then overlay the # result with the original with random alpha. I.e. remove # colors with varying strengths. iaa.Grayscale(alpha=(0.0, 1.0)), ], random_order=True) self.joints = [ [2, 0], # Nose - L_Eye [2, 1], # Nose - R_Eye [0, 3], # L_Eye - L_EarBase [1, 4], # R_Eye - R_EarBase [2, 8], # Nose - Throat [8, 9], # Throat - L_F_Elbow [8, 5], # Throat - R_F_Elbow [9, 16], # L_F_Elbow - L_F_Knee [16, 6], # L_F_Knee - L_F_Paw [5, 17], # R_F_Elbow - R_F_Knee [17, 7], # R_F_Knee - R_F_Paw [14, 18], # L_B_Elbow - L_B_Knee [18, 13], # L_B_Knee - L_B_Paw [15, 19], # R_B_Elbow - R_B_Knee [19, 13], # R_B_Knee - R_B_Paw [10, 11], # Withers - TailBase ] if mode != "all": # split_indices = np.arange(self.train) if mode == "train" else np.arange(self.train + 1, len(self.sc)) dset_indices = np.arange(len(self.sc)) train_indices, test_indices = sklearn.model_selection.train_test_split(dset_indices, train_size=float( config["train_size"]), random_state=int( config["random_state"])) if mode == "train": self.data = SubDataset(self.sc, train_indices) else: self.data = SubDataset(self.sc, test_indices) else: self.data = self.sc