def loadlines(root, checkvalid=True):
    """Collect dataset sample lines listed under *root*.

    :param root: path to a listing file. When ``is_dict(root)`` it is a
        "dict" file whose rows are ``<class> ... <listfile>``; each
        referenced list file is read and its lines merged. Otherwise it is
        a plain file of sample paths, one per line.
    :param checkvalid: dict case — keep only rows whose class is in
        ``cfg.base_classes`` (otherwise any class in ``cfg.classes``);
        plain case — keep only paths passing ``listDataset.is_valid``.
    :return: sorted, de-duplicated lines (dict case) or list of paths.
    """
    if is_dict(root):
        # Both branches differed only in the allowed class set — dedup.
        allowed = cfg.base_classes if checkvalid else cfg.classes
        with open(root, 'r') as f:
            rows = [line.rstrip().split() for line in f.readlines()]
        # Class name is the first token, list-file path the last.
        files = [topath(row[-1]) for row in rows if row[0] in allowed]
        lines = []
        for listfile in files:
            with open(listfile, 'r') as f:
                lines.extend(f.readlines())
        lines = sorted(set(lines))  # sorted() already returns a list
    else:
        with open(root, 'r') as f:
            lines = f.readlines()
        if checkvalid:
            # Normalize once per entry instead of calling topath() twice.
            # NOTE(review): only this branch applies topath(); the
            # unchecked branch returns raw lines — confirm intended.
            lines = [p for p in (topath(l) for l in lines)
                     if listDataset.is_valid(p)]
    return lines
def stringify(self, obj):
    """Tokenize *obj* and return the accumulated tokens joined as one string.

    Only JSON arrays and dicts trigger the reader methods; any other
    object is stored but yields whatever tokens are already collected.
    """
    self._obj = obj
    if utils.is_json_array(obj):
        self._read_json_array()
    elif utils.is_dict(obj):
        self._read_dict()
    return "".join(self._tokens)
def build_section(dict_section):
    """
    Builds a JWT section (header or payload) by properly encoding content.

    :param dict_section: JSON (dict) or plain text (str) content to be encoded
    :return: JWT encoded section
    """
    # Dicts are serialized as compact JSON first; everything else is
    # encoded as-is. The base64url/force_bytes tail is shared.
    if utils.is_dict(dict_section):
        raw = json.dumps(dict_section, separators=(",", ":"))
    else:
        raw = dict_section
    return utils.base64url_encode(utils.force_bytes(raw))
def _append_token_by_type(self, value):
    """Dispatch *value* to the matching token appender by JSON type.

    Order of checks: string, number, null, booleans, then containers
    (arrays and dicts recurse via the reader methods).
    """
    # NOTE(review): if utils.is_number() counts bools as numbers
    # (bool is a subclass of int), True/False would be emitted as
    # numbers before the boolean branches — confirm it excludes bool.
    if utils.is_string(value):
        self._append_string(value)
    elif utils.is_number(value):
        self._append_number(value)
    elif value is None:
        self._append_null()
    elif value is True:  # identity, not ``== True`` (PEP 8 / E712)
        self._append_true()
    elif value is False:
        self._append_false()
    elif utils.is_json_array(value):
        self._read_json_array(value)
    elif utils.is_dict(value):
        self._read_dict(value)
def __init__(self, root, shape=None, shuffle=True, transform=None,
             target_transform=None, train=False, seen=0, batch_size=64,
             num_workers=4):
    """Dataset over image paths.

    *root* may be an already-built list of paths, a "dict" file whose
    rows reference per-class list files, or a plain listing file. In
    training mode (and when *root* is not a list) entries failing
    ``self.is_valid`` are dropped before optional shuffling.
    """
    self.train = train

    # Resolve the sample list from whichever form `root` takes.
    if isinstance(root, list):
        self.lines = root
    elif is_dict(root):
        with open(root, 'r') as listing:
            sublists = [row.rstrip().split()[-1]
                        for row in listing.readlines()]
        collected = []
        for sub in sublists:
            with open(sub, 'r') as handle:
                collected.extend(handle.readlines())
        self.lines = sorted(set(collected))
    else:
        with open(root, 'r') as handle:
            self.lines = [topath(entry) for entry in handle.readlines()]

    # Filter out images not in base classes
    print("===> Number of samples (before filtring): %d" % len(self.lines))
    if self.train and not isinstance(root, list):
        self.lines = [entry for entry in self.lines if self.is_valid(entry)]
    print("===> Number of samples (after filtring): %d" % len(self.lines))

    if shuffle:
        random.shuffle(self.lines)

    self.nSamples = len(self.lines)
    self.transform = transform
    self.target_transform = target_transform
    self.shape = shape
    self.seen = seen
    self.batch_size = batch_size
    self.num_workers = num_workers
    self.first_batch = False
def load_paths(root, checkvalid=True):
    """Collect sample paths listed under *root*.

    :param root: "dict" file of ``<class> ... <listfile>`` rows when
        ``is_dict(root)``, otherwise a plain file of sample paths.
    :param checkvalid: dict case — keep only rows whose class is in
        ``cfg.base_classes`` (otherwise any class in ``cfg.classes``);
        plain case — keep only lines passing ``is_valid``.
    :return: sorted, de-duplicated list of lines.
    """
    if is_dict(root):
        # Both branches differed only in the allowed class set — dedup.
        allowed = cfg.base_classes if checkvalid else cfg.classes
        with open(root, 'r') as f:
            rows = [line.rstrip().split() for line in f.readlines()]
        # Class name is the first token, list-file path the last.
        files = [row[-1] for row in rows if row[0] in allowed]
        lines = []
        for listfile in files:
            with open(listfile, 'r') as f:
                lines.extend(f.readlines())
        lines = sorted(set(lines))  # sorted() already returns a list
    else:
        with open(root, 'r') as f:
            lines = f.readlines()
        if checkvalid:
            # Check whether the classes of the images contain meta classes.
            lines = [l for l in lines if is_valid(l)]
    return lines
def set_payload(self, payload):
    """Store *payload* on the token.

    Non-dict payloads are wrapped as ``{"default": payload}`` and the
    token is marked as non-JSON.
    """
    if not utils.is_dict(payload):
        self.is_json = False
        payload = {"default": payload}
    self.payload = payload