def parse_manifest(source, ignore_gpg=True):
    """Parse a portage Manifest (manifest2 format only) into checksum maps.

    :param source: either a path string, or an object exposing
        ``text_fileobj()`` returning a file-like object to read from.
    :param ignore_gpg: if True, inline gpg signature armor is stripped from
        the stream before parsing.
    :return: list of four immutable mappings in the order
        ``DIST, AUX, EBUILD, MISC``; each maps filename to a slotted dict of
        checksum-name -> value (always including ``size``).
    :raise errors.ParseChksumError: for an unknown manifest type or a
        malformed entry.
    """
    types = {"DIST": {}, "AUX": {}, "EBUILD": {}, "MISC": {}}
    # manifest v2 format (see GLEP 44 for the exact rules):
    #   TYPE filename size (CHF sum)+
    # example 'type' entry, all one line:
    #   MISC metadata.xml 219 RMD160 <hex> SHA1 <hex> SHA256 <hex>
    # manifest v1 format is "CHF sum filename size"; note that we do _not_
    # support manifest1.
    chf_types = set(["size"])
    f = None
    try:
        if isinstance(source, basestring):
            stream = f = open(source, "r", 32768)
        else:
            stream = f = source.text_fileobj()
        if ignore_gpg:
            stream = gpg.skip_signatures(f)
        for data in stream:
            line = data.split()
            if not line:
                continue
            d = types.get(line[0])
            if d is None:
                raise errors.ParseChksumError(
                    source, "unknown manifest type: %s: %r" % (line[0], line))
            # 3 fixed tokens plus (name, value) pairs: total must be odd.
            if len(line) % 2 != 1:
                raise errors.ParseChksumError(
                    source,
                    "manifest 2 entry doesn't have right "
                    "number of tokens, %i: %r" % (len(line), line))
            chf_types.update(line[3::2])
            # pairwise collapse: zipping one iterator against itself turns
            # [name, sum, name, sum, ...] into (name, sum) tuples.
            # NB: use a dedicated name here -- the original rebound the
            # stream variable, which only worked because the for-loop holds
            # its own iterator reference.
            pair_iter = iter(line[3:])
            d[line[1]] = [("size", long(line[2]))] + \
                list(convert_chksums(izip(pair_iter, pair_iter)))
    finally:
        # f.close is a bound method (always truthy); the guard is kept for
        # exotic file-like objects.
        if f is not None and f.close:
            f.close()
    # finally convert it to slotted dict for memory savings.
    slotted_kls = make_SlottedDict_kls(x.lower() for x in chf_types)
    for t, d in types.iteritems():
        types[t] = mappings.ImmutableDict(
            (k, slotted_kls(v)) for k, v in d.iteritems())
    # ordering annoyingly matters.  bad api.
    return [types[x] for x in ("DIST", "AUX", "EBUILD", "MISC")]
def parse_manifest(source, ignore_gpg=True):
    """Parse a manifest2 file (GLEP 44) into per-type checksum mappings.

    :param source: a path string, or an object with a ``text_fileobj()``
        method yielding a readable file object.
    :param ignore_gpg: when True, any inline gpg signature lines are
        skipped before parsing.
    :return: mappings for DIST, AUX, EBUILD, MISC (in that order), each
        mapping filename to a slotted dict of checksum name -> value.
    :raise errors.ParseChksumError: for unknown types or malformed entries.
    """
    # manifest v2 entries look like:  TYPE filename size (CHF sum)+
    # manifest v1 ("CHF sum filename size") is deliberately unsupported.
    ordering = ("DIST", "AUX", "EBUILD", "MISC")
    types = dict((name, {}) for name in ordering)
    chf_types = set(["size"])
    f = None
    try:
        if isinstance(source, basestring):
            i = f = open(source, "r", 32768)
        else:
            i = f = source.text_fileobj()
        if ignore_gpg:
            i = gpg.skip_signatures(f)
        for data in i:
            tokens = data.split()
            if not tokens:
                continue
            entry_map = types.get(tokens[0])
            if entry_map is None:
                raise errors.ParseChksumError(
                    source,
                    "unknown manifest type: %s: %r" % (tokens[0], tokens))
            if len(tokens) % 2 != 1:
                raise errors.ParseChksumError(
                    source,
                    "manifest 2 entry doesn't have right "
                    "number of tokens, %i: %r" % (len(tokens), tokens))
            chf_types.update(tokens[3::2])
            # collapse the flat [name, sum, name, sum, ...] tail into pairs
            # by zipping a single iterator against itself.
            it = iter(tokens[3:])
            checksums = [("size", long(tokens[2]))]
            checksums.extend(convert_chksums(izip(it, it)))
            entry_map[tokens[1]] = checksums
    finally:
        if f is not None and f.close:
            f.close()
    # slotted dicts keep per-entry memory low.
    slotted_kls = make_SlottedDict_kls(x.lower() for x in chf_types)
    for name, entry_map in types.iteritems():
        types[name] = mappings.ImmutableDict(
            (fname, slotted_kls(chksums))
            for fname, chksums in entry_map.iteritems())
    # ordering annoyingly matters.  bad api.
    return [types[name] for name in ordering]
def __init__(self, auxdbkeys=None, readonly=False):
    """
    initialize the derived class; specifically, store label/keys

    :param auxdbkeys: sequence of allowed keys for each cache entry;
        defaults to ``self.default_keys`` when None.
    :param readonly: defaults to False, controls whether the cache
        is mutable.
    """
    if auxdbkeys is None:
        auxdbkeys = self.default_keys
    self._known_keys = frozenset(auxdbkeys)
    # internal bookkeeping key derived from the class's chf_type
    # attribute, stored as '_<chf_type>_' alongside normal entry keys.
    self._chf_key = '_%s_' % self.chf_type
    self._chf_serializer = self._get_chf_serializer(self.chf_type)
    self._chf_deserializer = self._get_chf_deserializer(self.chf_type)
    # the chf key must be part of the known-keys set so slotted entries
    # can carry it.
    self._known_keys |= frozenset([self._chf_key])
    # slotted dict class for low per-entry memory overhead.
    self._cdict_kls = make_SlottedDict_kls(self._known_keys)
    self.readonly = readonly
    self.set_sync_rate(self.default_sync_rate)
    # NOTE(review): presumably a counter of pending writes consumed by the
    # sync-rate machinery -- confirm against set_sync_rate/commit logic.
    self.updates = 0