def determine_mirror(mirror, version, releases_development):
    # if you have a saviour mirror, let's assume it's a good one
    for source, _ in http_cache.saviour_sources():
        if source != 'direct' and http_cache.is_fetcheable(source, mirror):
            return mirror
    # we can query a georedirector for a local Fedora mirror and use just
    # that one, consistently. problem is, it also yields really broken ones.
    # let's check that a mirror has at least a repomd.xml and a kernel:
    updates_repomd = f'updates/{version}/Everything/x86_64/repodata/repomd.xml'
    kernel = (f'{releases_development}/{version}'
              '/Everything/x86_64/os/isolinux/vmlinuz')
    h = requests.head(mirror + '/' + updates_repomd, allow_redirects=False)
    if h.status_code in (301, 302, 303, 307, 308) and 'Location' in h.headers:
        r = h.headers['Location'].rstrip('/').replace('https://', 'http://')
        assert r.endswith('/' + updates_repomd)
        base = r[:-len('/' + updates_repomd)]
        # good, now ensure it also has a kernel
        h = requests.head(base + '/' + kernel)
        if h.status_code != 200:
            log.warning(f'{base + "/" + kernel} -> {h.status_code}')
            log.warning(f'mirror {base} is broken, trying another one')
            return determine_mirror(mirror, version, releases_development)
        else:
            return base
    return mirror

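# A hypothetical invocation (illustrative values, not from the original code):
# pointed at the Fedora georedirector, this should settle on one concrete
# mirror that actually serves both repodata and an installer kernel.
def _example_pick_fedora_mirror():  # hypothetical helper, for illustration only
    georedirector = 'http://download.fedoraproject.org/pub/fedora/linux'
    return determine_mirror(georedirector, '38', 'releases')
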
def machines(expired_for=0):
    if expired_for != 'all':
        adjusted_time = time.time() - units.parse_time_interval(expired_for)
    for root, dirs, files in os.walk(path.MACHINES, topdown=False):
        for d in (os.path.join(root, x) for x in dirs):
            lock_path = os.path.join(root, '.' + os.path.basename(d) + '-lock')
            lock = fasteners.process_lock.InterProcessLock(lock_path)
            lock.acquire()
            try:
                remove = fingertip.machine.needs_a_rebuild(d, by=adjusted_time)
            except Exception as ex:
                log.warning(f'while processing {d}: {ex}')
                remove = True
            if (expired_for == 'all' or remove):
                assert os.path.realpath(d).startswith(
                    os.path.realpath(path.MACHINES))
                log.info(f'removing {os.path.realpath(d)}')
                if not os.path.islink(d):
                    shutil.rmtree(d)
                else:
                    os.unlink(d)
            else:
                log.debug(f'keeping {os.path.realpath(d)}')
            os.unlink(lock_path)
            lock.release()

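# A minimal usage sketch (illustrative, not part of the original code):
# 'all' is special-cased above and removes every machine directory regardless
# of expiration; any other value goes through units.parse_time_interval()
# to shift the expiration cutoff back in time.
def _example_cleanup_everything():  # hypothetical helper, for illustration only
    machines(expired_for='all')
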
def schedule():
    # Do this only if fingertip is in PATH
    if not shutil.which("fingertip"):
        log.debug('No `fingertip` found in PATH. Not scheduling '
                  'automatic cleanup.')
        return

    # Skip if systemd is not available
    if not shutil.which('systemd-run') or not shutil.which('systemctl'):
        log.warning('It looks like systemd is not available. '
                    'No cleanup is scheduled! If you are running out of disk '
                    'space, run `fingertip cleanup periodic` manually.')
        return

    # If the timer is already installed, skip installation too
    p = subprocess.run(['systemctl', '--user', 'is-active', '--quiet',
                        'fingertip-cleanup.timer'])
    if p.returncode == 0:
        log.debug('The systemd timer handling cleanup is already installed '
                  'and running.')
        return

    # Run every two hours
    log.info('Scheduling cleanup to run every two hours')
    subprocess.run(['systemd-run', '--unit=fingertip-cleanup', '--user',
                    '--on-calendar=0/2:00:00',
                    'fingertip', 'cleanup', 'periodic'])

def storage_destroy():
    mount = subprocess.run(['mount'], capture_output=True)
    if path.COW_IMAGE in mount.stdout.decode():
        log.warning('Filesystem is still mounted. Trying to unmount.')
        storage_unmount()
    if os.path.exists(path.COW_IMAGE):
        os.unlink(path.COW_IMAGE)

def has_space(how_much='2G', reserve_fraction=.5, where=None):
    where = where or tempfile.gettempdir()
    how_much = units.parse_binary(how_much)
    total, _, free = shutil.disk_usage(where)
    if not free >= how_much:
        log.warning(f'{where} does not have {how_much} of free space')
    if not free >= total * reserve_fraction:
        log.warning(f'{where} is {int((1 - reserve_fraction) * 100)}% full')
    return free >= how_much and free >= total * reserve_fraction

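# A minimal usage sketch (the size and directory are arbitrary example values):
# has_space() returns False if either the absolute amount or the reserve
# fraction is unavailable, so it can gate a large download.
def _example_space_guard():  # hypothetical helper, for illustration only
    if not has_space('10G', where=tempfile.gettempdir()):
        raise RuntimeError('not enough free space for the example download')
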
def file_has_not_changed(self, path):
    log.debug(f'checking that {path} has not changed...')
    mtime, hash_ = self._deps[path]
    if mtime != os.stat(path).st_mtime:
        if hash_ != weak_hash.of_file(path):
            log.warning(f'{path} has changed, set '
                        'FINGERTIP_IGNORE_CODE_CHANGES=1 to ignore')
            return False
    return True

def storage_destroy():
    backing_file = os.path.join(path.CACHE, 'for-machines.xfs')
    if os.path.exists(backing_file):
        # we should not remove the file if it is mounted
        mount = subprocess.run(['mount'], capture_output=True)
        if backing_file in mount.stdout.decode():
            log.warning('Filesystem is still mounted. Trying to unmount.')
            storage_unmount()
        os.unlink(backing_file)

def hack_around_unpacking(uri, headers, wrong_content):
    log.warning(f're-fetching correct content for {uri}')
    r = requests.get(uri, headers=headers, stream=True, allow_redirects=False)
    h = hashlib.sha256(wrong_content).hexdigest()
    cachefile = path.downloads('fixups', h, makedirs=True)
    if not os.path.exists(cachefile):
        with path.wip(cachefile) as wip:
            with open(wip, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
    with open(cachefile, 'rb') as f:
        return f.read()

def has_space(how_much='2G', reserve_fraction=.3, where='/tmp'):
    for suffix, power in {'G': 30, 'M': 20, 'K': 10}.items():
        if isinstance(how_much, str) and how_much.endswith(suffix):
            how_much = float(how_much[:-1]) * 2**power
            break
    total, _, free = shutil.disk_usage(where)
    if not free >= how_much:
        log.warning(f'{where} does not have {how_much} of free space')
    if not free >= total * reserve_fraction:
        log.warning(f'{where} is {int((1 - reserve_fraction) * 100)}% full')
    return free >= how_much and free >= total * reserve_fraction

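# Worked example of the binary-suffix arithmetic above:
#     '2G'   -> 2.0 * 2**30 == 2147483648.0 bytes
#     '512M' -> 512.0 * 2**20 == 536870912.0 bytes
# A plain number (e.g. how_much=10**9) passes through the loop unchanged.
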
def files_have_not_changed(self):
    if os.getenv('FINGERTIP_IGNORE_CODE_CHANGES', '0') != '0':
        return True
    for path, (mtime, hash_) in self._deps.items():
        log.debug(f'checking that {path} has not changed...')
        if mtime != os.stat(path).st_mtime:
            if hash_ != weak_hash.of_file(path):
                log.warning(f'{path} has changed, set '
                            'FINGERTIP_IGNORE_CODE_CHANGES=1 to ignore')
                return False
    return True

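# A minimal sketch of how a dependency entry is presumably recorded; the
# (mtime, hash) tuple shape is taken from how _deps is read above, but the
# method name and its placement are assumptions, not fingertip's actual API.
def _record_dependency(self, path):  # hypothetical, for illustration only
    self._deps[path] = (os.stat(path).st_mtime, weak_hash.of_file(path))
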
def remove(*paths):
    for path in paths:
        assert AUTOREMOVE_PREFIX in path
        try:
            if not os.path.isdir(path) or os.path.islink(path):
                try:
                    os.unlink(path)
                except FileNotFoundError:
                    pass
            else:
                shutil.rmtree(path, ignore_errors=True)
        except Exception as e:
            log.warning(f'cleanup error for {path}: {e}')

def dir_listing_has_not_changed(self, path):
    log.debug(f'checking that {path} listing has not changed...')
    mtime, hash_ = self._deps_dirs[path]
    try:
        if mtime != os.stat(path).st_mtime:
            listing = repr(sorted(os.listdir(path)))
            if hash_ != weak_hash.of_string(listing):
                log.warning(f'{path} listing has changed, set '
                            'FINGERTIP_IGNORE_FILE_CHANGES=1 to ignore')
                return False
    except FileNotFoundError:
        return False
    return True

def c_r_offline(self, request):
    cache_url = self.cache_url(request.url)
    log.debug(f'looking up {cache_url} in the cache')
    cache_data = self.cache.get(cache_url)
    if cache_data is None:
        log.error(f'{cache_url} not in cache and fingertip is offline')
        return False
    resp = self.serializer.loads(request, cache_data)
    if not resp:
        log.error(f'{cache_url} cache entry deserialization failed, ignored')
        return False
    log.warning(f'Using {cache_url} from offline cache')
    return resp

def is_fetcheable(source, url, timeout=2):
    if source == 'local':
        return os.path.exists(path.saviour(url))
    elif source != 'direct':
        url = source + '/' + url
        url = 'http://' + url if '://' not in source else url
    try:
        r = requests.head(url, allow_redirects=False, timeout=timeout)
        return r.status_code < 400
    except (requests.exceptions.BaseHTTPError, urllib3.exceptions.HTTPError,
            requests.exceptions.Timeout, OSError) as ex:
        log.warning(f'{ex}')
        return False
    return False

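# A minimal usage sketch (illustrative URL): callers walk saviour_sources()
# in priority order and take the first source that reports the path as
# fetcheable, falling back to the last source, as the _serve()/fetch()
# variants below do.
def _example_pick_source(url='dl.fedoraproject.org/pub/fedora/linux/'):
    sources = saviour_sources()  # ordered (source, wants_caching) pairs
    for i, (source, cache) in enumerate(sources):
        if is_fetcheable(source, url) or i == len(sources) - 1:
            return source, cache
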
def needs_a_rebuild(mpath, by=None):
    with open(os.path.join(mpath, 'machine.clpickle'), 'rb') as f:
        m = cloudpickle.load(f)
    if not m.expiration.files_have_not_changed():
        return True
    expired = m.expiration.is_expired(by)
    if expired:
        log.debug(f'{mpath} has expired at {m.expiration.pretty()}')
    else:
        log.debug(f'{mpath} is valid until {m.expiration.pretty()}')
    if OFFLINE and expired:
        log.warning(f'{mpath} expired at {m.expiration.pretty()}, '
                    'but offline mode is enabled, so, reusing it')
    return expired and not OFFLINE

def _serve(self, uri, headers, meth='GET'):
    uri = uri.lstrip('/')
    if uri in http_cache._mocks:
        return self._serve_http(uri, headers, meth, cache=False)
    sources = saviour_sources()
    for i, (source, cache) in enumerate(sources):
        if is_fetcheable(source, uri) or i == len(sources) - 1:
            if source == 'local':
                if meth == 'GET':
                    return super().do_GET()
                elif meth == 'HEAD':
                    return super().do_HEAD()
            elif source == 'direct':
                if WARN_ON_DIRECT:
                    log.warning(f'{uri} not found on any mirror')
                su = uri
            else:
                su = source + '/' + uri
                su = 'http://' + su if '://' not in source else su
            return self._serve_http(su, headers, meth, cache=cache)

def fetch(self, url, out_path):
    sources = saviour_sources()
    for i, (source, cache) in enumerate(sources):
        if is_fetcheable(source, url) or i == len(sources) - 1:
            if source == 'local':
                reflink.auto(path.saviour(url), out_path)
                return
            sess = self._get_requests_session(direct=not cache)
            if source == 'direct':
                if WARN_ON_DIRECT:
                    log.warning(f'{url} not found on any mirror')
                surl = url
            else:
                surl = source + '/' + url
                surl = 'http://' + surl if '://' not in source else surl
            log.debug(f'fetching{"/caching" if cache else ""} '
                      f'{os.path.basename(url)} from {surl}')
            r = sess.get(surl)  # not raw because that punctures cache
            with open(out_path, 'wb') as f:
                f.write(r.content)
            return

def _serve(self, uri, headers, meth='GET'):
    sess = http_cache._get_requests_session()
    headers = {k: v for k, v in headers.items()
               if not (k in STRIP_HEADERS or k.startswith('Proxy-'))}
    log.debug(f'{meth} {uri}')
    for k, v in headers.items():
        log.debug(f'{k}: {v}')
    try:
        if meth == 'GET' and not OFFLINE:
            # direct streaming might be required...
            preview = sess.head(uri, headers=headers, allow_redirects=False)
            direct = None
            if int(preview.headers.get('Content-Length', 0)) > BIG:
                direct = f'file bigger than {BIG}'
            if 'Range' in headers:
                # There seems to be a bug in CacheControl
                # that serves contents in full if a range request
                # hits a non-ranged cached entry.
                direct = 'ranged request, playing safe'
            if direct:
                # Don't cache, don't reencode, stream it as is
                log.warning(f'streaming {uri} directly ({direct})')
                r = requests.get(uri, headers=headers, stream=True)
                self._status_and_headers(r.status_code, r.headers)
                self.copyfile(r.raw, self.wfile)
                return
        # fetch with caching
        m_func = getattr(sess, meth.lower())
        r = m_func(uri if '://' in uri else 'http://self' + uri,
                   headers=headers, allow_redirects=False)
        data = r.content
        length = int(r.headers.get('Content-Length', 0))
        if len(data) != length:
            data = hack_around_unpacking(uri, headers, data)
        assert len(data) == length
        self._status_and_headers(r.status_code, r.headers)
        if meth == 'GET':
            self.wfile.write(data)
        log.info(f'{meth} {uri} served {length}')
    except BrokenPipeError:
        log.warning(f'Broken pipe for {meth} {uri}')
    except ConnectionResetError:
        log.warning(f'Connection reset for {meth} {uri}')
    except requests.exceptions.ConnectionError:
        log.warning(f'Connection error for {meth} {uri}')

def storage_setup_wizard():
    assert SETUP in ('auto', 'suggest', 'never')
    if SETUP == 'never':
        return
    size = SIZE
    os.makedirs(path.MACHINES, exist_ok=True)
    if not is_supported(path.MACHINES):
        log.warning(f'images directory {path.MACHINES} lacks reflink support')
        log.warning('without it, fingertip will thrash and fill up your SSD '
                    'in no time')
        backing_file = os.path.join(path.CACHE, 'for-machines.xfs')
        if not os.path.exists(backing_file):
            if SETUP == 'suggest':
                log.info(f'would you like to allow fingertip '
                         f'to allocate {size} at {backing_file} '
                         'for a reflink-enabled XFS loop mount?')
                log.info('(set FINGERTIP_SETUP="auto" environment variable'
                         ' to do it automatically)')
                i = input(f'[{size}]/different size/cancel/ignore> ').strip()
                if i == 'cancel':
                    log.error('cancelled')
                    sys.exit(1)
                elif i == 'ignore':
                    return
                size = i or size
            tmp = temp.disappearing_file(path.CACHE)
            create_supported_fs(tmp, size)
            os.rename(tmp, backing_file)

        log.info(f'fingertip will now mount the XFS image at {backing_file}')
        if SETUP == 'suggest':
            i = input('[ok]/skip/cancel> ').strip()
            if i == 'skip':
                log.warning('skipping; '
                            'fingertip will have no reflink superpowers')
                log.warning('tell your SSD I\'m sorry')
                return
            elif i and i != 'ok':
                log.error('cancelled')
                sys.exit(1)

        mount_supported_fs(backing_file, path.MACHINES)

def __init__(self, url, *path_components, enough_to_have=None):
    assert path_components
    self.url = url
    cache_path = path.downloads('git', *path_components, makedirs=True)
    cache_exists = os.path.exists(cache_path)
    self.path = temp.disappearing_dir(os.path.dirname(cache_path),
                                      path_components[-1])
    lock_working_copy_path = self.path + '-lock'
    lock_cache_path = cache_path + '-lock'
    lock.Lock.__init__(self, lock_working_copy_path)
    update_not_needed = None
    sources = saviour_sources()
    self.self_destruct = False
    with lock.Lock(lock_cache_path), lock.Lock(lock_working_copy_path):
        _remove(self.path)

        for i, (source, cache) in enumerate(sources):
            last_source = i == len(sources) - 1

            if cache and cache_exists and update_not_needed is None:
                cr = git.Repo(cache_path)
                update_not_needed = enough_to_have and (
                    enough_to_have in (t.name for t in cr.tags) or
                    enough_to_have in (h.name for h in cr.heads) or
                    enough_to_have in (c.hexsha for c in cr.iter_commits())
                    # that's not all revspecs, but best-effort is fine
                )
                if update_not_needed:
                    log.info(f'not re-fetching {url} from {source} '
                             f'because {enough_to_have} '
                             'is already present in cache')
                    git.Repo.clone_from(cache_path, self.path, mirror=True)
                    break

            if source == 'local':
                surl = path.saviour(url).replace('//', '/')  # workaround
                if not os.path.exists(surl) and not last_source:
                    continue
                log.info(f'cloning {url} from local saviour mirror')
                git.Repo.clone_from(surl, self.path, mirror=True)
                break
            elif source == 'direct':
                surl = url
            else:
                surl = source + '/' + url
                surl = 'http://' + surl if '://' not in source else surl

            log.info(f'cloning {url} from {source} '
                     f'cache_exists={cache_exists}...')
            try:
                # TODO: bare clone
                # no harm in referencing cache, even w/o cached+
                git.Repo.clone_from(surl, self.path, mirror=True,
                                    dissociate=True,
                                    reference_if_able=cache_path)
            except git.GitError:
                log.warning(f'could not clone {url} from {source}')
                if last_source:
                    raise
                continue
            break

        _remove(cache_path)
        reflink.auto(self.path, cache_path)
        git.Repo.__init__(self, self.path)
        self.remotes[0].set_url(url)
        self.self_destruct = True

def mirror(config, *what_to_mirror):
    total_failures = []
    failures = collections.defaultdict(list)

    with open(config) as f:
        config = ruamel.yaml.YAML(typ='safe').load(f)
    hows, whats = config['how'], config['what']
    if not what_to_mirror:
        what_to_mirror = whats.keys()
    else:
        what_to_mirror = [k for k in whats.keys()
                          if any(fnmatch.fnmatch(k, req)
                                 for req in what_to_mirror)]

    for resource_name in what_to_mirror or whats.keys():
        s = whats[resource_name]
        log.debug(f'processing {resource_name}...')

        if s is None:
            how, suffix = resource_name, ''
        elif '/' in s:
            how, suffix = s.split('/', 1)
            suffix = '/' + suffix
        else:
            how, suffix = s, ''

        try:
            how = hows[how]
        except KeyError:
            log.error(f'missing how section on {how}')
            raise SystemExit()

        url = how['url'] + suffix
        method = how['method']
        sources = (how['sources'] if 'sources' in how else [how['url']])
        sources = [s + suffix for s in sources]
        extra_args = {k: v for k, v in how.items()
                      if k not in ('url', 'sources', 'method')}

        if f'method_{method}' not in globals():
            log.error(f'unsupported method {method}')
            raise SystemExit()
        meth = globals()[f'method_{method}']

        symlink = path.saviour(url.rstrip('/'))
        # usually symlink points to data, but while we're working on it,
        # it temporarily points to a consistent snapshot of it named `snap`
        data = path.saviour('_', resource_name, 'data')
        snap = path.saviour('_', resource_name, 'snap')
        temp = path.saviour('_', resource_name, 'temp')
        lockfile = path.saviour('_', resource_name) + '-lock'
        assert data.startswith(path.SAVIOUR)
        assert snap.startswith(path.SAVIOUR)
        assert temp.startswith(path.SAVIOUR)

        sublog = log.Sublogger(f'{method} {resource_name}')
        sublog.info('locking...')
        with lock.Lock(lockfile):
            os.makedirs(os.path.dirname(snap), exist_ok=True)

            if os.path.exists(temp):
                sublog.info('removing stale temp...')
                _remove(temp)

            if os.path.exists(symlink):  # it's already published
                if os.path.exists(data) and not os.path.exists(snap):
                    # `data` is present and is the best we have to publish
                    sublog.info('snapshotting...')
                    reflink.always(data, temp, preserve=True)
                    os.rename(temp, snap)
                if os.path.exists(snap):
                    # link to a consistent snapshot while we work on `data`
                    _symlink(snap, symlink)

            for source in sources:
                sublog.info(f'trying {source}...')
                try:
                    meth(sublog, source, snap, data, **extra_args)
                    assert os.path.exists(data)
                    break
                except Exception as _:
                    traceback.print_exc()
                    failures[resource_name].append(source)
                    fingertip.util.log.warning(f'failed to mirror {source}')

            if len(failures[resource_name]) == len(sources):
                sublog.error(f'failed to mirror '
                             f'from all {len(sources)} sources')
                total_failures.append(resource_name)
                continue

            _symlink(data, symlink)
            if os.path.exists(snap):
                os.rename(snap, temp)  # move it out of the way asap
                sublog.info('removing now obsolete snapshot...')
                _remove(temp)

            try:
                deduplicate(sublog, resource_name, timeout=1)
            except lock.LockTimeout:
                log.warning('skipped deduplication, db was locked')

    if total_failures:
        fingertip.util.log.error(f'failed: {", ".join(total_failures)}')
        raise SystemExit()
    log.info('saviour has completed mirroring')

def mirror(config, *what_to_mirror, deduplicate=None):
    total_failures = []
    failures = collections.defaultdict(list)

    with open(config) as f:
        config = ruamel.yaml.YAML(typ='safe').load(f)
    if 'mirror' in config and not config['mirror']:
        log.warning('mirroring is disabled in config')
        return

    hows, whats = config['how'], config['what']
    if not what_to_mirror:
        what_to_mirror = whats.keys()
    else:
        what_to_mirror = ([k for k in whats.keys()
                           if any(fnmatch.fnmatch(k, req)
                                  for req in what_to_mirror)] +
                          [k for k in what_to_mirror if '=' in k])

    if not what_to_mirror:
        log.error('nothing to mirror')
        return

    for resource in what_to_mirror:
        log.debug(f'processing {resource}...')

        if '=' not in resource:  # example: alpine-3.13
            resource_name, tail = resource, ''
            s = whats[resource_name]
        else:  # example: alpine-3.13=alpine/v3.13/main/x86
            resource_name, s = resource.split('=', 1)
            # FIXME UGLY: config overrides are stronger than = (more syntax?)
            # TODO: whats shouldn't be a dict, I think, just a list of strings
            if resource_name in whats:
                s = whats[resource_name]

        if s is None:
            s = resource_name
        if '/' in s:
            how_name, suffix = s.split('/', 1)
            suffix = '/' + suffix
        else:
            how_name, suffix = s, ''

        try:
            how = hows[how_name]
        except KeyError:
            log.error(f'missing how section on {how_name}')
            raise SystemExit()

        url = how['url'] + suffix
        method = how['method']
        sources = (how['sources'] if 'sources' in how else [how['url']])
        sources = [s + suffix for s in sources]
        extra_args = {k: v for k, v in how.items()
                      if k not in ('url', 'sources', 'method',
                                   'validate', 'deduplicate')}

        if f'method_{method}' not in globals():
            log.error(f'unsupported method {method}')
            raise SystemExit()
        meth = globals()[f'method_{method}']

        symlink = path.saviour(url.rstrip('/'))
        # usually symlink points to data, but while we're working on it,
        # it temporarily points to a consistent snapshot of it named `snap`
        data = os.path.realpath(path.saviour('_', resource_name, 'data'))
        snap = os.path.realpath(path.saviour('_', resource_name, 'snap'))
        temp = os.path.realpath(path.saviour('_', resource_name, 'temp'))
        lockfile = path.saviour('_', resource_name) + '-lock'
        assert data.startswith(os.path.realpath(path.SAVIOUR))
        assert snap.startswith(os.path.realpath(path.SAVIOUR))
        assert temp.startswith(os.path.realpath(path.SAVIOUR))

        sublog = log.Sublogger(f'{method} {resource_name}')
        sublog.info('locking...')
        with lock.Lock(lockfile):
            os.makedirs(os.path.dirname(snap), exist_ok=True)

            if os.path.exists(temp):
                sublog.info('removing stale temp...')
                _remove(temp)

            if os.path.exists(symlink):  # it's already published
                if os.path.exists(data) and not os.path.exists(snap):
                    # `data` is present and is the best we have to publish
                    sublog.info('snapshotting...')
                    reflink.always(data, temp, preserve=True)
                    os.rename(temp, snap)
                if os.path.exists(snap):
                    # link to a consistent snapshot while we work on `data`
                    _symlink(snap, symlink)

            for source in sources:
                sublog.info(f'trying {source}...')
                try:
                    meth(sublog, source, snap, data, **extra_args)
                    assert os.path.exists(data)
                    if 'validate' in how:
                        sublog.info(f'validating with {how["validate"]}...')
                        validator = globals()[f'validate_{how["validate"]}']
                        validator(sublog, source, data)
                        sublog.info('validated')
                    break
                except Exception as _:
                    traceback.print_exc()
                    failures[resource_name].append(source)
                    fingertip.util.log.warning(f'failed to mirror {source}')

            if len(failures[resource_name]) == len(sources):
                sublog.error(f'failed to mirror '
                             f'from all {len(sources)} sources')
                total_failures.append(resource_name)
                continue
            _symlink(data, symlink)
            if os.path.exists(snap):
                os.rename(snap, temp)  # move it out of the way asap
                sublog.info('removing now obsolete snapshot...')
                _remove(temp)

            how_deduplicate = how.get('deduplicate', True)
            db_name = how_deduplicate if how_deduplicate is not True else how_name
            if how_deduplicate and deduplicate is not False:
                try:
                    _deduplicate(sublog, db_name, resource_name, timeout=1)
                except lock.LockTimeout:
                    log.warning(f'skipped deduplication of {resource_name}, '
                                f'db {db_name} was locked')

    if total_failures:
        fingertip.util.log.error(f'failed: {", ".join(total_failures)}')
        raise FailureToMirrorError(", ".join(total_failures))
    log.info('saviour has completed mirroring')

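# A rough sketch of the configuration shape mirror() expects, inferred from
# the keys read above ('how'/'what', 'url', 'method', 'sources', 'validate',
# 'deduplicate'); the concrete names, URLs and the method/validator names are
# illustrative assumptions, not a verbatim fingertip configuration.
_EXAMPLE_SAVIOUR_CONFIG = {
    'how': {
        'fedora': {
            'url': 'http://dl.fedoraproject.org/pub/fedora/linux',
            'sources': ['http://mirror.example.com/fedora/linux'],
            'method': 'rsync',       # must name an existing method_* function
            'validate': 'repodata',  # optional; must name a validate_* function
            'deduplicate': True,     # optional; may also name a shared dedup db
        },
    },
    'what': {
        # resolves to the 'fedora' how plus the '/updates/...' suffix
        'fedora-updates': 'fedora/updates/38/Everything/x86_64',
    },
}
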
def _serve_http(self, uri, headers, meth='GET', cache=True,
                retries=RETRIES_MAX):
    sess = http_cache._get_requests_session(direct=not cache)
    sess_dir = http_cache._get_requests_session(direct=True)

    basename = os.path.basename(uri)

    headers = {k: v for k, v in headers.items()
               if not (k in STRIP_HEADERS or k.startswith('Proxy-'))}
    headers['Accept-Encoding'] = 'identity'
    log.debug(f'{meth} {basename} ({uri})')
    for k, v in headers.items():
        log.debug(f'{k}: {v}')

    error = None
    try:
        if meth == 'GET':
            # direct streaming or trickery might be required...
            preview = sess.head(uri, headers=headers, allow_redirects=False)
            if (300 <= preview.status_code < 400 and
                    'Location' in preview.headers):
                nu = preview.headers['Location']
                if nu.startswith('https://'):
                    # no point in serving that, we have to pretend
                    # that never happened
                    log.debug(f'suppressing HTTPS redirect {nu}')
                    return self._serve_http(nu, headers, meth=meth,
                                            cache=cache, retries=retries)
            direct = []
            if not cache:
                direct.append('caching disabled for this source')
            if int(preview.headers.get('Content-Length', 0)) > BIG:
                direct.append(f'file bigger than {BIG}')
            if 'Range' in headers:
                # There seems to be a bug in CacheControl
                # that serves contents in full if a range request
                # hits a non-ranged cached entry.
                direct.append('ranged request, playing safe')
            if direct:
                # Don't cache, don't reencode, stream it as is
                log.debug(f'streaming {basename} directly '
                          f'from {uri} ({", ".join(direct)})')
                r = sess_dir.get(uri, headers=headers, stream=True)
                self._status_and_headers(r.status_code, r.headers)
                shutil.copyfileobj(r.raw, self.wfile)
                return

        # fetch with caching
        m_func = getattr(sess, meth.lower())
        r = m_func(uri if '://' in uri else 'http://self' + uri,
                   headers=headers, allow_redirects=False)
        data = r.content
        if 'Content-Length' in r.headers:
            length = int(r.headers['Content-Length'])
            if len(data) != length:
                data = hack_around_unpacking(uri, headers, data)
            assert len(data) == length
    except BrokenPipeError:
        error = f'Upwards broken pipe for {meth} {uri}'
    except ConnectionResetError:
        error = f'Upwards connection reset for {meth} {uri}'
    except requests.exceptions.ConnectionError:
        error = f'Upwards connection error for {meth} {uri}'
    if error:
        # delay a re-request
        if retries:
            log.warning(f'{error} (will retry x{retries})')
            t = (RETRIES_MAX - retries) / RETRIES_MAX * COOLDOWN
            time.sleep(t)
            return self._serve_http(uri, headers, meth=meth,
                                    cache=cache, retries=retries - 1)
        else:
            log.error(f'{error} (out of retries)')
            self.send_error(http.HTTPStatus.SERVICE_UNAVAILABLE)
            return
    log.debug(f'{meth} {basename} fetched {r.status_code} ({uri})')
    try:
        self._status_and_headers(r.status_code, r.headers)
        if meth == 'GET':
            self.wfile.write(data)
    except BrokenPipeError:
        log.warning(f'Downwards broken pipe for {meth} {uri}')
    except ConnectionResetError:
        log.warning(f'Downwards connection reset for {meth} {uri}')
    except requests.exceptions.ConnectionError:
        log.warning(f'Downwards connection error for {meth} {uri}')
    log.debug(f'{meth} {basename} served ({uri})')

def __init__(self, url, *path_components, enough_to_have=None):
    if not path_components:
        path_components = [url.replace('/', '::')]
    self.url = url
    cache_path = path.downloads('git', *path_components, makedirs=True)
    self.path = temp.disappearing_dir(os.path.dirname(cache_path),
                                      path_components[-1])
    lock_working_copy_path = self.path + '-lock'
    lock_cache_path = cache_path + '-lock'
    lock.Lock.__init__(self, lock_working_copy_path)
    sources = saviour_sources()
    self.self_destruct = False
    with lock.Lock(lock_cache_path), lock.Lock(lock_working_copy_path):
        cache_is_enough = False
        if os.path.exists(cache_path):
            try:
                cr = git.Repo(cache_path)
                cache_is_enough = (enough_to_have and
                                   _has_rev(cr, enough_to_have))
            except git.GitError as e:
                log.error(f'something wrong with git cache {cache_path}')
                log.error(str(e))
        _remove(self.path)

        for i, (source, cache) in enumerate(sources):
            last_source = i == len(sources) - 1

            if cache and cache_is_enough:
                log.info(f'not re-fetching {url} from {source} '
                         f'because {enough_to_have} '
                         'is already present in cache')
                git.Repo.clone_from(cache_path, self.path, mirror=True)
                break

            if source == 'local':
                surl = path.saviour(url).replace('//', '/')  # workaround
                if not os.path.exists(surl) and not last_source:
                    continue
                log.info(f'cloning {url} from local saviour mirror')
                git.Repo.clone_from(surl, self.path, mirror=True)
                break
            elif source == 'direct':
                surl = url
            else:
                surl = source + '/' + url
                surl = 'http://' + surl if '://' not in source else surl

            log.info(f'cloning {url} from {source} '
                     f'cache_exists={os.path.exists(cache_path)}...')
            try:
                # TODO: bare clone
                # no harm in referencing cache, even w/o cached+
                git.Repo.clone_from(surl, self.path, mirror=True,
                                    dissociate=True,
                                    reference_if_able=cache_path)
            except git.GitError:
                log.warning(f'could not clone {url} from {source}')
                if last_source:
                    raise
                continue
            break

        _remove(cache_path)
        reflink.auto(self.path, cache_path)
        git.Repo.__init__(self, self.path)
        self.remotes[0].set_url(url)
        self.self_destruct = True
