def resolve(name):
    # Name is assumed to be in the form debian or debian:10
    if name.startswith('debian:'):
        try:
            _, vernum = name.split(':')
        except Exception:
            raise exceptions.VersionSpecificationError(
                'Cannot parse version: %s' % name)

        url = config.DOWNLOAD_URL_DEBIAN % {'vernum': vernum}
        checksum_url = config.CHECKSUM_URL_DEBIAN % {'vernum': vernum}

        resp = requests.head(
            url, allow_redirects=True,
            headers={'User-Agent': util_general.get_user_agent()})
        if resp.status_code != 200:
            raise exceptions.HTTPError('Failed to fetch %s, status code %d'
                                       % (url, resp.status_code))

    else:
        # No version was specified, so probe candidate version numbers
        # until we fall off the end of the published releases, then use
        # the last one that responded.
        found_any = False
        for vernum in range(9, 20):
            url = config.DOWNLOAD_URL_DEBIAN % {'vernum': vernum}
            resp = requests.head(
                url, allow_redirects=True,
                headers={'User-Agent': util_general.get_user_agent()})
            if resp.status_code != 200:
                if found_any:
                    vernum -= 1
                break
            else:
                found_any = True

        url = config.DOWNLOAD_URL_DEBIAN % {'vernum': vernum}
        checksum_url = config.CHECKSUM_URL_DEBIAN % {'vernum': vernum}

    checksums = resolver_util.fetch_remote_checksum(checksum_url)
    checksum = checksums.get(os.path.basename(url))

    LOG.with_fields({
        'name': name,
        'url': url,
        'checksum': checksum
    }).info('Image resolved')

    if checksum:
        return url, checksum, 'md5'
    return url, None, None
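# Illustrative usage only (not from the original source): the resolver
# returns a (url, checksum, checksum_type) tuple, with checksum_type
# 'md5' when the checksum listing contained the image and None otherwise.
# 'debian:11' is an example name; what actually resolves depends on the
# mirror behind config.DOWNLOAD_URL_DEBIAN.
#
#   url, checksum, checksum_type = resolve('debian:11')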
def wrapper(*args, **kwargs):
    # 'func' is the wrapped Flask view; this wrapper is the inner
    # function of a decorator and closes over it.
    i = kwargs.get('instance_from_db')
    if not i:
        return

    placement = i.placement
    if not placement:
        return
    if not placement.get('node'):
        return

    if placement.get('node') != config.NODE_NAME:
        url = 'http://%s:%d%s' % (placement['node'], config.API_PORT,
                                  flask.request.environ['PATH_INFO'])
        api_token = util_general.get_api_token(
            'http://%s:%d' % (placement['node'], config.API_PORT),
            namespace=get_jwt_identity()[0])
        r = requests.request(
            flask.request.environ['REQUEST_METHOD'], url,
            data=json.dumps(flask_get_post_body()),
            headers={'Authorization': api_token,
                     'User-Agent': util_general.get_user_agent()})

        LOG.info('Proxied %s %s returns: %d, %s' % (
            flask.request.environ['REQUEST_METHOD'], url,
            r.status_code, r.text))
        resp = flask.Response(r.text, mimetype='application/json')
        resp.status_code = r.status_code
        return resp

    return func(*args, **kwargs)
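# A hedged sketch (the name 'redirect_if_remote' is assumed, not
# necessarily the decorator name used in the original source) of how a
# proxying wrapper like the one above closes over 'func':
import functools

def redirect_if_remote(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # ... placement checks and HTTP proxying as above ...
        return func(*args, **kwargs)
    return wrapper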
def wrapper(*args, **kwargs):
    # As above, 'func' is the wrapped view held in the decorator's
    # closure. This variant forwards the raw request body, because
    # uploads are binary and must not be re-encoded as JSON.
    u = kwargs.get('upload_from_db')
    if not u:
        return
    if not u.node:
        return

    if u.node != config.NODE_NAME:
        url = 'http://%s:%d%s' % (u.node, config.API_PORT,
                                  flask.request.environ['PATH_INFO'])
        api_token = util_general.get_api_token(
            'http://%s:%d' % (u.node, config.API_PORT),
            namespace=get_jwt_identity()[0])
        r = requests.request(
            flask.request.environ['REQUEST_METHOD'], url,
            data=flask.request.get_data(cache=False, as_text=False,
                                        parse_form_data=False),
            headers={'Authorization': api_token,
                     'User-Agent': util_general.get_user_agent()})

        LOG.info('Proxied %s %s returns: %d, %s' % (
            flask.request.environ['REQUEST_METHOD'], url,
            r.status_code, r.text))
        resp = flask.Response(r.text, mimetype='application/json')
        resp.status_code = r.status_code
        return resp

    return func(*args, **kwargs)
def post(self, artifact_name=None, upload_uuid=None, source_url=None):
    u = Upload.from_db(upload_uuid)
    if not u:
        return api_base.error(404, 'upload not found')

    if u.node != config.NODE_NAME:
        url = 'http://%s:%d%s' % (u.node, config.API_PORT,
                                  flask.request.environ['PATH_INFO'])
        api_token = util_general.get_api_token(
            'http://%s:%d' % (u.node, config.API_PORT),
            namespace=get_jwt_identity()[0])
        r = requests.request(
            flask.request.environ['REQUEST_METHOD'], url,
            data=json.dumps(api_base.flask_get_post_body()),
            headers={
                'Authorization': api_token,
                'User-Agent': util_general.get_user_agent()
            })

        LOG.info('Proxied %s %s returns: %d, %s'
                 % (flask.request.environ['REQUEST_METHOD'], url,
                    r.status_code, r.text))
        resp = flask.Response(r.text, mimetype='application/json')
        resp.status_code = r.status_code
        return resp

    if not source_url:
        source_url = ('%s%s/%s'
                      % (UPLOAD_URL, get_jwt_identity()[0], artifact_name))
    a = Artifact.from_url(Artifact.TYPE_IMAGE, source_url)

    with a.get_lock(ttl=(12 * constants.LOCK_REFRESH_SECONDS),
                    timeout=config.MAX_IMAGE_TRANSFER_SECONDS):
        blob_uuid = str(uuid.uuid4())
        blob_dir = os.path.join(config.STORAGE_PATH, 'blobs')
        blob_path = os.path.join(blob_dir, blob_uuid)

        upload_dir = os.path.join(config.STORAGE_PATH, 'uploads')
        upload_path = os.path.join(upload_dir, u.uuid)

        # NOTE(mikal): we can't use os.rename() here because these paths
        # might be on different filesystems.
        shutil.move(upload_path, blob_path)
        st = os.stat(blob_path)
        b = Blob.new(
            blob_uuid, st.st_size,
            time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()),
            time.time())
        b.state = Blob.STATE_CREATED
        b.ref_count_inc()
        b.observe()
        b.request_replication()

        a.add_event('upload', None, None, 'success')
        a.add_index(b.uuid)
        a.state = Artifact.STATE_CREATED
        return a.external_view()
def resolve(name):
    # Name is assumed to be in the form ubuntu, ubuntu:18.04, or
    # ubuntu:bionic
    resp = requests.get(config.LISTING_URL_UBUNTU, allow_redirects=True,
                        headers={'User-Agent': util_general.get_user_agent()})
    if resp.status_code != 200:
        raise exceptions.HTTPError(
            'Failed to fetch %s, status code %d'
            % (config.LISTING_URL_UBUNTU, resp.status_code))

    num_to_name = {}
    name_to_num = {}
    dir_re = re.compile(
        r'.*<a href="(.*)/">.*Ubuntu Server ([0-9]+\.[0-9]+).*')
    for line in resp.text.split('\n'):
        m = dir_re.match(line)
        if m:
            num_to_name[m.group(2)] = m.group(1)
            name_to_num[m.group(1)] = m.group(2)
    LOG.with_field('versions', num_to_name).info('Found ubuntu versions')

    vernum = None
    vername = None

    if name == 'ubuntu':
        # NOTE: this is a string sort, which is safe while all listed
        # releases have two digit major versions.
        vernum = sorted(num_to_name.keys())[-1]
        vername = num_to_name[vernum]
    else:
        try:
            _, version = name.split(':')
            if version in num_to_name:
                vernum = version
                vername = num_to_name[version]
            else:
                vername = version
                vernum = name_to_num[version]
        except Exception:
            raise exceptions.VersionSpecificationError(
                'Cannot parse version: %s' % name)

    url = (config.DOWNLOAD_URL_UBUNTU
           % {'vernum': vernum, 'vername': vername})

    checksum_url = config.CHECKSUM_URL_UBUNTU % {'vername': vername}
    checksums = resolver_util.fetch_remote_checksum(checksum_url)
    # Ubuntu checksum listings prefix filenames with '*' (binary mode).
    checksum = checksums.get('*' + os.path.basename(url))

    LOG.with_fields({
        'name': name,
        'url': url,
        'checksum': checksum
    }).info('Image resolved')

    if checksum:
        return url, checksum, 'md5'
    return url, None, None
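# Illustrative only: the listing regex above yields paired maps such as
#   num_to_name = {'18.04': 'bionic', '20.04': 'focal'}
#   name_to_num = {'bionic': '18.04', 'focal': '20.04'}
# which is why 'ubuntu:bionic' and 'ubuntu:18.04' resolve to the same
# image URL.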
def fetch_remote_checksum(checksum_url):
    resp = requests.get(checksum_url,
                        headers={'User-Agent': util_general.get_user_agent()})
    if resp.status_code != 200:
        return {}

    checksums = {}
    for line in resp.text.split('\n'):
        elems = line.split()
        if len(elems) == 2:
            checksums[elems[1]] = elems[0]
    return checksums
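# Illustrative only: fetch_remote_checksum() parses the common
# "<checksum>  <filename>" listing format. A hypothetical body such as
#
#   d41d8cd98f00b204e9800998ecf8427e  debian-10-amd64.qcow2
#
# produces {'debian-10-amd64.qcow2': 'd41d8cd98f00b204e9800998ecf8427e'},
# so callers can look up a checksum by the basename of the download URL.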
def _open_connection(self, url):
    proxies = {}
    if config.HTTP_PROXY_SERVER:
        proxies['http'] = config.HTTP_PROXY_SERVER

    resp = requests.get(
        url, allow_redirects=True, stream=True,
        headers={'User-Agent': util_general.get_user_agent()},
        proxies=proxies)
    if resp.status_code != 200:
        raise exceptions.HTTPError(
            'Failed to fetch %s (status code %d)'
            % (url, resp.status_code))
    return resp
def _read_remote(target, blob_uuid, offset=0):
    api_token = util_general.get_api_token(
        'http://%s:%d' % (target, config.API_PORT),
        namespace=get_jwt_identity()[0])
    url = 'http://%s:%d/blob/%s?offset=%d' % (target, config.API_PORT,
                                              blob_uuid, offset)

    r = requests.request('GET', url, stream=True,
                         headers={
                             'Authorization': api_token,
                             'User-Agent': util_general.get_user_agent()
                         })
    for chunk in r.iter_content(chunk_size=8192):
        yield chunk
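# A minimal consumer sketch (assumed helper, not from the original
# source): stream a remote blob to a local file via _read_remote().
def _fetch_blob_to_file(target, blob_uuid, dest_path):
    with open(dest_path, 'wb') as f:
        for chunk in _read_remote(target, blob_uuid, offset=0):
            f.write(chunk)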
def resolve(name):
    # Name is assumed to be in the form cirros or cirros:0.4.0
    if name == 'cirros':
        resp = requests.get(
            config.LISTING_URL_CIRROS, allow_redirects=True,
            headers={'User-Agent': util_general.get_user_agent()})
        if resp.status_code != 200:
            raise exceptions.HTTPError(
                'Failed to fetch %s, status code %d'
                % (config.LISTING_URL_CIRROS, resp.status_code))

        versions = []
        dir_re = re.compile(
            r'.*<a href="([0-9]+\.[0-9]+\.[0-9]+)/">.*/</a>.*')
        for line in resp.text.split('\n'):
            m = dir_re.match(line)
            if m:
                versions.append(m.group(1))
        LOG.with_field('versions', versions).info('Found cirros versions')
        vernum = versions[-1]
    else:
        try:
            _, vernum = name.split(':')
        except Exception:
            raise exceptions.VersionSpecificationError(
                'Cannot parse version: %s' % name)

    url = config.DOWNLOAD_URL_CIRROS % {'vernum': vernum}

    checksum_url = config.CHECKSUM_URL_CIRROS % {'vernum': vernum}
    checksums = resolver_util.fetch_remote_checksum(checksum_url)
    checksum = checksums.get(os.path.basename(url))

    LOG.with_fields({
        'name': name,
        'url': url,
        'checksum': checksum
    }).info('Image resolved')

    if checksum:
        return url, checksum, 'md5'
    return url, None, None
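# Illustrative note (an assumption, not stated in the original source):
# versions[] preserves the order of the upstream HTML listing, so
# versions[-1] relies on the mirror listing releases oldest first. A
# directory listing like
#   <a href="0.4.0/">0.4.0/</a>
#   <a href="0.5.1/">0.5.1/</a>
# therefore resolves a plain 'cirros' to 0.5.1.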
def wrapper(*args, **kwargs):
    # As with the wrappers above, 'func' is the wrapped view from the
    # decorator's closure. This variant proxies to the network node,
    # authenticating with a system-namespace admin token.
    if not config.NODE_IS_NETWORK_NODE:
        admin_token = util_general.get_api_token(
            'http://%s:%d' % (config.NETWORK_NODE_IP, config.API_PORT),
            namespace='system')
        r = requests.request(
            flask.request.environ['REQUEST_METHOD'],
            'http://%s:%d%s' % (config.NETWORK_NODE_IP, config.API_PORT,
                                flask.request.environ['PATH_INFO']),
            data=flask.request.data,
            headers={'Authorization': admin_token,
                     'User-Agent': util_general.get_user_agent()})

        LOG.info('Returning proxied request: %d, %s'
                 % (r.status_code, r.text))
        resp = flask.Response(r.text, mimetype='application/json')
        resp.status_code = r.status_code
        return resp

    return func(*args, **kwargs)
def ensure_local(self, locks):
    with self.get_lock(config.NODE_NAME) as blob_lock:
        if self.state.value != self.STATE_CREATED:
            self.log.warning(
                'Blob not in created state, replication cancelled')
            return

        blob_dir = os.path.join(config.STORAGE_PATH, 'blobs')
        blob_path = os.path.join(blob_dir, self.uuid)
        os.makedirs(blob_dir, exist_ok=True)

        if os.path.exists(blob_path):
            self.log.info('Blob already exists!')
            self.observe()
            return

        locations = self.locations
        random.shuffle(locations)
        blob_source = locations[0]

        with open(blob_path + '.partial', 'wb') as f:
            done = False
            last_refresh = 0
            refreshable_locks = locks.copy()
            refreshable_locks.append(blob_lock)

            total_bytes_received = 0
            previous_percentage = 0

            while not done:
                bytes_in_attempt = 0
                try:
                    admin_token = util_general.get_api_token(
                        'http://%s:%d' % (blob_source, config.API_PORT))
                    url = ('http://%s:%d/blob/%s?offset=%d'
                           % (blob_source, config.API_PORT, self.uuid,
                              total_bytes_received))
                    r = requests.request(
                        'GET', url, stream=True,
                        headers={
                            'Authorization': admin_token,
                            'User-Agent': util_general.get_user_agent()
                        })

                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
                        total_bytes_received += len(chunk)
                        bytes_in_attempt += len(chunk)

                        if time.time() - last_refresh > LOCK_REFRESH_SECONDS:
                            db.refresh_locks(refreshable_locks)
                            last_refresh = time.time()

                        percentage = (total_bytes_received /
                                      int(self.size) * 100.0)
                        if (percentage - previous_percentage) > 10.0:
                            self.log.with_fields({
                                'bytes_fetched': total_bytes_received,
                                'size': int(self.size)
                            }).info('Fetch %.02f percent complete'
                                    % percentage)
                            previous_percentage = percentage

                    done = True
                    self.log.with_fields({
                        'bytes_fetched': total_bytes_received,
                        'size': int(self.size),
                        'done': done
                    }).info('HTTP request ran out of chunks')

                except (http.client.IncompleteRead,
                        urllib3.exceptions.ProtocolError,
                        requests.exceptions.ChunkedEncodingError) as e:
                    # An API error (or timeout) occurred. Retry unless we
                    # got nothing at all this attempt.
                    if bytes_in_attempt > 0:
                        self.log.info('HTTP connection dropped, retrying')
                    else:
                        self.log.error('HTTP connection dropped without '
                                       'transferring data: %s' % e)
                        raise e

        if total_bytes_received != int(self.size):
            if os.path.exists(blob_path + '.partial'):
                os.unlink(blob_path + '.partial')
            raise BlobFetchFailed('Did not fetch enough data')

        self.log.info('Completing transfer')
        os.rename(blob_path + '.partial', blob_path)

        self.log.with_fields({
            'bytes_fetched': total_bytes_received,
            'size': int(self.size)
        }).info('Fetch complete')

        self.observe()
        return total_bytes_received
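# A hedged helper sketch (assumed name, not from the original source):
# the completeness check that ensure_local() performs inline, extracted
# for clarity. The '.partial' suffix mirrors the convention used above,
# and a transfer only counts as complete once the partial file matches
# the blob's recorded size.
import os

def _partial_is_complete(blob_path, expected_size):
    partial = blob_path + '.partial'
    return (os.path.exists(partial)
            and os.path.getsize(partial) == int(expected_size))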