def _make_call(self, func, uri, *args, **kwargs):
    """Debug-log the outgoing request (method, uri, args, headers), then wrap it.

    Returns a BlackfynnRequest built from the given callable and arguments.
    """
    # Visual separator between successive calls in the debug log.
    separator = '~' * 60
    log.debug(separator)
    method_name = func.__func__.__name__
    log.debug("uri = {} {}".format(method_name, uri))
    log.debug("args = {}".format(args))
    log.debug("kwargs = {}".format(kwargs))
    log.debug("headers = {}".format(self.session.headers))
    return BlackfynnRequest(func, uri, *args, **kwargs)
def remove_old_pages(cache, mbdiff):
    """Evict the least-recently / least-frequently used timeseries pages.

    Args:
        cache: cache object exposing ``page_size``, ``index_con`` (a sqlite
            connection usable as a context manager) and ``remove_pages()``.
        mbdiff: how many megabytes over the target size the cache currently is.

    Returns:
        int: the number of page rows selected for removal.
    """
    # Heuristic page count: bytes to free, scaled by page_size, padded by 50%
    # plus 5. (The /100 divisor presumably reflects an average fill factor —
    # TODO confirm against the cache writer.)
    n = int(1.5 * ((mbdiff * 1024 * 1024) / 100) / cache.page_size) + 5

    with cache.index_con as con:
        log.debug("Cache - removing {} pages...".format(n))
        # Find the oldest / least-accessed pages. LIMIT is bound as a
        # parameter rather than interpolated into the SQL text.
        q = """
            SELECT channel, page, access_count, last_access
            FROM ts_pages
            ORDER BY last_access ASC, access_count ASC
            LIMIT ?
        """
        pages = con.execute(q, (n,)).fetchall()

        # itertools.groupby only merges *adjacent* rows with equal keys, and
        # the query orders rows by recency, not channel — sort by channel
        # first so each channel is handled with exactly one remove_pages call.
        by_channel = groupby(sorted(pages, key=lambda row: row[0]),
                             lambda row: row[0])
        for channel, group in by_channel:
            page_ids = [row[1] for row in group]
            # Remove the page files for this channel.
            cache.remove_pages(channel, *page_ids)

    # Reclaim the space freed in the index DB.
    with cache.index_con as con:
        con.execute("VACUUM")

    log.debug('Cache - {} pages removed.'.format(n))
    return n
def upload_file(
        file,
        s3_host,
        s3_port,
        s3_bucket,
        s3_keybase,
        region,
        access_key_id,
        secret_access_key,
        session_token,
        encryption_key_id,
        upload_session_id=None,
):
    """Upload ``file`` to S3 under ``s3_keybase``, with KMS server-side encryption.

    Progress is tracked via a ProgressPercentage callback registered in the
    module-level UPLOADS map under ``upload_session_id``.

    Returns:
        str: the S3 key the file was uploaded to.

    Raises:
        Whatever boto3 raises on failure; the error is logged and the
        progress tracker is flagged before re-raising.
    """
    # Progress callback, registered so other code can query upload state.
    progress = ProgressPercentage(file, upload_session_id)
    UPLOADS[upload_session_id] = progress
    try:
        # Account for dev connections: a non-Amazon, non-empty host means a
        # local/dev endpoint reached over plain HTTP with path-style addressing.
        resource_args = {}
        config_args = dict(signature_version='s3v4')
        if 'amazon' not in s3_host.lower() and s3_host:
            resource_args = dict(
                endpoint_url="http://{}:{}".format(s3_host, s3_port))
            config_args = dict(s3=dict(addressing_style='path'))

        # Connect to S3 with the caller-supplied temporary credentials.
        session = boto3.session.Session()
        s3 = session.client(
            's3',
            region_name=region,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key,
            aws_session_token=session_token,
            config=Config(**config_args),
            **resource_args)

        # Destination key: keybase plus the file's basename.
        s3_key = '{}/{}'.format(s3_keybase, os.path.basename(file))

        # Upload with KMS server-side encryption.
        s3.upload_file(
            Filename=file,
            Bucket=s3_bucket,
            Key=s3_key,
            Callback=progress,
            ExtraArgs=dict(
                ServerSideEncryption="aws:kms",
                SSEKMSKeyId=encryption_key_id))

        return s3_key
    except Exception as e:
        log.debug(e)
        progress.set_error()
        # Bare raise preserves the original traceback (``raise e`` would
        # restart it from here).
        raise
def _handle_response(self, sess, resp):
    """Validate an HTTP response and attach parsed data to it.

    Sets ``resp.data`` to the JSON-decoded body when possible, otherwise to
    the raw body bytes.

    Raises:
        UnauthorizedException: on 401/403 responses.
        requests.HTTPError: on any other non-200/201 status.
    """
    log.debug("resp = {}".format(resp))
    log.debug("resp.content = {}".format(resp.content))
    if resp.status_code in (requests.codes.forbidden,
                            requests.codes.unauthorized):
        raise UnauthorizedException()
    if resp.status_code not in (requests.codes.ok, requests.codes.created):
        resp.raise_for_status()
    try:
        # Return object from JSON.
        resp.data = json.loads(resp.content)
    except ValueError:
        # Body is not JSON (json.loads raises ValueError/JSONDecodeError);
        # still expose the raw content. A bare ``except:`` here would also
        # swallow KeyboardInterrupt/SystemExit.
        resp.data = resp.content
def compact_cache(cache, max_mb):
    """Shrink the cache until it fits within 90% of ``max_mb`` megabytes.

    Repeatedly evicts old pages; when the index DB is locked, retries with
    exponential backoff and gives up (logging an error) once the wait would
    reach 1024 seconds.
    """
    log.debug('Inspecting cache...')
    bytes_per_mb = 1024.0 * 1024
    target_mb = 0.9 * max_mb
    backoff = 2
    size_mb = cache.size / bytes_per_mb
    while size_mb > target_mb:
        log.debug('Cache - current: {:02f} MB, maximum: {} MB'.format(size_mb, max_mb))
        try:
            remove_old_pages(cache, size_mb - target_mb)
        except sqlite3.OperationalError:
            log.debug('Cache - Index DB was locked, waiting {} seconds...'.format(backoff))
            if backoff >= 1024:
                log.error('Cache - Unable to compact cache!')
                return  # silently fail; cache stays oversized
            time.sleep(backoff)
            backoff = backoff * 2
        size_mb = cache.size / bytes_per_mb
def _make_call(self, func, uri, *args, **kwargs):
    """Debug-log the request target and arguments, then build the request.

    Returns a BlackfynnRequest wrapping the given callable and arguments.
    """
    log.debug("uri = {}".format(uri))
    log.debug("args = {}".format(args))
    log.debug("kwargs = {}".format(kwargs))
    request = BlackfynnRequest(func, uri, *args, **kwargs)
    return request