def __init__(self):
    super().__init__()
    self.island_num = get_process_number()
    self.file_logger = get_file_logger()
    self.lines_in_buffer = 0
    self.island_name = get_island_name(self.island_num)
    self.s3 = S3()
    self.s3_logs_dir = S3.join_paths(self.simulation_id, '/logs')
    self.island_latest_record = {}
    self.island_records_in_buffer = Counter()
    self.log_s3_location = self.s3_logs_dir + '{}.txt'.format(self.island_name)
    self.log_local_location = get_local_log_file_location()
def get_upload_url(bucket: str, path: str, metadata: dict):
    if bucket not in Config.MEDIA.BUCKETS:
        raise Exception("Invalid bucket")
    if path not in Config.MEDIA.PATHS:
        raise Exception("Invalid path")
    file_name = uuid4()
    key = f"{path}/{file_name}"
    pre_signed_url = S3().get_upload_url(bucket, key, metadata)
    return pre_signed_url | {"downloadUrl": generate_url(f'{bucket}/{key}')}
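# S3().get_upload_url() comes from the project's S3 wrapper and is not shown here.
# A minimal sketch of what it might do, assuming boto3's generate_presigned_post
# (an assumption about the wrapper, not its actual implementation): the returned
# dict carries the POST "url" and the form "fields" the client must send.
import boto3

def _get_upload_url_sketch(bucket: str, key: str, metadata: dict, expires: int = 3600) -> dict:
    client = boto3.client('s3')
    # x-amz-meta-* form fields attach user metadata to the uploaded object;
    # every field supplied must also appear in the policy conditions.
    fields = {f'x-amz-meta-{name}': value for name, value in metadata.items()}
    conditions = [{f'x-amz-meta-{name}': value} for name, value in metadata.items()]
    return client.generate_presigned_post(
        Bucket=bucket, Key=key, Fields=fields, Conditions=conditions, ExpiresIn=expires)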
def update_env(host):
    client = S3(env.aws_access_key_id, env.aws_secret_access_key)
    filename = env.proj_name + '.env'
    client.download_file(filename, env.environment_bucket, filename)
    with connection(host) as conn:
        if not exists(conn, env.shared_dir):
            logger.info("Creating Shared dir")
            conn.run('mkdir -p ' + env.shared_dir)
        # put env file in shared_dir
        conn.put(filename, env.shared_dir)
        # rename the uploaded file to .env
        conn.run('mv {}/{} {}/{}'.format(env.shared_dir, filename, env.shared_dir, '.env'))
        # create symlink to the env file inside the current release
        create_symlink(conn, os.path.join(env.shared_dir, '.env'),
                       os.path.join(env.release_dir, env.proj_name, '.env'))
    # clear environment file from local
    os.remove(filename)
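# create_symlink() is referenced above but not defined in this snippet. A minimal
# sketch, assuming a Fabric 2.x Connection and a plain `ln -sfn` (a hypothetical
# helper, not necessarily how the project implements it):
def _create_symlink_sketch(conn, source, link_name):
    # -s symbolic link, -f replace an existing link, -n do not follow an existing target
    conn.run('ln -sfn {} {}'.format(source, link_name))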
def get_resource_url(url: str, width: int, height: int) -> (str, bool):
    s3 = S3()
    # If size params are not integers, set the default size
    if type(width) != int or type(height) != int:
        [width, height] = Config.MEDIA.DEFAULT_SIZE
    if not valid_size(width, height):
        width, height = closest_valid_size(width, height)
    [bucket, path, file_name] = destructure_url(url)
    # Assert that the original file exists; if it is missing, an exception is raised.
    key = f'{path}/{file_name}'
    _, original_file_header = s3.assert_file_exists(bucket, key, True)
    # If both width and height are <= 0, return the original file
    if width <= 0 and height <= 0:
        return generate_s3_url(key), True
    # Valid sizes are appended to the key to create a new key
    s3_key = f'{key}_{width}_{height}'
    file_exists, file_header, error = s3.assert_file_exists(bucket, s3_key)
    # If the file is not found, it could be because:
    # - It has not been processed for this size yet
    # - It is in the processing queue for conversion to this size
    # We trigger the conversion to this size lazily (principle of laziness).
    # Issues like cache stampede and conversion failures are handled at the conversion handler level.
    if not file_exists:
        if error.response.get('Error', {}).get('Code') in ['404', 404]:
            # trigger new size conversion and return the original
            return generate_s3_url(key), False
        else:
            raise Exception(error)
    return generate_s3_url(s3_key), True
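# assert_file_exists() belongs to the S3 wrapper and is not shown here. A minimal
# sketch of the underlying existence check, assuming boto3 head_object (the
# wrapper's real signature and return values may differ):
import boto3
from botocore.exceptions import ClientError

def _assert_file_exists_sketch(bucket: str, key: str):
    client = boto3.client('s3')
    try:
        header = client.head_object(Bucket=bucket, Key=key)
        return True, header, None
    except ClientError as error:
        # error.response['Error']['Code'] is '404' for a missing object, which is
        # exactly what get_resource_url() checks before falling back to the original.
        return False, None, error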
def setUp(self):
    self.s3 = S3()
def _get_incoming_migrants_key(self, island_number):
    return S3.join_paths(
        self.migration_location,
        '/island_{}/'.format(island_number)) + 'incoming.obj'
def __init__(self):
    super().__init__()
    self.s3 = S3()
    self.simulation_id = get_simulation_id()
    self.migration_location = S3.join_paths(self.simulation_id, '/migrations/')
def _get_island_path(self, island_number):
    return S3.join_paths(self.migration_location, '/island_{}/'.format(island_number))
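# S3.join_paths() is part of the project's S3 wrapper and is not shown here. A
# minimal sketch of the behaviour the migration helpers above seem to rely on
# (collapse duplicate '/' at each join point, keep the last segment's trailing
# slash) -- a hypothetical stand-in, not the project's actual implementation:
def _join_paths_sketch(*segments):
    joined = segments[0]
    for segment in segments[1:]:
        joined = joined.rstrip('/') + '/' + segment.lstrip('/')
    return joined

# e.g. for simulation_id 'sim-42' and island 3, this would yield keys along the lines of
#   'sim-42/migrations/island_3/'              from _get_island_path(3)
#   'sim-42/migrations/island_3/incoming.obj'  from _get_incoming_migrants_key(3)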
def s3(self):
    return S3()
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.migration_location = S3.join_paths(self.simulation_id, '/migrations/')