Example #1
0
def handler(event, context):
    '''
    Lambda entry point: poll S3 for status markers of a job whose id is
    carried in the event data.

    Returns the event unchanged once the job has succeeded; raises
    EC2StartingException when the job has not started yet (EC2 still
    booting), StillRunningException while it is in flight, and a plain
    Exception pointing at the log when an error marker exists.
    '''
    # All three s3Utils buckets point at the log bucket from the config.
    log_bucket = event['config']['log_bucket']
    s3 = utils.s3Utils(log_bucket, log_bucket, log_bucket)

    # Status-marker keys derived from the job id.
    jobid = event['jobid']
    started_key = "%s.job_started" % jobid
    success_key = "%s.success" % jobid
    error_key = "%s.error" % jobid
    log_key = "%s.log" % jobid
    log_url = "https://s3.amazonaws.com/%s/%s" % (log_bucket,
                                                  log_key)

    # Guard: the job must at least have started, otherwise EC2 is booting.
    if not s3.does_key_exist(started_key):
        raise EC2StartingException(
            "Failed to find jobid %s, ec2 is probably still booting" % jobid)

    # Guard: an error marker means the run failed — point at the log.
    if s3.does_key_exist(error_key):
        raise Exception("Job encountered an error check log at %s" %
                        log_url)

    # No success marker yet: signal the step-function to retry later.
    if not s3.does_key_exist(success_key):
        raise StillRunningException("job %s still running" % jobid)

    print("completed successfully")
    return event
Example #2
0
def calc_ebs_size(bucket, key):
    """Return the EBS volume size in GB needed for the object at *key*.

    Asks s3Utils for the object's size in GB with a 3 GB headroom buffer
    (add_gb=3), then enforces a 10 GB minimum so tiny inputs still get a
    usable volume.

    :param bucket: S3 bucket name (also used as the s3Utils sys/outfile bucket)
    :param key: object key whose size drives the volume size
    :return: volume size in GB, never less than 10
    """
    s3 = s3Utils(bucket, bucket, bucket)
    size = s3.get_file_size(key, bucket, add_gb=3, size_in_gb=True)
    # Removed a leftover `pdb.set_trace()` debugger breakpoint that would
    # hang any non-interactive invocation of this function.
    return max(size, 10)
Example #3
0
def mocked_s3():
    '''
    Build an s3Utils wired to Moto-backed buckets.

    Deliberately not a pytest fixture: it must run inside the s3 mock
    decorators applied to the associated tests, so Moto intercepts the
    bucket creation in its 'virtual' AWS account.
    '''
    resource = boto3.resource('s3', region_name='us-east-1')
    for bucket in ('dbmi-repo-registry', 'tc-systems'):
        resource.create_bucket(Bucket=bucket)

    return s3Utils(outfile_bucket='tc-systems',
                   sys_bucket='dbmi-repo-registry')
Example #4
0
def s3(check_task_input):
    """Build an s3Utils pointed entirely at the task's log bucket."""
    config = check_task_input['config']
    log_bucket = config['log_bucket']
    return utils.s3Utils(log_bucket, log_bucket, log_bucket)
Example #5
0
 def __init__(self, bucket, key, runner, accession=None):
     """Record the job coordinates and open an s3Utils handle on *bucket*.

     :param bucket: S3 bucket name, used for all three s3Utils buckets
     :param key: object key this job operates on
     :param runner: runner object driving the job
     :param accession: optional accession identifier
     """
     self.runner = runner
     self.accession = accession
     self.key = key
     self.bucket = bucket
     # All three s3Utils buckets (outfile/sys/raw) collapse onto *bucket*.
     self.s3 = utils.s3Utils(self.bucket, self.bucket, self.bucket)
Example #6
0
'''
KISS scheduler, simply store list of registered urls on s3 file
'''
import time
from core.utils import s3Utils
from botocore.exceptions import ClientError
import logging
from concurrent.futures import ThreadPoolExecutor


# Module-level logger named after this module.
log = logging.getLogger(__name__)

# Shared s3Utils handle used by this scheduler module; 'dbmi-repo-registry'
# is the sys bucket where registered name->url files are stored.
s3 = s3Utils(outfile_bucket='tc-systems', sys_bucket='dbmi-repo-registry')


def register(name, url, s3utils, update=False):
    '''
    Register a service by writing a new file to s3 with key=name and
    data=url.

    :param name: registry key (s3 object key)
    :param url: url to store as the object's contents
    :param s3utils: s3Utils-like object providing read_s3 and s3_put
    :param update: when False (default), refuse to overwrite an existing
        registration
    :return: result of s3utils.s3_put
    :raises Exception: if *name* is already registered and update is False
    '''
    try:
        exists = s3utils.read_s3(name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchKey':
            # Missing key simply means "not registered yet".
            exists = False
        else:
            # Bug fix: previously any other ClientError was swallowed,
            # leaving `exists` unbound and causing an UnboundLocalError
            # below. Propagate unexpected S3 errors instead.
            raise

    if exists and update is False:
        raise Exception("Already registered, use update option")

    return s3utils.s3_put(url, name)

Example #7
0
def s3_utils():
    """Return an s3Utils bound to the current tibanna environment."""
    env_name = tibanna_env()['_tibanna']['env']
    return s3Utils(env=env_name)