def create_update_api(role_arn, function_arn, wiring):
    """Wire (or re-wire) an API Gateway path to the given lambda function.

    Creates/finds the API, the resource for the configured path part,
    attaches the lambda integration method, and enables CORS on it.
    """
    path_part = wiring['pathPart']
    logger.info('creating or updating api /{}'.format(path_part))
    api_id = get_create_api()
    resource_id = resource(api_id, path_part)
    uri = function_uri(function_arn, region())
    api_method(api_id, resource_id, role_arn, uri, wiring)
    cors(api_id, resource_id)
def _cleanup_old_versions(name):
    """Delete old published versions of lambda *name*, keeping the newest
    REVISIONS of them.

    Bug fix: when fewer than REVISIONS versions exist, the previous slice
    ``versions[0:(len(versions) - REVISIONS)]`` had a *negative* stop index,
    so it wrongly deleted versions that should have been kept. The stop
    index is now clamped at zero so a short history deletes nothing.
    """
    logger.info('cleaning up old versions of {0}. Keeping {1}'.format(
        name, REVISIONS))
    versions = _versions(name)
    # Clamp so len(versions) < REVISIONS yields an empty slice, not a
    # negative-index slice that would target versions we must keep.
    for version in versions[:max(0, len(versions) - REVISIONS)]:
        logger.debug('deleting {} version {}'.format(name, version))
        aws_lambda('delete_function', FunctionName=name, Qualifier=version)
def _load_config(config_filename):
    """Load a JSON config file.

    Returns a ``(filename, config_dict)`` tuple; the dict is empty when
    the file cannot be opened.
    """
    try:
        with open(config_filename) as fh:
            logger.info('Using config {}'.format(config_filename))
            return fh.name, json.load(fh)
    except IOError:
        logger.debug('trying to load {} (not found)'.format(config_filename))
        return config_filename, {}
def deploy(preflight):
    """Deploy the stack; when *preflight* is truthy, abort on failed checks."""
    if preflight:
        logger.info('running preflight checks')
        checks_passed = preflight_checks()
        if not checks_passed:
            # Bail out early rather than deploying on a broken setup.
            return
    logger.info('deploying')
    run()
    js_code_snippet()
def deploy_api(api_id):
    """Create a production-stage deployment for *api_id*; return its id."""
    logger.info('deploying API')
    deployment_kwargs = dict(
        restApiId=api_id,
        description='lamed deployment',
        stageName='prod',
        stageDescription='lamed production',
        cacheClusterEnabled=False,
        query='id',
    )
    return apigateway('create_deployment', **deployment_kwargs)
def rollback_lambda(name, alias=LIVE):
    """Move *alias* of lambda *name* back to the previously published version.

    Logs an error (instead of raising) when there is no earlier version to
    roll back to.
    """
    versions = _versions(name)
    current = _get_version(name, alias)
    try:
        idx = versions.index(current)
        if idx < 1:
            # Current version is the oldest one we know about.
            raise RuntimeError('Cannot find previous version')
        previous = versions[idx - 1]
        logger.info('rolling back to version {}'.format(previous))
        _function_alias(name, previous)
    except RuntimeError as err:
        logger.error('Unable to rollback. {}'.format(repr(err)))
def role():
    """Ensure the 'lamed' IAM role exists with the current policy.

    Returns the role ARN. When the role had to be created, waits briefly
    for IAM policy propagation before returning.
    """
    created = False
    try:
        logger.info('finding role')
        iam('get_role', RoleName='lamed')
    except ClientError:
        logger.info('role not found. creating')
        iam('create_role', RoleName='lamed',
            AssumeRolePolicyDocument=ASSUMED_ROLE_POLICY)
        created = True
    role_arn = iam('get_role', RoleName='lamed', query='Role.Arn')
    logger.debug('role_arn={}'.format(role_arn))
    # Always (re)apply the inline policy so updates take effect.
    logger.info('updating role policy')
    iam('put_role_policy', RoleName='lamed', PolicyName='lamed',
        PolicyDocument=POLICY)
    if created:
        from time import sleep
        # Freshly created roles are not immediately usable everywhere.
        logger.info('waiting for role policy propagation')
        sleep(5)
    return role_arn
def preflight_checks():
    """Verify AWS credentials, region, and redis connectivity.

    Returns True when everything is reachable, False otherwise (with a
    logged hint about how to fix the failing check).
    """
    logger.info('checking aws credentials and region')
    if region() is None:
        logger.error('Region is not set up. please run aws configure')
        return False
    try:
        check_aws_credentials()
    except AttributeError:
        logger.error('AWS credentials not found. please run aws configure')
        return False
    logger.info('testing redis')
    try:
        _redis().ping()
    except redis.exceptions.ConnectionError:
        logger.error('Redis ping failed. Please run lamed configure')
        return False
    return True
def prepare_zip():
    """Build (or rebuild) lamed.zip with the runtime code, config, and vendor deps."""
    from pkg_resources import resource_filename as resource
    from json import dumps

    logger.info('creating/updating lamed.zip')
    with ZipFile('lamed.zip', 'w', ZIP_DEFLATED) as zipf:
        # Embed the current config as config.json with group-writable perms.
        info = ZipInfo('config.json')
        info.external_attr = 0o664 << 16
        zipf.writestr(info, dumps(config))
        for module in ('config.py', 'lamed.py', 'logger.py'):
            zipf.write(resource('lamed', module), module)
        # Vendored third-party packages, preserved relative to the package root.
        package_root = resource('lamed', '')
        for dirpath, _dirnames, filenames in os.walk(resource('lamed', 'vendor')):
            for filename in filenames:
                absolute = os.path.join(dirpath, filename)
                zipf.write(absolute, os.path.relpath(absolute, package_root))
def _function_alias(name, version, alias=LIVE):
    """Point *alias* of lambda *name* at *version*; return the alias ARN.

    Tries to create the alias first and falls back to updating it when
    it already exists (ClientError).
    """
    try:
        logger.info('creating function alias {0} for {1}:{2}'.format(
            alias, name, version))
        alias_arn = aws_lambda('create_alias',
                               FunctionName=name,
                               FunctionVersion=version,
                               Name=alias,
                               query='AliasArn')
    except ClientError:
        # Alias exists already -- repoint it instead.
        logger.info('alias {0} exists. updating {0} -> {1}:{2}'.format(
            alias, name, version))
        alias_arn = aws_lambda('update_alias',
                               FunctionName=name,
                               FunctionVersion=version,
                               Name=alias,
                               query='AliasArn')
    return alias_arn
def js_code_snippet():
    """Log a ready-to-paste AlephBet JS snippet pointing at this deployment.

    Improvement: the template was previously interpolated with
    ``% locals()``, which implicitly couples the string to local variable
    names -- renaming a local silently breaks the output. An explicit
    mapping makes the template's inputs visible and rename-safe.
    """
    snippet_vars = {
        'api_id': get_create_api(),
        'api_region': region(),
        'endpoint': TRACK_ENDPOINT,
    }
    logger.info('AlephBet JS code snippet:')
    logger.info("""
    <!-- Copy and paste this snippet to start tracking with lamed -->

    <script src="https://unpkg.com/alephbet/dist/alephbet.min.js"></script>
    <script>

    // * javascript code snippet to track experiments with AlephBet *
    // * For more information: https://github.com/Alephbet/alephbet *

    track_url = 'https://%(api_id)s.execute-api.%(api_region)s.amazonaws.com/prod/%(endpoint)s';
    namespace = 'alephbet';

    experiment = new AlephBet.Experiment({
        name: "my a/b test",
        tracking_adapter: new AlephBet.LamedAdapter(track_url, namespace),
        // trigger: function() { ... },  // optional trigger
        variants: {
            red: {
                activate: function() {
                    // add your code here
                }
            },
            blue: {
                activate: function() {
                    // add your code here
                }
            }
        }
    });
    </script>
    """ % snippet_vars)
def create_update_lambda(role_arn, wiring):
    """Create or update the lambda described by *wiring*.

    Publishes a new version from lamed.zip, repoints the live alias at it,
    prunes old versions, and returns the alias ARN.
    """
    name = wiring['FunctionName']
    handler = wiring['Handler']
    memory = wiring['MemorySize']
    timeout = wiring['Timeout']
    try:
        logger.info('finding lambda function')
        function_arn = aws_lambda('get_function',
                                  FunctionName=name,
                                  query='Configuration.FunctionArn')
    except ClientError:
        # Function does not exist yet.
        function_arn = None
    if function_arn:
        logger.info('updating lambda function {}'.format(name))
        aws_lambda('update_function_configuration',
                   FunctionName=name,
                   Runtime='python3.8',
                   Role=role_arn,
                   Handler=handler,
                   MemorySize=memory,
                   Timeout=timeout)
        with open('lamed.zip', 'rb') as zip_file:
            function_arn, version = aws_lambda(
                'update_function_code',
                FunctionName=name,
                Publish=True,
                ZipFile=zip_file.read(),
                query='[FunctionArn, Version]')
    else:
        logger.info('creating new lambda function {}'.format(name))
        with open('lamed.zip', 'rb') as zip_file:
            function_arn, version = aws_lambda(
                'create_function',
                FunctionName=name,
                Runtime='python3.8',
                Role=role_arn,
                Handler=handler,
                MemorySize=memory,
                Timeout=timeout,
                Publish=True,
                Code={'ZipFile': zip_file.read()},
                query='[FunctionArn, Version]')
    # Repoint the live alias at the freshly published version.
    function_arn = _function_alias(name, version)
    _cleanup_old_versions(name)
    logger.debug('function_arn={} ; version={}'.format(function_arn, version))
    return function_arn
def _add_unique(pipe, key, uuid):
    # Atomically increment the counter at *key* at most once per *uuid*,
    # using a redis WATCH/MULTI/EXEC optimistic-locking retry loop on the
    # given pipeline object.
    logger.info("adding {} to {}".format(uuid, key))
    # Derive a fixed-length, opaque dedup-flag key from key+uuid.
    uuid = hashlib.sha1("{} {}".format(key, uuid).encode('utf-8')).hexdigest()
    logger.info("sha1 uuid = {}".format(uuid))
    while True:
        try:
            # WATCH the flag key: EXEC below will abort (WatchError) if any
            # other client modifies it between here and execute().
            pipe.watch(uuid)
            uuid_exists = pipe.get(uuid)
            if uuid_exists is not None:
                # Flag already set within its expiry window -> already
                # counted; do nothing.
                logger.debug("{} exists".format(uuid))
                break
            # Queue both writes so they commit together.
            pipe.multi()
            # setting a flag for the uuid with expiry time of UUID_EXPIRY
            pipe.setex(uuid, UUID_EXPIRY, "1")
            # incrementing counter for key
            pipe.incr(key)
            pipe.execute()
            logger.info("added {} to {}".format(uuid, key))
            break
        except redis.WatchError:
            # Lost the race with another writer -- retry the whole
            # watch/get/multi cycle.
            logger.debug("watch error with {} {}".format(uuid, key))
            continue
def configure():
    """Open the config file in an editor, generating a fresh one if absent."""
    if not config:
        # No usable config loaded -- write a template first.
        logger.info('generating new config {}'.format(config_filename))
        generate_config(config_filename)
    click.edit(filename=config_filename)
def preflight():
    """Run the preflight checks, reporting results through the logger."""
    logger.info('running preflight checks')
    preflight_checks()