def _update_lambda_function_code(awsclient, function_name, artifact_bucket=None, zipfile=None): client_lambda = awsclient.get_client('lambda') if not zipfile: return 1 local_hash = create_sha256(zipfile) # print ('getting remote hash') # print local_hash remote_hash = get_remote_code_hash(awsclient, function_name) # print remote_hash if local_hash == remote_hash: print('Code hasn\'t changed - won\'t upload code bundle') else: if not artifact_bucket: log.info('no stack bucket found') response = client_lambda.update_function_code( FunctionName=function_name, ZipFile=zipfile, Publish=True) else: # reuse the zipfile we already created! dest_key, e_tag, version_id = \ s3_upload(awsclient, artifact_bucket, zipfile, function_name) # print dest_key, e_tag, version_id response = client_lambda.update_function_code( FunctionName=function_name, S3Bucket=artifact_bucket, S3Key=dest_key, S3ObjectVersion=version_id, Publish=True) print(json2table(response)) return 0
def _update_lambda_configuration(awsclient, function_name, role,
                                 handler_function, description, timeout,
                                 memory, subnet_ids=None,
                                 security_groups=None):
    """Update the configuration of an existing lambda function.

    :param awsclient: client factory providing ``get_client``
    :param function_name: name of the lambda function
    :param role: IAM role ARN for the function
    :param handler_function: handler entry point ('module.function')
    :param description: function description
    :param timeout: execution timeout in seconds
    :param memory: memory size in MB
    :param subnet_ids: VPC subnet ids (optional)
    :param security_groups: VPC security group ids (optional)
    :return: published function version
    """
    client_lambda = awsclient.get_client('lambda')
    kwargs = {
        'FunctionName': function_name,
        'Role': role,
        'Handler': handler_function,
        'Description': description,
        'Timeout': timeout,
        'MemorySize': memory,
    }
    # attach a VPC config only when both subnets and security groups
    # are provided (partial VPC config would be rejected)
    if subnet_ids and security_groups:
        kwargs['VpcConfig'] = {
            'SubnetIds': subnet_ids,
            'SecurityGroupIds': security_groups
        }
    response = client_lambda.update_function_configuration(**kwargs)
    print(json2table(response))
    function_version = response['Version']
    return function_version
def _lambda_add_s3_event_source(awsclient, arn, event, bucket, prefix,
                                suffix):
    """Attach an S3 bucket notification to a lambda function.

    Use only prefix OR suffix.

    :param arn: lambda function ARN to notify
    :param event: S3 event name (e.g. 's3:ObjectCreated:*')
    :param bucket: bucket the notification is attached to
    :param prefix: key prefix filter (or None)
    :param suffix: key suffix filter (or None)
    :return: put-notification response rendered as a table
    """
    # http://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-notification-configuration.html
    # http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
    filter_rules = build_filter_rules(prefix, suffix)
    new_configuration = {
        'LambdaFunctionArn': arn,
        'Id': str(uuid.uuid1()),
        'Events': [event],
        'Filter': {
            'Key': {
                'FilterRules': filter_rules
            }
        }
    }
    client_s3 = awsclient.get_client('s3')
    bucket_configurations = client_s3.get_bucket_notification_configuration(
        Bucket=bucket)
    # ResponseMetadata must not be echoed back into the put call;
    # tolerate it being absent instead of raising KeyError
    bucket_configurations.pop('ResponseMetadata', None)
    # merge with any notifications already configured on the bucket
    if 'LambdaFunctionConfigurations' in bucket_configurations:
        bucket_configurations['LambdaFunctionConfigurations'].append(
            new_configuration)
    else:
        bucket_configurations['LambdaFunctionConfigurations'] = [
            new_configuration]
    response = client_s3.put_bucket_notification_configuration(
        Bucket=bucket,
        NotificationConfiguration=bucket_configurations)
    # TODO don't return a table, but success state
    return json2table(response)
def delete_lambda(awsclient, function_name, s3_event_sources=None,
                  time_event_sources=None):
    """Delete a lambda function after unwiring its event sources.

    :param awsclient: client factory providing ``get_client``
    :param function_name: name of the lambda function to delete
    :param s3_event_sources: s3 event source configs to unwire (optional)
    :param time_event_sources: cloudwatch event configs to unwire (optional)
    :return: exit_code
    """
    # None sentinels instead of mutable [] defaults (fixes the FIXME:
    # a shared default list would leak state between calls)
    if s3_event_sources is None:
        s3_event_sources = []
    if time_event_sources is None:
        time_event_sources = []
    unwire(awsclient, function_name, s3_event_sources=s3_event_sources,
           time_event_sources=time_event_sources, alias_name=ALIAS_NAME)
    client_lambda = awsclient.get_client('lambda')
    # TODO remove event source first and maybe also needed for permissions
    response = client_lambda.delete_function(FunctionName=function_name)
    print(json2table(response))
    return 0
def info(awsclient, function_name, s3_event_sources=None,
         time_event_sources=None, alias_name=ALIAS_NAME):
    """Print configuration, alias, permissions and event sources of a
    lambda function to stdout.

    :param awsclient: client factory providing ``get_client``
    :param function_name: name of the lambda function to display
    :param s3_event_sources: s3 event source configs to inspect (optional)
    :param time_event_sources: cloudwatch event configs to inspect (optional)
    :param alias_name: lambda alias to resolve (defaults to ALIAS_NAME)
    :return: 1 when the function does not exist, otherwise None
    """
    if s3_event_sources is None:
        s3_event_sources = []
    if time_event_sources is None:
        time_event_sources = []
    if not lambda_exists(awsclient, function_name):
        print(
            colored.red('The function you try to display doesn\'t ' +
                        'exist... Bailing out...'))
        return 1
    client_lambda = awsclient.get_client('lambda')
    lambda_function = client_lambda.get_function(FunctionName=function_name)
    lambda_alias = client_lambda.get_alias(FunctionName=function_name,
                                           Name=alias_name)
    lambda_arn = lambda_alias['AliasArn']
    if lambda_function is not None:
        # NOTE(review): .encode('utf-8') prints a bytes repr on Python 3 —
        # presumably a Python 2 leftover; confirm target interpreter
        print(json2table(lambda_function['Configuration']).encode('utf-8'))
        print(json2table(lambda_alias).encode('utf-8'))
        print("\n### PERMISSIONS ###\n")
        try:
            result = client_lambda.get_policy(FunctionName=function_name,
                                              Qualifier=alias_name)
            # print source-arn (service) -> resource for each statement
            policy = json.loads(result['Policy'])
            for statement in policy['Statement']:
                print('{} ({}) -> {}'.format(
                    statement['Condition']['ArnLike']['AWS:SourceArn'],
                    statement['Principal']['Service'],
                    statement['Resource']))
        except ClientError as e:
            # no policy attached is an expected, non-fatal condition
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                print("No permissions found!")
            else:
                raise e
        print("\n### EVENT SOURCES ###\n")
        # S3 Events
        client_s3 = awsclient.get_client('s3')
        for s3_event_source in s3_event_sources:
            bucket_name = s3_event_source.get('bucket')
            print('- \tS3: %s' % bucket_name)
            # NOTE(review): result is unused; looks like a leftover call
            bucket_notification = client_s3.get_bucket_notification(
                Bucket=bucket_name)
            filter_rules = build_filter_rules(
                s3_event_source.get('prefix', None),
                s3_event_source.get('suffix', None))
            response = client_s3.get_bucket_notification_configuration(
                Bucket=bucket_name)
            if 'LambdaFunctionConfigurations' in response:
                # keep only the notifications pointing at our alias arn
                # with matching filter rules
                relevant_configs, irrelevant_configs = \
                    filter_bucket_notifications_with_arn(
                        response['LambdaFunctionConfigurations'],
                        lambda_arn, filter_rules
                    )
                if len(relevant_configs) > 0:
                    for config in relevant_configs:
                        print('\t\t{}:'.format(config['Events'][0]))
                        for rule in config['Filter']['Key']['FilterRules']:
                            print('\t\t{}: {}'.format(rule['Name'],
                                                      rule['Value']))
                else:
                    print('\tNot attached')
                # TODO Beautify
                # wrapper = TextWrapper(initial_indent='\t', subsequent_indent='\t')
                # output = "\n".join(wrapper.wrap(json.dumps(config, indent=True)))
                # print(json.dumps(config, indent=True))
            else:
                print('\tNot attached')
        # CloudWatch Event
        client_events = awsclient.get_client('events')
        for time_event in time_event_sources:
            rule_name = time_event.get('ruleName')
            print('- \tCloudWatch: %s' % rule_name)
            try:
                rule_response = client_events.describe_rule(Name=rule_name)
                target_list = client_events.list_targets_by_rule(
                    Rule=rule_name,
                )["Targets"]
                if target_list:
                    print("\t\tSchedule expression: {}".format(
                        rule_response['ScheduleExpression']))
                for target in target_list:
                    print('\t\tId: {} -> {}'.format(target['Id'],
                                                    target['Arn']))
            except ClientError as e:
                # rule missing -> the event source is simply not attached
                if e.response['Error']['Code'] == 'ResourceNotFoundException':
                    print('\tNot attached!')
                else:
                    raise e
def _create_lambda(awsclient, function_name, role, handler_filename,
                   handler_function, folders, description, timeout,
                   memory, subnet_ids=None, security_groups=None,
                   artifact_bucket=None, zipfile=None,
                   runtime='python2.7'):
    """Create a new lambda function, either from an inline zip bundle or
    from a bundle staged in an artifact S3 bucket, then push the full
    configuration (incl. VPC settings) in a follow-up update.

    :param awsclient: client factory providing ``get_client``
    :param function_name: name of the lambda function to create
    :param role: IAM role ARN for the function
    :param handler_filename: file containing the handler
    :param handler_function: handler entry point ('module.function')
    :param folders: code folders (unused here; kept for the caller)
    :param description: function description
    :param timeout: execution timeout in seconds
    :param memory: memory size in MB
    :param subnet_ids: VPC subnet ids (optional)
    :param security_groups: VPC security group ids (optional)
    :param artifact_bucket: S3 bucket to stage the bundle in (optional)
    :param zipfile: code bundle bytes
    :return: published function version, or None when there is nothing to do
    """
    log.debug('create lambda function: %s' % function_name)
    # move to caller!
    # _install_dependencies_with_pip('requirements.txt', './vendored')
    client_lambda = awsclient.get_client('lambda')

    # decide where the code comes from: inline bundle vs. staged S3 object
    if not artifact_bucket:
        log.debug('create without artifact bucket...')
        code = {'ZipFile': zipfile}
    elif zipfile:
        log.debug('create with artifact bucket...')
        # stage the already-built bundle in S3, then reference the object
        dest_key, e_tag, version_id = \
            s3_upload(awsclient, artifact_bucket, zipfile, function_name)
        code = {
            'S3Bucket': artifact_bucket,
            'S3Key': dest_key,
            'S3ObjectVersion': version_id
        }
    else:
        log.debug('no zipfile and no artifact_bucket -> nothing to do!')
        # no zipfile and no artifact_bucket -> nothing to do!
        return

    response = client_lambda.create_function(
        FunctionName=function_name,
        Runtime=runtime,
        Role=role,
        Handler=handler_function,
        Code=code,
        Description=description,
        Timeout=int(timeout),
        MemorySize=int(memory),
        Publish=True)
    function_version = response['Version']
    print(json2table(response))
    # FIXME: 23.08.2016 WHY update configuration after create?
    # timing issue: the function must be available before the follow-up
    # update; is there a better way than sleep?
    time.sleep(15)
    # a better way would be to set subnet / sg via the VpcConfig argument
    # of create_function directly
    _update_lambda_configuration(awsclient, function_name, role,
                                 handler_function, description, timeout,
                                 memory, subnet_ids, security_groups)
    return function_version