Example #1
def parse_http_response(response: Response, resources_info: Dict, scar_info: Dict) -> None:
    '''Process the response generated by an API Gateway invocation.'''
    output_type = scar_info.get('cli_output')
    function_name = resources_info.get('lambda').get('name')
    asynch = resources_info.get('lambda').get('asynchronous')
    text_message = ""
    if response.ok:
        if output_type == OutputType.BINARY.value:
            output_file = scar_info.get('output_file', '')
            with open(output_file, "wb") as out:
                out.write(StrUtils.decode_base64(response.text))
            text_message = f"Output saved in file '{output_file}'"
        else:
            text_message = f"Request Id: {response.headers['amz-lambda-request-id']}"
            if asynch:
                text_message += f"\nFunction '{function_name}' launched correctly"
            else:
                text_message += f"\nLog Group Name: {response.headers['amz-log-group-name']}\n"
                text_message += f"Log Stream Name: {response.headers['amz-log-stream-name']}\n"
                text_message += StrUtils.base64_to_utf8_string(response.text)
    else:
        if asynch and response.status_code == 502:
            text_message = f"Function '{function_name}' launched successfully."
        else:
            error = json.loads(response.text)
            if 'message' in error:
                text_message = f"Error ({response.reason}): {error['message']}"
            else:
                text_message = f"Error ({response.reason}): {error['exception']}"
    logger.info(text_message)
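
Several of these examples branch on OutputType values. The enum itself is not part of this listing; a plausible definition, assumed here purely for readability, would be:

from enum import Enum

class OutputType(Enum):
    # Member values are an assumption; only the member names appear in the examples.
    PLAIN_TEXT = 1
    JSON = 2
    VERBOSE = 3
    BINARY = 4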
Example #2
File: utils.py Project: grycap/scar
 def execute_command_with_msg(command: List[str], cmd_wd: Optional[str] = None,
                              cli_msg: str = '') -> str:
     """Execute the specified command and return the result."""
     cmd_out = subprocess.check_output(command, cwd=cmd_wd).decode('utf-8')
     logger.debug(cmd_out)
     logger.info(cli_msg)
     # Strip the trailing newline from the command output
     return cmd_out[:-1]
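
A hypothetical invocation of this helper (the command and message are illustrative):

# Hypothetical usage: run a shell command and log a completion message.
listing = execute_command_with_msg(['ls', '-la'], cli_msg='Listing finished')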
Example #3
    def _create_s3_buckets(self, resources_info: Dict) -> None:
        if resources_info.get('lambda').get('input', False):
            s3_service = S3(resources_info)
            for bucket in resources_info.get('lambda').get('input'):
                if bucket.get('storage_provider') == 's3':
                    bucket_name, folders = s3_service.create_bucket_and_folders(
                        bucket.get('path'))
                    Lambda(resources_info).link_function_and_bucket(
                        bucket_name)
                    s3_service.set_input_bucket_notification(
                        bucket_name, folders)
                    if not folders:
                        logger.info(
                            f'Input bucket "{bucket_name}" successfully created'
                        )

        if resources_info.get('lambda').get('output', False):
            s3_service = S3(resources_info)
            for bucket in resources_info.get('lambda').get('output'):
                if bucket.get('storage_provider') == 's3':
                    bucket_name, folders = s3_service.create_bucket_and_folders(
                        bucket.get('path'))
                    if not folders:
                        logger.info(
                            f'Output bucket "{bucket_name}" successfully created'
                        )
Example #4
def parse_http_response(response, function_name, asynch, output_type,
                        output_file):
    if response.ok:
        if output_type == OutputType.BINARY:
            with open(output_file, "wb") as out:
                out.write(StrUtils.decode_base64(response.text))
            text_message = f"Output saved in file '{output_file}'"
        else:
            text_message = f"Request Id: {response.headers['amz-lambda-request-id']}"
            if asynch:
                text_message += f"\nFunction '{function_name}' launched correctly"
            else:
                text_message += f"\nLog Group Name: {response.headers['amz-log-group-name']}\n"
                text_message += f"Log Stream Name: {response.headers['amz-log-stream-name']}\n"
                text_message += json.loads(response.text)["udocker_output"]
    else:
        if asynch and response.status_code == 502:
            text_message = f"Function '{function_name}' launched sucessfully."
        else:
            error = json.loads(response.text)
            if 'message' in error:
                text_message = f"Error ({response.reason}): {error['message']}"
            else:
                text_message = f"Error ({response.reason}): {error['exception']}"
    logger.info(text_message)
Example #5
 def delete_ecr_image(resources_info: Dict) -> None:
     """Delete the ECR repository created in _create_ecr_image function."""
     ecr_cli = ECR(resources_info)
     repo_name = resources_info.get('lambda').get('name')
     if ecr_cli.get_repository_uri(repo_name):
         logger.info('Deleting ECR repo: %s' % repo_name)
         ecr_cli.delete_repository(repo_name)
Example #6
def _print_generic_response(response,
                            output_type,
                            aws_output,
                            text_message=None,
                            json_output=None,
                            verbose_output=None,
                            output_file=None):
    if output_type == OutputType.BINARY.value:
        with open(output_file, "wb") as out:
            out.write(StrUtils.decode_base64(response['Payload']['body']))
    elif output_type == OutputType.PLAIN_TEXT.value:
        output = text_message
        logger.info(output)
    else:
        if output_type == OutputType.JSON.value:
            output = json_output if json_output else {
                aws_output: {
                    'RequestId': response['ResponseMetadata']['RequestId'],
                    'HTTPStatusCode': response['ResponseMetadata']['HTTPStatusCode']
                }
            }
        elif output_type == OutputType.VERBOSE.value:
            output = verbose_output if verbose_output else {
                aws_output: response
            }
        logger.info_json(output)
Example #7
 def log(self):
     index = 0
     if len(self.aws_resources) > 1:
         index = _choose_function(self.aws_resources)
     # We only return the logs of one function each time
     if index >= 0:
         logger.info(CloudWatchLogs(self.aws_resources[index]).get_aws_logs())
Example #8
 def download_file(self, bucket_name, file_key, file_path):
     kwargs = {'Bucket': bucket_name, 'Key': file_key}
     logger.info(
         f"Downloading file '{file_key}' from bucket '{bucket_name}' in path '{file_path}'."
     )
     with open(file_path, 'wb') as file:
         kwargs['Fileobj'] = file
         self.client.download_file(**kwargs)
Example #9
File: s3.py Project: secobau/scar
 def delete_bucket_notification(self, bucket_name, function_arn):
     bucket_conf = self.client.get_notification_configuration(bucket_name)
     if bucket_conf and "LambdaFunctionConfigurations" in bucket_conf:
         lambda_conf = bucket_conf["LambdaFunctionConfigurations"]
         filter_conf = [x for x in lambda_conf if x['LambdaFunctionArn'] != function_arn]
         notification = { "LambdaFunctionConfigurations": filter_conf }
         self.client.put_notification_configuration(bucket_name, notification)
         logger.info("Bucket notifications successfully deleted")
Example #10
 def _delete_job_definitions(self) -> None:
     # Get main job definition
     kwargs = {"jobDefinitionName": self.function_name}
     job_info = self.client.describe_job_definitions(**kwargs)
     for job_def in _get_job_definitions(job_info):
         kwars = {"jobDefinition": job_def}
         self.client.deregister_job_definition(**kwars)
     logger.info("Job definitions successfully deleted.")
Example #11
 def _delete_valid_compute_environment(self, state):
     if state == "ENABLED":
         update_args = {'computeEnvironment': self.function_name,
                        'state': 'DISABLED'}
         self.client.update_compute_environment(**update_args)
     elif state == "DISABLED":
         delete_args = {'computeEnvironment': self.function_name}
         self.client.delete_compute_environment(**delete_args)
         logger.info("Compute environment successfully deleted.")
Example #12
 def _delete_valid_job_queue(self, state):
     if state == "ENABLED":
         updating_args = {'jobQueue': self.function_name,
                          'state': 'DISABLED'}
         self.client.update_job_queue(**updating_args)
     elif state == "DISABLED":
         deleting_args = {'jobQueue': self.function_name}
         self.client.delete_job_queue(**deleting_args)
         logger.info("Job queue successfully deleted.")
Example #13
 def _ecr_image_name_prepared(container_info: Dict) -> Optional[str]:
     """If the user set an already prepared image, return the image name."""
     image_name = container_info.get('image')
     if ":" not in image_name:
         image_name = "%s:latest" % image_name
     if not container_info.get('create_image') and ".dkr.ecr." in image_name:
         logger.info('Image already prepared in ECR.')
         return image_name
     return None
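
A hypothetical call with an image already hosted in ECR (the registry URI and repository name are made up):

container_info = {
    'image': '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo',
    'create_image': False,
}
# Prints '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest'
print(_ecr_image_name_prepared(container_info))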
Example #14
 def _delete_valid_compute_environment(self, state, name):
     if state == "ENABLED":
         update_args = {
             'computeEnvironment': self._get_resource_name(name),
             'state': 'DISABLED'
         }
         self.client.update_compute_environment(**update_args)
     elif state == "DISABLED":
         delete_args = {'computeEnvironment': self._get_resource_name(name)}
         logger.info("Compute environment deleted")
         self.client.delete_compute_environment(**delete_args)
Example #15
def _print_generic_response(resources_info: Dict, output_type: int, text_message: str, json_output: Dict) -> None:
    # Support 'PLAIN_TEXT', 'JSON' and 'VERBOSE' output types
    if output_type == OutputType.PLAIN_TEXT.value:
        output = text_message
        logger.info(output)
    else:
        if output_type == OutputType.JSON.value:
            output = json_output
        elif output_type == OutputType.VERBOSE.value:
            output = resources_info
        logger.info_json(output)
Example #16
 def _delete_valid_job_queue(self, state, name):
     if state == "ENABLED":
         updating_args = {
             'jobQueue': self._get_resource_name(name),
             'state': 'DISABLED'
         }
         self.client.update_job_queue(**updating_args)
     elif state == "DISABLED":
         deleting_args = {'jobQueue': self._get_resource_name(name)}
         logger.info("Job queue deleted")
         self.client.delete_job_queue(**deleting_args)
Example #17
 def ls(self):
     # If a bucket is defined, then we list its files
     resources_info = self.aws_resources[0]
     if resources_info.get('lambda').get('input', False):
         file_list = S3(resources_info).get_bucket_file_list()
         for file_info in file_list:
             logger.info(file_info)
     else:
         # Return the resources of the region defined in SCAR's configuration file
         aws_resources = _get_all_functions(self.aws_resources[0])
         response_parser.parse_ls_response(aws_resources, self.scar_info.get('cli_output'))
Example #18
 def _process_input_bucket_calls(self):
     s3_file_list = self.aws_s3.get_bucket_file_list()
     logger.info(f"Files found: '{s3_file_list}'")
     # First do a request response invocation to prepare the lambda environment
     if s3_file_list:
         s3_event = self.aws_s3.get_s3_event(s3_file_list.pop(0))
         self.aws_lambda.launch_request_response_event(s3_event)
     # If the list has more elements, invoke functions asynchronously
     if s3_file_list:
         s3_event_list = self.aws_s3.get_s3_event_list(s3_file_list)
         self.aws_lambda.process_asynchronous_lambda_invocations(s3_event_list)
Example #19
 def _add_extra_payload(self) -> None:
     if self.resources_info.get('lambda').get('extra_payload', False):
         payload_path = self.resources_info.get('lambda').get('extra_payload')
         logger.info(f"Adding extra payload '{payload_path}'")
         if FileUtils.is_file(payload_path):
             FileUtils.copy_file(self.resources_info.get('lambda').get('extra_payload'),
                                 self.tmp_payload_folder.name)
         else:
             FileUtils.copy_dir(self.resources_info.get('lambda').get('extra_payload'),
                                self.tmp_payload_folder.name)
         del self.resources_info['lambda']['extra_payload']
Example #20
 def get_supervisor_layer_arn(self) -> str:
     """Returns the ARN of the specified supervisor layer version.
     If the layer or version doesn't exist, it creates the layer."""
     if self._is_supervisor_created():
         is_created = self._is_supervisor_version_created()
         if is_created != '':
             logger.info(f'Using existing \'{self.layer_name}\' layer.')
             return is_created
     logger.info((f'Creating lambda layer with \'{self.layer_name}\''
                  f' version \'{self.supervisor_version}\'.'))
     return self._create_layer()
Example #21
 def check_supervisor_version(cls, supervisor_version: str) -> str:
     """Checks if the specified version exists in FaaS Supervisor's GitHub
     repository. Returns the version if it exists and 'latest' if not."""
     if GitHubUtils.exists_release_in_repo(cls._SUPERVISOR_GITHUB_USER,
                                           cls._SUPERVISOR_GITHUB_REPO,
                                           supervisor_version):
         return supervisor_version
     latest_version = SupervisorUtils.get_latest_release()
     if supervisor_version != 'latest':
         logger.info('Defined supervisor version does not exist.')
     logger.info(f'Using latest supervisor release: \'{latest_version}\'.')
     return latest_version
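
GitHubUtils.exists_release_in_repo is project-internal; a stand-in built on the public GitHub releases API (the endpoint is real, but the helper's actual implementation is an assumption) could look like:

import urllib.error
import urllib.request

def exists_release_in_repo(user: str, repo: str, tag: str) -> bool:
    # A 404 from this endpoint means the release tag does not exist.
    url = f'https://api.github.com/repos/{user}/{repo}/releases/tags/{tag}'
    try:
        with urllib.request.urlopen(url) as response:
            return response.getcode() == 200
    except urllib.error.HTTPError:
        return False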
Example #22
    def create_ecr_image(resources_info: Dict, supervisor_version: str) -> str:
        """Creates an ECR image using the user provided image adding the supervisor tools."""
        # If the user set an already prepared image return the image name
        image_name = ContainerImage._ecr_image_name_prepared(
            resources_info.get('lambda').get('container'))
        if image_name:
            return image_name

        tmp_folder = FileUtils.create_tmp_dir()

        # Create function config file
        FileUtils.write_yaml(
            FileUtils.join_paths(tmp_folder.name, "function_config.yaml"),
            create_function_config(resources_info))

        init_script_path = resources_info.get('lambda').get('init_script')
        # Copy the init script defined by the user to the payload folder
        if init_script_path:
            FileUtils.copy_file(
                init_script_path,
                FileUtils.join_paths(
                    tmp_folder.name,
                    FileUtils.get_file_name(init_script_path)))

        # Get supervisor zip
        supervisor_zip_path = ContainerImage.get_supervisor_zip(
            resources_info, supervisor_version)
        # Unzip the supervisor file into the temp folder
        FileUtils.unzip_folder(supervisor_zip_path, tmp_folder.name)

        # Create dockerfile to generate the new ECR image
        FileUtils.create_file_with_content(
            "%s/Dockerfile" % tmp_folder.name,
            ContainerImage._create_dockerfile_ecr_image(
                resources_info.get('lambda')))

        # Create the ECR Repo and get the image uri
        ecr_cli = ECR(resources_info)
        repo_name = resources_info.get('lambda').get('name')
        ecr_image = ecr_cli.get_repository_uri(repo_name)
        if not ecr_image:
            logger.info('Creating ECR repository: %s' % repo_name)
            ecr_image = ecr_cli.create_repository(repo_name)

        # Build and push the image to the ECR repo
        platform = None
        arch = resources_info.get('lambda').get('architectures', ['x86_64'])[0]
        if arch == 'arm64':
            platform = 'linux/arm64'
        return ContainerImage._build_push_ecr_image(
            tmp_folder.name, ecr_image, platform,
            ecr_cli.get_authorization_token())
Example #23
File: s3.py Project: secobau/scar
 def upload_file(self, folder_name=None, file_path=None, file_key=None):
     kwargs = {'Bucket' : self.aws.s3.input_bucket}
     kwargs['Key'] = self.get_file_key(folder_name, file_path, file_key)
     if file_path:
         try:
             kwargs['Body'] = FileUtils.read_file(file_path, 'rb')
         except FileNotFoundError:
             raise excp.UploadFileNotFoundError(file_path=file_path)
     if folder_name and not file_path:
         logger.info("Folder '{0}' created in bucket '{1}'".format(kwargs['Key'], kwargs['Bucket']))
     else:
         logger.info("Uploading file '{0}' to bucket '{1}' with key '{2}'".format(file_path, kwargs['Bucket'], kwargs['Key']))
     self.client.upload_file(**kwargs)
Example #24
 def create_api_gateway(self) -> None:
     """Creates an Api Gateway endpoint."""
     api_info = self.client.create_rest_api(self.aws.api_gateway.name)
     self._set_api_gateway_id(api_info)
     resource_info = self.client.create_resource(self.aws.api_gateway.id,
                                                 self._get_resource_id(),
                                                 _DEFAULT_PATH_PART)
     self.client.create_method(**self._get_method_args(resource_info))
     self.client.set_integration(
         **self._get_integration_args(resource_info))
     self.client.create_deployment(self.aws.api_gateway.id,
                                   _DEFAULT_STAGE_NAME)
     logger.info(f'API Gateway endpoint: {self._get_endpoint()}')
Example #25
 def create_api_gateway(self) -> None:
     """Creates an Api Gateway endpoint."""
     api_info = self.client.create_rest_api(self.api.get('name', ''))
     self._set_api_gateway_id(api_info)
     resource_info = self.client.create_resource(
         self.api.get('id', ''), self._get_resource_id(),
         self.api.get('path_part', ''))
     self._set_resource_info_id(resource_info)
     self.client.create_method(**self._get_method_args())
     self.client.set_integration(**self._get_integration_args())
     self.client.create_deployment(self.api.get('id', ''),
                                   self.api.get('stage_name', ''))
     logger.info(f'API Gateway endpoint: {self._get_endpoint()}')
Example #26
 def _process_s3_input_bucket_calls(self, resources_info: Dict, storage: Dict) -> None:
     s3_service = S3(resources_info)
     lambda_service = Lambda(resources_info)
     s3_file_list = s3_service.get_bucket_file_list(storage)
     bucket_name, _ = get_bucket_and_folders(storage.get('path'))
     logger.info(f"Files found: '{s3_file_list}'")
     # First do a request response invocation to prepare the lambda environment
     if s3_file_list:
         s3_event = s3_service.get_s3_event(bucket_name, s3_file_list.pop(0))
         lambda_service.launch_request_response_event(s3_event)
     # If the list has more elements, invoke functions asynchronously
     if s3_file_list:
         s3_event_list = s3_service.get_s3_event_list(bucket_name, s3_file_list)
         lambda_service.process_asynchronous_lambda_invocations(s3_event_list)
Example #27
 def _delete_job_definitions(self, name):
     job_definitions = []
     # Get IO definitions (if any)
     kwargs = {"jobDefinitionName": '{0}-io'.format(name)}
     io_job_info = self.client.describe_job_definitions(**kwargs)
     job_definitions.extend(_get_job_definitions(io_job_info))
     # Get main job definition
     kwargs = {"jobDefinitionName": name}
     job_info = self.client.describe_job_definitions(**kwargs)
     job_definitions.extend(_get_job_definitions(job_info))
     for job_def in job_definitions:
         kwars = {"jobDefinition": job_def}
         self.client.deregister_job_definition(**kwars)
     logger.info("Job definitions deleted")
Example #28
    def _create_s3_buckets(self, resources_info: Dict) -> None:
        if resources_info.get('lambda').get('input', False):
            s3_service = S3(resources_info)
            for bucket in resources_info.get('lambda').get('input'):
                if bucket.get('storage_provider') == 's3':
                    bucket_name, folders = s3_service.create_bucket_and_folders(
                        bucket.get('path'))
                    lambda_client = Lambda(resources_info)
                    lambda_client.link_function_and_bucket(bucket_name)
                    # Check if function is already available
                    logger.info("Wait function to be 'Active'")
                    if not lambda_client.wait_function_active(
                            resources_info.get('lambda').get('arn')):
                        logger.error("Timeout waiting function.")
                    else:
                        logger.info("Function 'Active'")
                    s3_service.set_input_bucket_notification(
                        bucket_name, folders)
                    if not folders:
                        logger.info(
                            f'Input bucket "{bucket_name}" successfully created'
                        )

        if resources_info.get('lambda').get('output', False):
            s3_service = S3(resources_info)
            for bucket in resources_info.get('lambda').get('output'):
                if bucket.get('storage_provider') == 's3':
                    bucket_name, folders = s3_service.create_bucket_and_folders(
                        bucket.get('path'))
                    if not folders:
                        logger.info(
                            f'Output bucket "{bucket_name}" successfully created'
                        )
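
wait_function_active is not shown here; a plausible polling implementation on top of the real get_function_configuration Lambda API call (the timeout and poll interval are assumptions) would be:

import time

def wait_function_active(lambda_client, function_arn: str, max_seconds: int = 60) -> bool:
    # Poll the function state until it becomes 'Active' or the timeout expires.
    deadline = time.time() + max_seconds
    while time.time() < deadline:
        config = lambda_client.get_function_configuration(FunctionName=function_arn)
        if config.get('State') == 'Active':
            return True
        time.sleep(2)
    return False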
Example #29
    def update_function_configuration(self, function_info=None):
        if not function_info:
            function_info = self.get_function_info()
        update_args = {'FunctionName' : function_info['FunctionName'] }
#         if hasattr(self.aws.lambdaf, "memory"):
#             update_args['MemorySize'] = self.aws.lambdaf.memory
#         else:
#             update_args['MemorySize'] = function_info['MemorySize']
#         if hasattr(self.aws.lambdaf, "time"):
#             update_args['Timeout'] = self.aws.lambdaf.time
#         else:
#             update_args['Timeout'] = function_info['Timeout']
        self._update_environment_variables(function_info, update_args)
        self._update_supervisor_layer(function_info, update_args)
        self.client.update_function_configuration(**update_args)
        logger.info("Function '{}' updated successfully.".format(function_info['FunctionName']))
Example #30
 def check_faas_supervisor_layer(self):
     """Checks if the supervisor layer exists, if not, creates the layer.
     If the layer exists and it's not updated, updates the layer."""
     # Get the layer information
     layer_info = self.layer.get_latest_layer_info(
         self._SUPERVISOR_LAYER_NAME)
     # Compare supervisor versions
     if layer_info and 'Description' in layer_info:
         # If the supervisor layer version is lower than the passed version,
         # we must update the layer
         if StrUtils.compare_versions(layer_info.get('Description', ''),
                                      self.supervisor_version) < 0:
             self._update_supervisor_layer()
         else:
             logger.info("Using existent 'faas-supervisor' layer")
     else:
         # Layer not found, we have to create it
         self._create_supervisor_layer()
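
StrUtils.compare_versions is not shown in this listing; judging from its use above, it follows cmp-style semantics (negative when the first version is lower). A minimal sketch under that assumption:

def compare_versions(first: str, second: str) -> int:
    # Assumed semantics: negative if first < second, zero if equal, positive otherwise.
    # Naive scheme: '1.2' and '1.2.0' compare as different versions here.
    a = [int(part) for part in first.split('.')]
    b = [int(part) for part in second.split('.')]
    return (a > b) - (a < b)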