# Example 1
def lambda_handler(event, context):
    """Lambda function that responds shows active batch information.

    Parameters
    ----------
    event: dict, required API gateway request with an input SQS arn, output SQS arn
    context: object, required Lambda Context runtime methods and attributes
    Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    ------
    Lambda Output Format: dict
    Return doc:
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
    """
    log_request_and_context(event, context)

    try:
        body = handle_request()
    # Broad catch is deliberate: every failure maps to a controlled 500.
    # pylint: disable=broad-except
    except Exception as err:
        logger.error("Failed to handle request for workforce: %s", err)
        return {
            "statusCode": 500,
            "body": "Error: failed to handle request.",
        }

    # Success path: serialize the handler result for API gateway.
    return {
        "statusCode": 200,
        "body": json.dumps(body, default=str),
        "isBase64Encoded": False,
    }
def create_presigned_url(s3_uri, expiration=86400):
    """Generate a presigned URL to share an S3 object for validation of 24 hours

    :param s3_uri: string
    :param expiration: Time in seconds for the presigned URL to remain valid
    :return: Presigned URL as string. If error, returns None.
    """
    bucket_name, object_name = split_uri(s3_uri)

    # SigV4 signing is selected explicitly via the client config.
    client = boto3.client(
        "s3", config=botocore.config.Config(signature_version="s3v4"))

    try:
        url = client.generate_presigned_url(
            "get_object",
            Params={"Bucket": bucket_name, "Key": object_name},
            ExpiresIn=expiration,
        )
    except botocore.exceptions.ClientError as err:
        # Soft failure: callers treat None as "no URL available".
        logger.error("failed to generate presigned url: %s", err)
        return None

    # The response contains the presigned URL.
    return url
# Example 3
def download(req):
    """Stream the Caliper release zip matching ``?version=`` to the client.

    :param req: HTTP request carrying a ``version`` GET parameter
    :return: StreamingHttpResponse attachment on success, a JSON failure
             response if listing/matching raises, or None implicitly when
             no archive matches the requested version.
    """
    from CaliperServer.settings import downloadPath
    try:
        filePath = ''
        version = req.GET.get('version')
        for path in os.listdir(downloadPath):
            if path.endswith('.zip'):
                # Archive names embed the version, e.g. "...v1.2.3.zip".
                v = re.search(r'v(\S+)\.', path)
                if v is not None and v.group(1) == str(version):
                    filePath = os.path.join(downloadPath, path)
                    print(filePath)
    except Exception as e:
        logger.error(str(e))
        return Response.CustomJsonResponse(Response.CODE_FAILED, 'fail')
    # Bug fix: filePath starts as '' (never None), so the original
    # "is not None" guard always passed and could stream an empty path.
    if filePath:
        fileName = filePath.split("/")[-1]
        response = StreamingHttpResponse(file_iterator(filePath))
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Disposition'] = 'attachment;filename="{0}"'.format(
            fileName)
        return response
# Example 4
def save_db(userName,
            result,
            outputFileName,
            log_path,
            config,
            hostName,
            remark=''):
    '''
    Store one benchmark run in the database.

    :param userName: account name of the task owner
    :param result: score results to parse into the db
    :param outputFileName: file name of the output archive
    :param log_path: path of the extracted log directory
    :param config: path of the config file
    :param hostName: host name, used as the task name
    :param remark: optional note
    :return: True on success, False if anything failed
    '''
    try:
        user = accountModels.UserProfile.objects.get(username=userName)
        new_task = taskModels.Task(owner=user,
                                   name=hostName,
                                   config=config,
                                   path=outputFileName,
                                   remark=remark,
                                   delete=False)
        new_task.save()
        # Attach parsed scores and raw tool logs to the new task row.
        parseResult(result, new_task)
        parseLog(log_path, new_task)
        logger.debug("保存数据库")
    except Exception as e:
        logger.error(str(e))
        return False
    return True
# Example 5
def lambda_handler(event, context):
    """Lambda function that copies any worker logs to s3 and publishes batch finish to SNS.

    Parameters
    ----------
    event: dict, required
    context: object, required Lambda Context runtime methods and attributes
    Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    ------
    Lambda Output Format: dict
    """
    log_request_and_context(event, context)

    try:
        request_input = parse_input(event)
    except KeyError as err:
        logger.error("Input event missing required args: %s: %s", event, err)
        raise Exception("Failed to parse input lambda handler") from err

    batch_id = request_input["batch_id"]
    try:
        # Flip the batch to COMPLETE before notifying listeners.
        db.update_batch_status(batch_id, BatchStatus.COMPLETE)
    except botocore.exceptions.ClientError as err:
        raise Exception(f"failed to mark batch {batch_id} complete") from err

    readable_batch = input_batch_to_human_readable(
        db.get_batch_metadata(batch_id))

    notification = {
        "batchId": batch_id,
        "message": "Batch processing has completed successfully.",
        "batchInfo": readable_batch,
        "token": request_input["execution_id"],
        "status": "SUCCESS",
    }

    # A caller-supplied ARN wins; otherwise fall back to the default topic.
    destination_arn = (request_input["output_sns_arn"]
                       or os.getenv("DEFAULT_STATUS_SNS_ARN"))

    try:
        # NOTE(review): `sns` resource appears to come from module scope.
        sns.Topic(destination_arn).publish(
            Message=json.dumps(notification, indent=4, default=str))
    except botocore.exceptions.ClientError as err:
        raise Exception(
            f"Service error publishing SNS response for batch id: {batch_id}"
        ) from err

    return {
        "published_sns": notification,
        "output_sns_arn": destination_arn,
    }
# Example 6
def parseLog(logPath, task):
    """Walk the log tree under logPath and store one Log row per tool.

    :param logPath: root directory of the extracted tool logs
    :param task: Task model instance the logs belong to

    A failure for one tool is logged and does not stop the remaining tools.
    """
    result_files = showtree(logPath)
    for entry in result_files:
        # Renamed loop variable: the original reused `tool` for both the
        # dict from showtree() and the TestTool model object.
        toolName = entry["toolName"]
        filePath = entry["logPath"]
        try:
            tool = taskModels.TestTool.objects.get(name=toolName)
            # "with" guarantees the handle is closed (the original leaked
            # one open file per tool).
            with open(filePath, 'r') as log_file:
                content = log_file.read()
            taskModels.Log(tool=tool, content=content, task=task).save()
        except Exception as e:
            logger.error(str(e))
            logger.error("toolName:" + toolName)
def get_batch_description(batch_id):
    """
    Looks up a batch using the given batch id and validates that the batch
    is of appropriate type, then returns a human readable representation.

    :param batch_id: Id of batch to convert to human readable description
    :returns: json serializable description of a given batch
    """
    metadata = db.get_batch_metadata(batch_id)

    # Only parent batches of type "INPUT" can be described; frame-level
    # batches are rejected.
    if metadata["BatchMetadataType"] == BatchMetadataType.INPUT:
        return input_batch_to_human_readable(metadata)

    logger.error(
        "User requested existing batch, but it is of the wrong type (not INPUT): %s",
        batch_id)
    return None
def input_batch_to_human_readable(batch):
    """
    Generates a human friendly version of an INPUT batch metadata with presigned urls

    :param batch: Batch metadata dictionary
    :returns: json serializable dictionary of batch info, or None when the
        batch is not a parent batch of type INPUT
    """
    # Only parent batches of type "INPUT" are presentable; frame-level
    # batches are rejected.
    if batch[Attributes.BATCH_METADATA_TYPE] != BatchMetadataType.INPUT:
        logger.error(
            "User requested existing batch, but it is of the wrong input type: %s",
            batch[Attributes.BATCH_ID],
        )
        return None

    summary = {
        "batchId": batch[Attributes.BATCH_ID],
        "status": batch[Attributes.BATCH_STATUS],
        # Straight copy of request labeling jobs to acknowledge the request.
        "inputLabelingJobs": batch[Attributes.LABELING_JOBS],
    }

    levels = (
        ("firstLevel", BatchMetadataType.FIRST_LEVEL),
        ("secondLevel", BatchMetadataType.SECOND_LEVEL),
        ("thirdLevel", BatchMetadataType.THIRD_LEVEL),
    )
    for field_name, metadata_type in levels:
        children = db.get_child_batch_metadata(
            batch[Attributes.BATCH_ID], metadata_type)
        # NOTE(review): if several children share a level, the last one
        # wins — matches the original behavior.
        for child in children:
            summary[field_name] = first_or_second_level_to_human_readable(
                child)

    return summary
def lambda_handler(event, context):
    """Lambda function that returns active batch information.

    Parameters
    ----------
    event: dict, required API gateway request with an input SQS arn, output SQS arn
    context: object, required Lambda Context runtime methods and attributes
    Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    ------
    Lambda Output Format: dict
    Return doc:
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
    """
    log_request_and_context(event, context)

    try:
        request = parse_request(event)
    except (KeyError, ValueError) as err:
        logger.error("Failed to parse request: %s", err)
        return {
            "statusCode": 400,
            "body": "Error: failed to parse request.",
        }

    try:
        batch_info = handle_request(request)
    except botocore.exceptions.ClientError as err:
        # Bug fix: the original logged the literal "{err}" (missing
        # f-prefix); use lazy %-formatting so the real error is recorded.
        logger.error(
            "Boto call failed to execute during request handling: %s", err)
        return {
            "statusCode": 500,
            "body": "Error: internal error",
        }

    if batch_info is None:
        logger.error("Batch id not found, request: %s", request)
        return {
            "statusCode": 400,
            "body": f"batch id: {request['batchId']} not found",
            "headers": {
                "X-Amzn-ErrorType": "InvalidParameterException"
            },
        }

    response = {
        "statusCode": 200,
        "body": json.dumps(batch_info, indent=4, default=str),
        "isBase64Encoded": False,
    }
    return response
# Example 10
def lambda_handler(event, context):
    """Lambda function that copies any worker logs to s3 and publishes batch finish to SNS.

    Parameters
    ----------
    event: dict, required
    context: object, required Lambda Context runtime methods and attributes
    Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    ------
    Lambda Output Format: dict
    """
    log_request_and_context(event, context)

    execution_id = event["execution_id"]
    request = event["input"]

    step_output = request.get("transformation_step_output")
    if step_output is None:
        raise Exception(
            "no batch id stored with validation output, can't write sns")
    parent_batch_id = step_output["batch_id"]

    failure = request["error-info"]
    try:
        # Prefer structured details when "Cause" is json parseable (e.g. a
        # python exception) so stack traces aren't forwarded to SNS.
        parsed_cause = json.loads(failure["Cause"])
        error_type = parsed_cause["errorType"]
        error_message = parsed_cause["errorMessage"]
    except (ValueError, KeyError):
        # "Cause" isn't json parseable; forward the raw strings instead.
        error_type = failure["Error"]
        error_message = failure["Cause"]

    try:
        db.mark_batch_and_children_failed(parent_batch_id,
                                          f"{error_type}: {error_message}")
    except botocore.exceptions.ClientError as err:
        # Soft failure: still publish the error sns even if the db update fails.
        logger.error("failed to set batch status to error: %s", err)

    failure_message = {
        "batchId": parent_batch_id,
        "message": "Batch processing failed",
        "errorType": error_type,
        "errorString": error_message,
        "token": execution_id,
        "status": "FAILED",
    }

    # A caller-supplied destination wins over the default status topic.
    destination_arn = request.get("destinationSnsArn",
                                  os.getenv("DEFAULT_STATUS_SNS_ARN"))
    topic = boto3.resource("sns").Topic(destination_arn)

    try:
        topic.publish(Message=json.dumps(failure_message, indent=4))
    except botocore.exceptions.ClientError as err:
        raise Exception(
            f"Service error publishing SNS response for batch id: {parent_batch_id}"
        ) from err

    return {
        "published_sns": failure_message,
        "output_sns_arn": destination_arn,
    }
# Example 11
def parseSce(dim, dimKey, performance, dimResult):
    '''
    Parse every scenario under a dimension, including parent/child
    scenarios (at most three levels deep), creating Scenario rows on
    demand and storing a ScenarioResult per level.
    :param dim: dimension database object
    :param dimKey: key of the dimension inside the json data
    :param performance: json data object
    :param dimResult: DimResult row the scenario results attach to
    :return: True on success, False if anything raised
    '''
    try:
        dimDict = performance[dimKey]
        for k1 in dimDict.keys():
            if k1 != 'Total_Scores':  # every first-level scenario
                if not taskModels.Scenario.objects.filter(
                        name=k1, parentid=0).exists():  # create the first-level scenario if it does not exist yet
                    sce1 = taskModels.Scenario(name=k1, parentid=0, dim=dim)
                    sce1.save()
                else:
                    sce1 = taskModels.Scenario.objects.get(name=k1, parentid=0)
                sce1Dict = dimDict[k1]
                for k2 in sce1Dict.keys():
                    if k2 != 'Total_Scores' and k2 != 'Point_Scores':  # second-level scenario
                        if not taskModels.Scenario.objects.filter(
                                name=k2, parentid=sce1.id).exists(
                                ):  # create the second-level scenario if it does not exist yet
                            sce2 = taskModels.Scenario(name=k2,
                                                       parentid=sce1.id,
                                                       dim=dim)
                            sce2.save()
                        else:
                            sce2 = taskModels.Scenario.objects.get(
                                name=k2, parentid=sce1.id)
                        sce2Dict = sce1Dict[k2]
                        for k3 in sce2Dict.keys():
                            if k3 != 'Total_Scores' and k3 != 'Point_Scores':  # third-level scenario
                                if not taskModels.Scenario.objects.filter(
                                        name=k3, parentid=sce2.id).exists(
                                        ):  # create the third-level scenario if it does not exist yet
                                    sce3 = taskModels.Scenario(
                                        name=k3, parentid=sce2.id, dim=dim)
                                    sce3.save()
                                else:
                                    sce3 = taskModels.Scenario.objects.get(
                                        name=k3, parentid=sce2.id)
                                # Store the third-level total, then recurse
                                # into its point scores.
                                sceResult3 = taskModels.ScenarioResult(
                                    dimresult=dimResult,
                                    scenario=sce3,
                                    result=dimDict[k1][k2][k3]['Total_Scores'])
                                sceResult3.save()
                                parseTestCase(
                                    dimDict[k1][k2][k3]['Point_Scores'], sce3,
                                    sceResult3)
                        # Second-level total and point scores.
                        sceResult2 = taskModels.ScenarioResult(
                            dimresult=dimResult,
                            scenario=sce2,
                            result=dimDict[k1][k2]['Total_Scores'])
                        sceResult2.save()
                        parseTestCase(dimDict[k1][k2]['Point_Scores'], sce2,
                                      sceResult2)
                # First-level total and point scores.
                sceResult1 = taskModels.ScenarioResult(
                    dimresult=dimResult,
                    scenario=sce1,
                    result=dimDict[k1]['Total_Scores'])
                sceResult1.save()
                parseTestCase(dimDict[k1]['Point_Scores'], sce1, sceResult1)
    except Exception as e:
        logger.error(str(e))
        return False
    return True
# Example 12
def parseConfig(filePath):
    """Parse a host config json file and persist the hardware inventory.

    :param filePath: path of the json config file
    :return: (Config instance, host name) tuple on success, None on any
             failure
    """
    try:
        # Read inside "with" so the handle is always closed (the original
        # leaked an open file).
        with open(filePath, 'r') as config_file:
            configDict = json.loads(config_file.read())
        print(configDict)
        # Renamed from `sys` to avoid shadowing the stdlib module name.
        system = taskModels.System(
            name=configDict['system']['name'],
            manufacturer=configDict['system']['manufacturer'],
            version=configDict['system']['version'])
        system.save()
        baseboard = taskModels.Baseboard(
            name=configDict['baseboard']['name'],
            manufacturer=configDict['baseboard']['manufacturer'],
            version=configDict['baseboard']['version'])
        baseboard.save()

        config = taskModels.Config(hostname=configDict['hostName'],
                                   kernel=configDict['kernel'],
                                   os=configDict['os'],
                                   board=baseboard,
                                   sys=system)
        config.save()

        # One Cache row per reported cache.
        for cacheInfo in configDict['cacheInfo']:
            cache = taskModels.Cache(socketdes=cacheInfo['socketdes'],
                                     size=cacheInfo['size'],
                                     operational=cacheInfo['operational'],
                                     config=config)
            cache.save()

        # One Cpu row per socket.
        for cpuInfo in configDict['cpuInfo']:
            cpu = taskModels.Cpu(socketdes=cpuInfo['socketdes'],
                                 manufacturer=cpuInfo['manufacturer'],
                                 version=cpuInfo['version'],
                                 maxspeed=cpuInfo['maxSpeed'],
                                 currentspeed=cpuInfo['currentSpeed'],
                                 status=cpuInfo['status'],
                                 corecount=cpuInfo['coreCount'],
                                 enabledCore=cpuInfo['coreEnabledCount'],
                                 threadcount=cpuInfo['threadCount'],
                                 config=config)
            cpu.save()

        # One Memory row per DIMM.
        for memInfo in configDict['memInfo']:
            memory = taskModels.Memory(manufacturer=memInfo['manufacturer'],
                                       size=memInfo['size'],
                                       type=memInfo['type'],
                                       speed=memInfo['speed'],
                                       clockspeed=memInfo['clockSpeed'],
                                       banklocator=memInfo['bankLocator'],
                                       config=config)
            memory.save()

        # One Storage row per device, plus its partitions.
        for storageInfo in configDict['storageInfo']:
            storage = taskModels.Storage(devicename=storageInfo['deviceName'],
                                         manufactor=storageInfo['model'],
                                         capacity=storageInfo['capacity'],
                                         sectorsize=storageInfo['sectorsize'],
                                         config=config)
            storage.save()
            for partitonInfo in storageInfo['partitons']:
                partiton = taskModels.Partition(name=partitonInfo['name'],
                                                size=partitonInfo['size'],
                                                storage=storage)
                partiton.save()

        # One Net row per interface.
        for netInfo in configDict['netInfo']:
            net = taskModels.Net(interface=netInfo['interface'],
                                 bandwidth=netInfo['bankWidth'],
                                 driver=netInfo['driver'],
                                 driverversion=netInfo['driverVersion'],
                                 protocoltype=netInfo['protocolType'],
                                 address=netInfo['address'],
                                 broadcast=netInfo['broadcast'],
                                 netmask=netInfo['netmask'],
                                 network=netInfo['network'],
                                 mac=netInfo['mac'],
                                 config=config)
            net.save()
    except Exception as e:
        logger.error(str(e))
        return None
    return config, configDict['hostName']