Code example #1
0
File: job.py  Project: yorek/azure-cli
def create_job(client,
               resource_group_name,
               account_name,
               transform_name,
               job_name,
               output_asset_names,
               input_asset_name=None,
               description=None,
               priority=None,
               files=None,
               base_uri=None):
    """Submit a new Media Services job under the given transform.

    The job input is either an existing asset (when *input_asset_name* is
    given) or an HTTP(S) source built from *files* / *base_uri*.  Every
    name in *output_asset_names* becomes one output asset of the job.

    Raises CLIError when neither an input asset nor any HTTP source
    information was supplied.
    """
    from azure.mgmt.media.models import (Job, JobInputAsset, JobInputHttp,
                                         JobOutputAsset)

    if input_asset_name:
        job_input = JobInputAsset(asset_name=input_asset_name, files=files)
    elif base_uri is None and files is None:
        # No usable input of either kind was provided.
        raise CLIError(
            "Missing required arguments.\nEither --input-asset-name, "
            "or both --files or --base-uri must be specified.")
    else:
        job_input = JobInputHttp(files=files, base_uri=base_uri)

    # One output asset per requested name.
    job_outputs = [JobOutputAsset(asset_name=name)
                   for name in output_asset_names]

    job = Job(input=job_input,
              outputs=job_outputs,
              description=description,
              priority=priority)

    return client.create(resource_group_name, account_name, transform_name,
                         job_name, job)
Code example #2
0
def _upload_and_encode(file):
    """Upload *file* into a new input asset and (re)submit its encoding job.

    Steps:
      1. create an input asset ``in__<basename>`` and stream the file into
         its backing storage container,
      2. create an output asset ``out__<basename>`` carrying the streaming
         locator name as its alternate id,
      3. delete any previous job named ``job__<basename>`` and create a
         fresh one on the configured transform.

    Relies on module-level ``client``, ``blob_service`` and the
    RESOURCE_GROUP_NAME / ACCOUNT_NAME / TRANSFORM_NAME constants.
    """
    # Normalize the filename so it is safe to embed in asset/job names.
    basename = file.filename.replace('.', '-')
    basename = basename.replace(' ', '-')
    in_asset = "in__" + basename
    print(in_asset)

    # Create new input asset
    client.assets.create_or_update(RESOURCE_GROUP_NAME, ACCOUNT_NAME, in_asset,
                                   Asset())

    # Get the asset's storage container and upload the file into it
    response = client.assets.get(RESOURCE_GROUP_NAME, ACCOUNT_NAME, in_asset)
    print("got a response, know what kind of container??")
    print(response.container)

    blob_service.create_blob_from_stream(response.container,
                                         basename,
                                         stream=file.stream)

    # Create output asset and add streaming locator name to it as metadata
    out_asset = "out__" + basename
    locator_name = "loc__" + basename
    client.assets.create_or_update(RESOURCE_GROUP_NAME, ACCOUNT_NAME,
                                   out_asset, Asset(alternate_id=locator_name))

    # Delete any previous job with the same name.  The Azure management
    # client raises (rather than returning None) when the resource does
    # not exist, so a bare `if client.jobs.get(...)` would crash on the
    # first run instead of skipping the delete.
    job_name = "job__" + basename
    try:
        existing_job = client.jobs.get(RESOURCE_GROUP_NAME, ACCOUNT_NAME,
                                       TRANSFORM_NAME, job_name)
    except Exception:  # job not found -> nothing to delete
        existing_job = None
    if existing_job:
        client.jobs.delete(RESOURCE_GROUP_NAME, ACCOUNT_NAME, TRANSFORM_NAME,
                           job_name)

    # Create new job with the correct input and outputs
    inputs = JobInputAsset(asset_name=in_asset)
    outputs = [JobOutputAsset(asset_name=out_asset)]
    new_job = Job(input=inputs, outputs=outputs)
    print("Creating new job")
    client.jobs.create(RESOURCE_GROUP_NAME, ACCOUNT_NAME, TRANSFORM_NAME,
                       job_name, new_job)
Code example #3
0
File: job.py  Project: bridgewaytc/AzureCLI
def create_job(client,
               resource_group_name,
               account_name,
               transform_name,
               job_name,
               output_assets,
               input_asset_name=None,
               label=None,
               correlation_data=None,
               description=None,
               priority=None,
               files=None,
               base_uri=None):
    """Create a Media Services job on the given transform.

    Input comes from an existing asset when *input_asset_name* is given;
    otherwise both *files* and *base_uri* must be supplied and an HTTP
    input is built from them.  *output_assets* is forwarded unchanged as
    the job's outputs.

    Raises CLIError when no complete input specification was provided.
    """
    from azure.mgmt.media.models import (Job, JobInputAsset, JobInputHttp)

    if input_asset_name:
        job_input = JobInputAsset(asset_name=input_asset_name,
                                  files=files,
                                  label=label)
    elif base_uri is None or files is None:
        # Neither an asset nor a complete HTTP input was specified.
        raise CLIError(
            "Missing required arguments.\nEither --input-asset-name, "
            "or both --files or --base-uri must be specified.")
    else:
        job_input = JobInputHttp(files=files,
                                 base_uri=base_uri,
                                 label=label)

    job = Job(input=job_input,
              outputs=output_assets,
              correlation_data=correlation_data,
              description=description,
              priority=priority)

    return client.create(resource_group_name, account_name, transform_name,
                         job_name, job)
Code example #4
0
# Script fragment: submits an audio/video analytics job.  `uniqueness`,
# `source_file`, `in_asset_name`, `out_asset_name`, `audio_transform_name`,
# `resource_group`, `account_name` and `client` are defined earlier in the
# original script (not visible in this excerpt).
audio_job_name = 'AudioAnalyticsJob' + uniqueness
video_job_name = 'VideoAnalyticsJob' + uniqueness

# Choose which of the analyzer job name you would like to Use
# For the audio analytics job, use audio_job_name
# For the video analytics job, use video_job_name
analysis_job_name = audio_job_name
print(f"Creating Analytics job {analysis_job_name}")
# NOTE(review): `(source_file)` is just a parenthesized expression, NOT a
# one-element tuple (that would be `(source_file,)`).  `files` is unused in
# the visible code, so this may be harmless — confirm against the full script.
files = (source_file)

# Create Job Input and Job Output Asset
# NOTE(review): `input` shadows the Python builtin of the same name.
input = JobInputAsset(asset_name=in_asset_name)
outputs = JobOutputAsset(asset_name=out_asset_name)

# Create a job object
the_job = Job(input=input, outputs=[outputs])

# Choose which of the analyzer Transform names you would like to use here by changing the name of the Transform to be used
# For the basic audio analyzer - pass in the audio_transform_name
# For the video analyzer - change this code to pass in the video_transform_name
analysis_transform_name = audio_transform_name

# Create a transform job
job: Job = client.jobs.create(resource_group,
                              account_name,
                              analysis_transform_name,
                              analysis_job_name,
                              parameters=the_job)

# Check Job State
# NOTE(review): the next statement is truncated in this excerpt.
job_state = client.jobs.get(resource_group, account_name,
Code example #5
0
def main(event: func.EventGridEvent):
    """Event Grid-triggered face-redaction pipeline.

    When a blob-created event arrives this function:
      1. parses the blob, container and storage-account names out of the
         blob URL carried in the event payload,
      2. creates an output asset in the Media Services account,
      3. builds a read-only user-delegation SAS URL for the source blob,
      4. submits a 'faceredact' transform job whose input is that SAS URL.

    Configuration (account names, credentials) is read from environment
    variables.
    """
    result = json.dumps({
        'id': event.id,
        'data': event.get_json(),
        'topic': event.topic,
        'subject': event.subject,
        'event_type': event.event_type,
    })

    logging.info('Python EventGrid trigger processed an event: %s', result)

    # Pull the pieces of the blob URL apart:
    # https://<account>.blob.core.windows.net/<container>/<blob>?<sas>
    blob_url = event.get_json().get('url')
    logging.info('blob URL: %s', blob_url)
    blob_name = blob_url.split("/")[-1].split("?")[0]
    logging.info('blob name: %s', blob_name)
    origin_container_name = blob_url.split("/")[-2].split("?")[0]
    logging.info('container name: %s', origin_container_name)
    storage_account_name = blob_url.split("//")[1].split(".")[0]
    logging.info('storage account name: %s', storage_account_name)

    ams_account_name = os.getenv('ACCOUNTNAME')
    resource_group_name = os.getenv('RESOURCEGROUP')
    subscription_id = os.getenv('SUBSCRIPTIONID')
    client_id = os.getenv('AZURE_CLIENT_ID')
    client_secret = os.getenv('AZURE_CLIENT_SECRET')
    TENANT_ID = os.getenv('AZURE_TENANT_ID')
    storage_blob_url = 'https://' + storage_account_name + '.blob.core.windows.net/'
    transform_name = 'faceredact'
    LOGIN_ENDPOINT = AZURE_PUBLIC_CLOUD.endpoints.active_directory
    RESOURCE = AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id

    logging.info('login_endpoint: %s', LOGIN_ENDPOINT)
    logging.info('tenant_id: %s', TENANT_ID)

    # One timestamp shared by every generated name.  The original called
    # datetime.utcnow() separately for each name, so the output asset name,
    # its alternate id and the job name could disagree whenever the clock
    # ticked over a second between the calls.
    timestamp = datetime.utcnow().strftime("%m-%d-%Y_%H:%M:%S")
    out_asset_name = 'faceblurringOutput_' + timestamp
    out_alternate_id = 'faceblurringOutput_' + timestamp
    out_description = 'Redacted video with blurred faces'

    # Authenticate the Media Services management client via AAD
    # client-credentials flow.
    context = adal.AuthenticationContext(LOGIN_ENDPOINT + "/" + TENANT_ID)
    credentials = AdalAuthentication(
        context.acquire_token_with_client_credentials, RESOURCE, client_id,
        client_secret)
    client = AzureMediaServices(credentials, subscription_id)

    # Output asset that will receive the redacted video.
    output_asset = Asset(alternate_id=out_alternate_id,
                         description=out_description)
    client.assets.create_or_update(resource_group_name, ams_account_name,
                                   out_asset_name, output_asset)

    token_credential = DefaultAzureCredential()
    datalake_service_client = DataLakeServiceClient(
        account_url=storage_blob_url, credential=token_credential)

    # User-delegation key valid for one hour, used to sign the read SAS.
    delegation_key = datalake_service_client.get_user_delegation_key(
        key_start_time=datetime.utcnow(),
        key_expiry_time=datetime.utcnow() + timedelta(hours=1))

    sas_token = generate_file_sas(account_name=storage_account_name,
                                  file_system_name=origin_container_name,
                                  directory_name="",
                                  file_name=blob_name,
                                  credential=delegation_key,
                                  permission=FileSasPermissions(read=True),
                                  expiry=datetime.utcnow() +
                                  timedelta(hours=1),
                                  protocol="https")

    sas_url = "{}?{}".format(blob_url, sas_token)
    logging.info(sas_url)

    # Submit the face-redaction job; its input is the SAS URL of the
    # uploaded blob, its output the asset created above.
    job_name = 'Faceblurring-job_' + timestamp
    job_input = JobInputHttp(label="Video_asset", files=[sas_url])
    job_output = JobOutputAsset(asset_name=out_asset_name)
    job_parameters = Job(input=job_input, outputs=[job_output])

    client.jobs.create(resource_group_name,
                       ams_account_name,
                       transform_name,
                       job_name,
                       parameters=job_parameters)