Example #1
def add_transform_output(client,
                         account_name,
                         resource_group_name,
                         transform_name,
                         preset,
                         insights_to_extract=None,
                         video_analysis_mode=None,
                         audio_language=None,
                         audio_analysis_mode=None,
                         on_error=None,
                         relative_priority=None,
                         resolution=None,
                         face_detector_mode=None,
                         blur_type=None):

    transform = client.get(resource_group_name, account_name, transform_name)

    if not transform:
        show_resource_not_found_message(resource_group_name, account_name,
                                        'transforms', transform_name)

    transform.outputs.append(
        build_transform_output(preset, insights_to_extract,
                               video_analysis_mode, audio_language,
                               audio_analysis_mode, on_error,
                               relative_priority, resolution,
                               face_detector_mode, blur_type))

    parameters = Transform(outputs=transform.outputs)

    return client.create_or_update(resource_group_name, account_name,
                                   transform_name, parameters)
Example #2
def create_transform(client,
                     account_name,
                     resource_group_name,
                     transform_name,
                     preset,
                     insights_to_extract=None,
                     video_analysis_mode=None,
                     audio_language=None,
                     audio_analysis_mode=None,
                     on_error=None,
                     relative_priority=None,
                     description=None,
                     resolution=None,
                     face_detector_mode=None,
                     blur_type=None):

    outputs = [
        build_transform_output(preset, insights_to_extract,
                               video_analysis_mode, audio_language,
                               audio_analysis_mode, on_error,
                               relative_priority, resolution,
                               face_detector_mode, blur_type)
    ]
    parameters = Transform(description=description, outputs=outputs)
    return client.create_or_update(resource_group_name, account_name,
                                   transform_name, parameters)
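
# Not part of the original examples: both snippets above delegate preset construction
# to a local build_transform_output helper that is not shown on this page. The sketch
# below is only a guess at what such a helper might do, assuming the azure-mgmt-media
# model classes; the branching and parameter handling are assumptions, not the real
# implementation (FaceDetectorPreset handling for resolution/face_detector_mode/blur_type
# is omitted).
from azure.mgmt.media.models import (TransformOutput, BuiltInStandardEncoderPreset,
                                     AudioAnalyzerPreset, VideoAnalyzerPreset)


def build_transform_output(preset, insights_to_extract=None, video_analysis_mode=None,
                           audio_language=None, audio_analysis_mode=None, on_error=None,
                           relative_priority=None, resolution=None,
                           face_detector_mode=None, blur_type=None):
    # Map the preset name supplied on the command line to an SDK preset object.
    if preset == 'AudioAnalyzer':
        preset_obj = AudioAnalyzerPreset(audio_language=audio_language,
                                         mode=audio_analysis_mode)
    elif preset == 'VideoAnalyzer':
        preset_obj = VideoAnalyzerPreset(audio_language=audio_language,
                                         mode=video_analysis_mode,
                                         insights_to_extract=insights_to_extract)
    else:
        # Treat any other value as a built-in encoder preset name,
        # e.g. 'AdaptiveStreaming' or 'ContentAwareEncoding'.
        preset_obj = BuiltInStandardEncoderPreset(preset_name=preset)
    return TransformOutput(preset=preset_obj, on_error=on_error,
                           relative_priority=relative_priority)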
Example #3
def remove_transform_output(client, account_name, resource_group_name, transform_name, output_index):
    transform = client.get(resource_group_name, account_name, transform_name)

    try:
        transform.outputs.pop(output_index)
    except IndexError:
        raise CLIError("index {} doesn't exist on outputs".format(output_index))

    parameters = Transform(outputs=transform.outputs)
    return client.create_or_update(resource_group_name, account_name, transform_name, parameters)
Example #4
    audio_language="en-US",  # Be sure to modify this to your desired language code in BCP-47 format
    insights_to_extract="AllInsights",  # Video Analyzer can also run in Video-only mode.
    mode="Standard",  # Video Analyzer can also process audio in Basic or Standard mode when using AllInsights
    experimental_options={  # Optional settings for preview or experimental features
        # "SpeechProfanityFilterMode": "None"  # Disables the speech-to-text profanity filtering
    }))
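
# Not part of the original example: the fragment above is cut off at the top. Its
# keyword arguments belong to a VideoAnalyzerPreset wrapped in a TransformOutput;
# below is a minimal sketch of that enclosing call (variable name and option values
# are assumptions). The audio_transform_output used further down would come from a
# similar call that uses AudioAnalyzerPreset instead.
from azure.mgmt.media.models import (TransformOutput, VideoAnalyzerPreset,
                                     OnErrorType, Priority)

video_transform_output = TransformOutput(
    preset=VideoAnalyzerPreset(
        audio_language="en-US",             # BCP-47 language code
        insights_to_extract="AllInsights",  # or "AudioInsightsOnly" / "VideoInsightsOnly"
        mode="Standard",                    # audio analysis mode used by the analyzer
        experimental_options={}),
    on_error=OnErrorType.STOP_PROCESSING_JOB,
    relative_priority=Priority.NORMAL)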

# Ensure that you have customized transforms for the AudioAnalyzer. This is typically a one-time setup operation.
print("Creating Audio Analyzer transform...")

# Adding transform details
my_transform = Transform()
my_transform.description = "A simple Audio Analyzer Transform"
my_transform.outputs = [audio_transform_output]

print(f"Creating transform {audio_transform_name}")
transform = client.transforms.create_or_update(
    resource_group_name=resource_group,
    account_name=account_name,
    transform_name=audio_transform_name,
    parameters=my_transform)

print(f"{audio_transform_name} created (or updated if it existed already). ")

# Ensure that you have customized transforms for the VideoAnalyzer. This is typically a one-time setup operation.
print("Creating Video Analyzer transform...")
Example #5
transform_name = 'ContentAwareEncodingStreamFilesSample'

# Create a new Standard encoding Transform using the built-in ContentAwareEncoding preset
print(f"Creating Encoding transform named: {transform_name}")
# For this snippet, we are using 'BuiltInStandardEncoderPreset'
transform_output = TransformOutput(
    preset=BuiltInStandardEncoderPreset(preset_name="ContentAwareEncoding"),
    # What should we do with the job if there is an error?
    on_error=OnErrorType.STOP_PROCESSING_JOB,
    # What is the relative priority of this job to others? Normal, high or low?
    relative_priority=Priority.NORMAL)

print("Creating encoding transform...")

# Adding transform details
my_transform = Transform()
my_transform.description = "Transform with Stream Files"
my_transform.outputs = [transform_output]

print(f"Creating transform {transform_name}")
transform = client.transforms.create_or_update(
    resource_group_name=resource_group,
    account_name=account_name,
    transform_name=transform_name,
    parameters=my_transform)

print(f"{transform_name} created (or updated if it existed already). ")

job_name = 'StreamFilesSample' + uniqueness
print(f"Creating custom encoding job {job_name}")
files = (source_file)
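
# Not part of the original example: the snippet stops before the job is actually
# submitted. A minimal sketch of how a job for the transform above might be created,
# assuming input/output assets with these placeholder names already exist.
from azure.mgmt.media.models import Job, JobInputAsset, JobOutputAsset

input_asset_name = "input-" + job_name    # placeholder; the full sample creates this earlier
output_asset_name = "output-" + job_name  # placeholder; the full sample creates this earlier

job = client.jobs.create(
    resource_group_name=resource_group,
    account_name=account_name,
    transform_name=transform_name,
    job_name=job_name,
    parameters=Job(input=JobInputAsset(asset_name=input_asset_name),
                   outputs=[JobOutputAsset(asset_name=output_asset_name)]))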
Example #6
def transform_update_setter(client, resource_group_name, account_name,
                            transform_name, parameters):
    parameters = Transform(outputs=parameters.outputs,
                           description=parameters.description)
    return client.create_or_update(resource_group_name, account_name,
                                   transform_name, parameters)
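
# Not part of the original example: transform_update_setter follows the Azure CLI
# "generic update" pattern, in which a getter fetches the resource, the framework
# applies the user's property edits, and the setter writes the result back. A rough
# sketch of that flow, with the getter written out here as an assumption:
def transform_update_getter(client, resource_group_name, account_name, transform_name):
    return client.get(resource_group_name, account_name, transform_name)

# existing = transform_update_getter(client, resource_group_name, account_name, transform_name)
# existing.description = 'Updated description'   # edit applied by the update framework
# transform_update_setter(client, resource_group_name, account_name, transform_name, existing)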
Example #7
print(f"Creating Encoding transform named: {transform_name}")
# For this snippet, we are using 'BuiltInStandardEncoderPreset'
transform_output = TransformOutput(
    preset=BuiltInStandardEncoderPreset(preset_name="ContentAwareEncoding"),
    # What should we do with the job if there is an error?
    on_error=OnErrorType.STOP_PROCESSING_JOB,
    # What is the relative priority of this job to others? Normal, high or low?
    relative_priority=Priority.NORMAL)

print("Creating encoding transform...")

# Adding transform details
my_transform = Transform()
my_transform.description = "Transform with Asset filters"
my_transform.outputs = [transform_output]

print(f"Creating transform {transform_name}")
transform = client.transforms.create_or_update(
    resource_group_name=resource_group,
    account_name=account_name,
    transform_name=transform_name,
    parameters=my_transform)

print(f"{transform_name} created (or updated if it existed already). ")

job_name = 'ContentAwareEncodingAssetFilters' + uniqueness
print(f"Creating custom encoding job {job_name}")
files = (source_file)
Example #8
transform_name = 'ContentAwareEncoding'

# Create an Encoding transform
print(f"Creating Encoding transform named: {transform_name}")
# For this snippet, we are using 'BuiltInStandardEncoderPreset'
transform_output = TransformOutput(
    preset=BuiltInStandardEncoderPreset(preset_name="ContentAwareEncoding"),
    # What should we do with the job if there is an error?
    on_error=OnErrorType.STOP_PROCESSING_JOB,
    # What is the relative priority of this job to others? Normal, high or low?
    relative_priority=Priority.NORMAL)

print("Creating encoding transform...")

# Adding transform details
my_transform = Transform()
my_transform.description = "Transform with List Tracks in Assets"
my_transform.outputs = [transform_output]

print(f"Creating transform {transform_name}")
transform = client.transforms.create_or_update(
    resource_group_name=resource_group,
    account_name=account_name,
    transform_name=transform_name,
    parameters=my_transform)

print(f"{transform_name} created (or updated if it existed already). ")

job_name = 'ListTracksInAsset' + uniqueness
print(f"Creating custom encoding job {job_name}")
files = (source_file)
Example #9
# Adjust the path as needed depending on how you are launching this python sample file.
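
# Not part of the original example: the upload below assumes an upload_file_path and
# a blob_client defined before this fragment starts. A minimal sketch of that setup
# with azure-storage-blob; the connection string variable, container, and file name
# are placeholders, not values from the original sample.
import os
from azure.storage.blob import BlobServiceClient

upload_file_path = "Media/ignite.mp4"  # placeholder path; adjust as needed
blob_service_client = BlobServiceClient.from_connection_string(
    os.getenv("STORAGE_CONNECTION_STRING"))
blob_client = blob_service_client.get_blob_client(
    container="input-container", blob=os.path.basename(upload_file_path))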

# Upload the video to storage as a block blob
with open(upload_file_path, "rb") as data:
    # From SDK
    # upload_blob(data, blob_type=BlobType.BlockBlob, length=None, metadata=None, **kwargs)
    blob_client.upload_blob(data)

### Create a Transform ###
transform_name = 'MyTrans' + str(uniqueness)
# From SDK
# TransformOutput(*, preset, on_error=None, relative_priority=None, **kwargs) -> None
transform_output = TransformOutput(preset=BuiltInStandardEncoderPreset(
    preset_name="AdaptiveStreaming"))

transform = Transform()
transform.outputs = [transform_output]

print("Creating transform " + transform_name)
# From SDK
# create_or_update(resource_group_name, account_name, transform_name, parameters, **kwargs)
transform = client.transforms.create_or_update(
    resource_group_name=os.getenv("RESOURCEGROUP"),
    account_name=os.getenv("ACCOUNTNAME"),
    transform_name=transform_name,
    parameters=transform)

### Create a Job ###
job_name = 'MyJob' + str(uniqueness)
print("Creating job " + job_name)
files = (source_file)
Example #10
        # Basic : Performs speech-to-text transcription and generates a VTT subtitle/caption file.
        #         The output of this mode includes an Insights JSON file including only the keywords, transcription, and timing information.
        #         Automatic language detection and speaker diarization are not included in this mode.
        # Standard : Performs all operations included in the Basic mode, additionally performing language detection and speaker diarization.
        mode=AudioAnalysisMode.BASIC  # Change this to AudioAnalysisMode.STANDARD if you would like to use the more advanced audio analyzer
    ),
    # What should we do with the job if there is an error?
    on_error=OnErrorType.STOP_PROCESSING_JOB,
    # What is the relative priority of this job to others? Normal, high or low?
    relative_priority=Priority.NORMAL)

# Ensure that you have customized transforms for the AudioAnalyzer. This is typically a one-time setup operation.
print("Creating Audio Analyzer transforms...")

# Adding transform details
my_transform = Transform()
my_transform.description = "A simple Audio Analyzer Transform"
my_transform.outputs = [transform_output]

print(f"Creating transform {transform_name}")
transform = client.transforms.create_or_update(
    resource_group_name=resource_group,
    account_name=account_name,
    transform_name=transform_name,
    parameters=my_transform)

print(f"{transform_name} created (or updated if it existed already). ")

job_name = 'AudioAnalyticsJob' + uniqueness
print(f"Creating AudioAnalytics job {job_name}")
files = (source_file)