def create_job_from_ad_hoc(project_id, location, input_uri, output_uri):
    """Creates a job based on an ad-hoc job configuration.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    def _h264_rendition(key, width, height, bitrate_bps):
        # Builds one H.264 video elementary stream at 60 fps.
        return transcoder_v1.types.ElementaryStream(
            key=key,
            video_stream=transcoder_v1.types.VideoStream(
                h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                    height_pixels=height,
                    width_pixels=width,
                    bitrate_bps=bitrate_bps,
                    frame_rate=60,
                ),
            ),
        )

    # Shared AAC audio track used by both output muxes.
    audio_stream = transcoder_v1.types.ElementaryStream(
        key="audio-stream0",
        audio_stream=transcoder_v1.types.AudioStream(codec="aac", bitrate_bps=64000),
    )

    job = transcoder_v1.types.Job()
    job.input_uri = input_uri
    job.output_uri = output_uri
    job.config = transcoder_v1.types.JobConfig(
        elementary_streams=[
            _h264_rendition("video-stream0", 640, 360, 550000),
            _h264_rendition("video-stream1", 1280, 720, 2500000),
            audio_stream,
        ],
        # Mux an SD and an HD MP4, each pairing one video rendition with the audio.
        mux_streams=[
            transcoder_v1.types.MuxStream(
                key="sd",
                container="mp4",
                elementary_streams=["video-stream0", "audio-stream0"],
            ),
            transcoder_v1.types.MuxStream(
                key="hd",
                container="mp4",
                elementary_streams=["video-stream1", "audio-stream0"],
            ),
        ],
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def get_job(project_id, location, job_id):
    """Gets a job.

    Args:
        project_id: The GCP project ID.
        location: The location this job is in.
        job_id: The job ID.
    """
    client = TranscoderServiceClient()
    # Jobs are addressed by their fully-qualified resource name.
    response = client.get_job(
        name=f"projects/{project_id}/locations/{location}/jobs/{job_id}"
    )
    print(f"Job: {response.name}")
    return response
def delete_job_template(project_id, location, template_id):
    """Deletes a job template.

    Args:
        project_id: The GCP project ID.
        location: The location of the template.
        template_id: The user-defined template ID.
    """
    client = TranscoderServiceClient()
    # Templates are addressed by their fully-qualified resource name.
    response = client.delete_job_template(
        name=f"projects/{project_id}/locations/{location}/jobTemplates/{template_id}"
    )
    print("Deleted job template")
    return response
def list_job_templates(project_id, location):
    """Lists all job templates in a location.

    Args:
        project_id: The GCP project ID.
        location: The location of the templates.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"
    response = client.list_job_templates(parent=parent)
    print("Job templates:")
    # Fix: the original called print({job_template.name}), which builds a
    # one-element set literal and therefore printed names wrapped in braces
    # and quotes (e.g. {'projects/...'}). Print the name string directly.
    for job_template in response.job_templates:
        print(job_template.name)
    return response
def create_job_from_preset(project_id, location, input_uri, output_uri, preset):
    """Creates a job based on a job preset.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.
        preset: The preset template.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # Instead of an inline JobConfig, refer to a preset template by ID.
    job = transcoder_v1.types.Job(
        input_uri=input_uri,
        output_uri=output_uri,
        template_id=preset,
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_with_embedded_captions(
    project_id,
    location,
    input_video_uri,
    input_captions_uri,
    output_uri,
):
    """Creates a job based on an ad-hoc job configuration that embeds captions
    in the output video.

    Args:
        project_id (str): The GCP project ID.
        location (str): The location to start the job in.
        input_video_uri (str): Uri of the input video in the Cloud Storage
          bucket.
        input_captions_uri (str): Uri of the input captions file in the Cloud
          Storage bucket.
        output_uri (str): Uri of the video output folder in the Cloud Storage
          bucket.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # CEA-608 caption stream. "mapping" is a reserved argument name in GCP
    # python client libraries (see
    # https://github.com/googleapis/proto-plus-python/blob/main/proto/message.py#L447),
    # so transcoder_v1.types.TextStream(codec=..., mapping=[...]) cannot be
    # used; pass a plain dict as a workaround instead.
    caption_stream = transcoder_v1.types.ElementaryStream(
        key="cea-stream0",
        text_stream={
            "codec": "cea608",
            "mapping": [
                {
                    "atom_key": "atom0",
                    "input_key": "caption-input0",
                    "input_track": 0,
                }
            ],
        },
    )

    video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream0",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=360,
                width_pixels=640,
                bitrate_bps=550000,
                frame_rate=60,
            ),
        ),
    )
    audio_stream = transcoder_v1.types.ElementaryStream(
        key="audio-stream0",
        audio_stream=transcoder_v1.types.AudioStream(codec="aac", bitrate_bps=64000),
    )

    job = transcoder_v1.types.Job()
    job.output_uri = output_uri
    job.config = transcoder_v1.types.JobConfig(
        inputs=[
            transcoder_v1.types.Input(key="input0", uri=input_video_uri),
            transcoder_v1.types.Input(key="caption-input0", uri=input_captions_uri),
        ],
        # One edit atom combining the video and its caption track.
        edit_list=[
            transcoder_v1.types.EditAtom(
                key="atom0",
                inputs=["input0", "caption-input0"],
            ),
        ],
        elementary_streams=[video_stream, audio_stream, caption_stream],
        # MP4 download plus HLS (ts) and DASH (fmp4) segment outputs.
        mux_streams=[
            transcoder_v1.types.MuxStream(
                key="sd",
                container="mp4",
                elementary_streams=["video-stream0", "audio-stream0"],
            ),
            transcoder_v1.types.MuxStream(
                key="sd-hls",
                container="ts",
                elementary_streams=["video-stream0", "audio-stream0"],
            ),
            transcoder_v1.types.MuxStream(
                key="sd-dash",
                container="fmp4",
                elementary_streams=["video-stream0"],
            ),
            transcoder_v1.types.MuxStream(
                key="audio-dash",
                container="fmp4",
                elementary_streams=["audio-stream0"],
            ),
        ],
        manifests=[
            transcoder_v1.types.Manifest(
                file_name="manifest.m3u8",
                type_="HLS",
                mux_streams=["sd-hls"],
            ),
            transcoder_v1.types.Manifest(
                file_name="manifest.mpd",
                type_="DASH",
                mux_streams=["sd-dash", "audio-dash"],
            ),
        ],
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_template(project_id, location, template_id):
    """Creates a job template.

    Args:
        project_id: The GCP project ID.
        location: The location to store this template in.
        template_id: The user-defined template ID.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    def _video_rendition(key, width, height, bitrate_bps):
        # One H.264 elementary stream at 60 fps for the given resolution.
        return transcoder_v1.types.ElementaryStream(
            key=key,
            video_stream=transcoder_v1.types.VideoStream(
                h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                    height_pixels=height,
                    width_pixels=width,
                    bitrate_bps=bitrate_bps,
                    frame_rate=60,
                ),
            ),
        )

    job_template = transcoder_v1.types.JobTemplate()
    job_template.name = (
        f"projects/{project_id}/locations/{location}/jobTemplates/{template_id}"
    )
    job_template.config = transcoder_v1.types.JobConfig(
        elementary_streams=[
            _video_rendition("video-stream0", 640, 360, 550000),
            _video_rendition("video-stream1", 1280, 720, 2500000),
            transcoder_v1.types.ElementaryStream(
                key="audio-stream0",
                audio_stream=transcoder_v1.types.AudioStream(
                    codec="aac", bitrate_bps=64000
                ),
            ),
        ],
        # SD and HD MP4 outputs, both sharing the single audio track.
        mux_streams=[
            transcoder_v1.types.MuxStream(
                key="sd",
                container="mp4",
                elementary_streams=["video-stream0", "audio-stream0"],
            ),
            transcoder_v1.types.MuxStream(
                key="hd",
                container="mp4",
                elementary_streams=["video-stream1", "audio-stream0"],
            ),
        ],
    )

    response = client.create_job_template(
        parent=parent, job_template=job_template, job_template_id=template_id
    )
    print(f"Job template: {response.name}")
    return response
def create_job_with_concatenated_inputs(
    project_id,
    location,
    input1_uri,
    start_time_input1,
    end_time_input1,
    input2_uri,
    start_time_input2,
    end_time_input2,
    output_uri,
):
    """Creates a job based on an ad-hoc job configuration that concatenates two
    input videos.

    Args:
        project_id (str): The GCP project ID.
        location (str): The location to start the job in.
        input1_uri (str): Uri of the first video in the Cloud Storage bucket.
        start_time_input1 (str): Start time, in fractional seconds ending in
          's' (e.g., '0s'), relative to the first input video timeline.
        end_time_input1 (str): End time, in fractional seconds ending in 's'
          (e.g., '8.1s'), relative to the first input video timeline.
        input2_uri (str): Uri of the second video in the Cloud Storage bucket.
        start_time_input2 (str): Start time, in fractional seconds ending in
          's' (e.g., '3.5s'), relative to the second input video timeline.
        end_time_input2 (str): End time, in fractional seconds ending in 's'
          (e.g., '15s'), relative to the second input video timeline.
        output_uri (str): Uri of the video output folder in the Cloud Storage
          bucket.
    """

    def _duration_from_json(timestamp):
        # Parse a JSON-format duration string such as '8.1s'.
        parsed = duration.Duration()
        parsed.FromJsonString(timestamp)
        return parsed

    # Parse all offsets up front so malformed input fails early.
    start1 = _duration_from_json(start_time_input1)
    end1 = _duration_from_json(end_time_input1)
    start2 = _duration_from_json(start_time_input2)
    end2 = _duration_from_json(end_time_input2)

    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    job = transcoder_v1.types.Job()
    job.output_uri = output_uri
    job.config = transcoder_v1.types.JobConfig(
        inputs=[
            transcoder_v1.types.Input(key="input1", uri=input1_uri),
            transcoder_v1.types.Input(key="input2", uri=input2_uri),
        ],
        # The edit list plays the selected span of input1, then input2.
        edit_list=[
            transcoder_v1.types.EditAtom(
                key="atom1",
                inputs=["input1"],
                start_time_offset=start1,
                end_time_offset=end1,
            ),
            transcoder_v1.types.EditAtom(
                key="atom2",
                inputs=["input2"],
                start_time_offset=start2,
                end_time_offset=end2,
            ),
        ],
        elementary_streams=[
            transcoder_v1.types.ElementaryStream(
                key="video-stream0",
                video_stream=transcoder_v1.types.VideoStream(
                    h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                        height_pixels=360,
                        width_pixels=640,
                        bitrate_bps=550000,
                        frame_rate=60,
                    ),
                ),
            ),
            transcoder_v1.types.ElementaryStream(
                key="audio-stream0",
                audio_stream=transcoder_v1.types.AudioStream(
                    codec="aac", bitrate_bps=64000
                ),
            ),
        ],
        mux_streams=[
            transcoder_v1.types.MuxStream(
                key="sd",
                container="mp4",
                elementary_streams=["video-stream0", "audio-stream0"],
            ),
        ],
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_with_set_number_images_spritesheet(
    project_id, location, input_uri, output_uri
):
    """Creates a job based on an ad-hoc job configuration that generates two
    spritesheets.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    def _sprite_sheet(prefix, width, height):
        # A 10x10 grid of images, 100 in total, sampled across the input.
        # See https://cloud.google.com/transcoder/docs/how-to/generate-spritesheet#generate_set_number_of_images.
        return transcoder_v1.types.SpriteSheet(
            file_prefix=prefix,
            sprite_width_pixels=width,
            sprite_height_pixels=height,
            column_count=10,
            row_count=10,
            total_count=100,
        )

    job = transcoder_v1.types.Job()
    job.input_uri = input_uri
    job.output_uri = output_uri
    # Ad-hoc job configuration; see
    # https://cloud.google.com/transcoder/docs/how-to/jobs#create_jobs_ad_hoc
    # and https://cloud.google.com/transcoder/docs/reference/rest/v1beta1/JobConfig.
    job.config = transcoder_v1.types.JobConfig(
        elementary_streams=[
            # Output video stream.
            transcoder_v1.types.ElementaryStream(
                key="video-stream0",
                video_stream=transcoder_v1.types.VideoStream(
                    h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                        height_pixels=360,
                        width_pixels=640,
                        bitrate_bps=550000,
                        frame_rate=60,
                    ),
                ),
            ),
            # Output audio stream.
            transcoder_v1.types.ElementaryStream(
                key="audio-stream0",
                audio_stream=transcoder_v1.types.AudioStream(
                    codec="aac", bitrate_bps=64000
                ),
            ),
        ],
        # Multiplex audio and video into one MP4 container.
        mux_streams=[
            transcoder_v1.types.MuxStream(
                key="sd",
                container="mp4",
                elementary_streams=["video-stream0", "audio-stream0"],
            ),
        ],
        # Two sprite sheets written to the GCS output folder.
        sprite_sheets=[
            _sprite_sheet("small-sprite-sheet", 64, 32),
            _sprite_sheet("large-sprite-sheet", 128, 72),
        ],
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_with_static_overlay(
    project_id, location, input_uri, overlay_image_uri, output_uri
):
    """Creates a job based on an ad-hoc job configuration that includes a
    static image overlay.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        overlay_image_uri: Uri of the JPEG image for the overlay in the Cloud
          Storage bucket. Must be a JPEG.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # Fully opaque image, held static at the top-left corner from t=0 and
    # removed at t=10s of the output timeline.
    overlay = transcoder_v1.types.Overlay(
        image=transcoder_v1.types.Overlay.Image(
            uri=overlay_image_uri,
            resolution=transcoder_v1.types.Overlay.NormalizedCoordinate(
                x=1,
                y=0.5,
            ),
            alpha=1,
        ),
        animations=[
            transcoder_v1.types.Overlay.Animation(
                animation_static=transcoder_v1.types.Overlay.AnimationStatic(
                    xy=transcoder_v1.types.Overlay.NormalizedCoordinate(
                        x=0,
                        y=0,
                    ),
                    start_time_offset=duration.Duration(seconds=0),
                ),
            ),
            transcoder_v1.types.Overlay.Animation(
                animation_end=transcoder_v1.types.Overlay.AnimationEnd(
                    start_time_offset=duration.Duration(seconds=10),
                ),
            ),
        ],
    )

    job = transcoder_v1.types.Job()
    job.input_uri = input_uri
    job.output_uri = output_uri
    job.config = transcoder_v1.types.JobConfig(
        elementary_streams=[
            transcoder_v1.types.ElementaryStream(
                key="video-stream0",
                video_stream=transcoder_v1.types.VideoStream(
                    h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                        height_pixels=360,
                        width_pixels=640,
                        bitrate_bps=550000,
                        frame_rate=60,
                    ),
                ),
            ),
            transcoder_v1.types.ElementaryStream(
                key="audio-stream0",
                audio_stream=transcoder_v1.types.AudioStream(
                    codec="aac", bitrate_bps=64000
                ),
            ),
        ],
        mux_streams=[
            transcoder_v1.types.MuxStream(
                key="sd",
                container="mp4",
                elementary_streams=["video-stream0", "audio-stream0"],
            ),
        ],
        overlays=[overlay],
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response