def convert_paths(self):
    """converts event paths with proper ones
    """
    from stalker import Shot
    import timecode

    # do a db connection
    for event in self.events:
        # the reel carries the shot name (or something close to it)
        shot = Shot.query.filter(
            Shot.name == self.get_shot_name(event.reel)
        ).first()

        if shot:
            # point the event at the newest output of the shot, if any
            latest_output = self.find_latest_outputs(shot)
            event.source_file = str(latest_output) if latest_output else ''

        # set the in and out points correctly
        # stupid AVID parks source clips at the 12th, 8th or 1st hour;
        # rebase whichever offset applies (checked largest-first)
        hour_offsets = (
            timecode.Timecode(self.fps, start_timecode='11:59:00:00'),
            timecode.Timecode(self.fps, start_timecode='07:59:00:00'),
            timecode.Timecode(self.fps, start_timecode='01:00:00:00'),
        )
        for offset in hour_offsets:
            if event.src_start_tc.frames >= offset.frames:
                event.src_start_tc -= offset - 1
                event.src_end_tc -= offset - 1
                break
def __init__(self, reel, warp_fps, tc, fps):
    """Hold a reel's rate data plus its native and warped timecodes.

    :param reel: the reel identifier this warp belongs to
    :param warp_fps: warped frame rate; coerced to float, its sign is
        only a direction hint so the timecode uses its magnitude
    :param tc: the start timecode string
    :param fps: the native frame rate
    """
    self.reel = reel
    self.fps = fps
    self.warp_fps = float(warp_fps)
    # direction flag; starts out as a forward (non-reversed) warp
    self.reverse = False
    # the same start tc expressed at the native rate and at the
    # (always positive) warped rate
    self.timecode = timecode.Timecode(fps, tc)
    self.timecode_warped = timecode.Timecode(abs(self.warp_fps), tc)
def apply(self, stack, line):
    """Try to parse one EDL event line.

    :param stack: list of previously parsed events; a cut links the new
        event to the previous one, and every parsed event is appended
    :param line: raw EDL line, matched against ``self.regex``
    :return: the new ``Event``, or ``None`` when the line does not match
    """
    evt = None
    m = re.search(self.regex, line.strip())
    if m:
        matches = m.groups()
        keys = [
            'num', 'reel', 'track', 'tr_code', 'aux',
            'src_start_tc', 'src_end_tc', 'rec_start_tc', 'rec_end_tc'
        ]
        values = map(self.stripper, matches)
        evt = Event(dict(zip(keys, values)))
        t = evt.tr_code
        if t == 'C':
            if len(stack) > 0:
                # a cut also terminates the previous event
                stack[-1].next_event = evt
            evt.transition = Cut()
        elif t == 'D':
            evt.transition = Dissolve()
        # raw string: '\d' is a regex class, not a string escape
        # (the old non-raw 'W\d+' is an invalid escape sequence and
        # raises a SyntaxWarning on modern Python)
        elif re.match(r'W\d+', t):
            evt.transition = Wipe()
        elif t == 'K':
            evt.transition = Key()
        else:
            evt.transition = None
        # promote the raw tc strings to Timecode objects at the EDL rate
        evt.src_start_tc = timecode.Timecode(self.fps, evt.src_start_tc)
        evt.src_end_tc = timecode.Timecode(self.fps, evt.src_end_tc)
        evt.rec_start_tc = timecode.Timecode(self.fps, evt.rec_start_tc)
        evt.rec_end_tc = timecode.Timecode(self.fps, evt.rec_end_tc)
        stack.append(evt)
    return evt
def convert_paths(self):
    """converts event paths with proper ones
    """
    from stalker import Shot

    # do a db connection
    for event in self.events:
        # the reel name encodes the shot name (or something close to it)
        shot = Shot.query.filter(
            Shot.name == self.get_shot_name(event.reel)
        ).first()
        if shot:
            latest_output = self.find_latest_outputs(shot)
            event.source_file = str(latest_output) if latest_output else ''

        # set the in and out points correctly: treat anything starting
        # past 48 frames as carrying 2-second handles and rebase it so
        # the source starts at zero
        # (48 frames == 2 s only at 24 fps — TODO confirm for other rates)
        if event.src_start_tc.frames > 48:
            event.src_end_tc -= event.src_start_tc
            event.src_start_tc = timecode.Timecode(self.fps)
def get_timecode_from_image(self, fps, img_path):
    """queries timecode metadata from given image

    Runs ``exiftool`` on the image, reads its ``Time Code`` tag and
    converts it to an absolute frame number.

    :param fps: frame rate handed to :class:`timecode.Timecode`
    :param img_path: path of the image to inspect
    :return: the frame number, or 0 when no usable timecode is found
    """
    import subprocess
    import timecode
    frame_number = 0
    info = {}
    process = subprocess.Popen(['exiftool', img_path],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True)
    try:
        # exiftool prints "Tag Name : value" pairs, one per line
        for tag in process.stdout:
            line = tag.strip().split(':')
            info[line[0].strip()] = line[-1].strip()
    finally:
        # close the pipe and reap the child; the original leaked a
        # zombie process by never calling wait()
        process.stdout.close()
        process.wait()
    try:
        tc_exif = info['Time Code'].split(' ')[0]
        t = timecode.Timecode('%s' % fps, start_timecode=int(tc_exif))
        frame_number = t.frame_number
    except Exception:
        # best effort: missing tag or unparsable value -> return 0.
        # Narrowed from BaseException, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    print('[%s] frame number returned from [%s]' %
          (frame_number, os.path.basename(img_path)))
    return frame_number
def jobAnalyzeInputs(job):
    """
    Run analysis on the job inputs and calculate job metrics based on the
    analysis

    :param job: JSON job data struture from dynamodb
    """
    job['analysis'] = {}
    job['analysis']['frameCount'] = 0
    print('Analyze: job: ' + job['id'])

    # number of inputs
    job['analysis']['num_inputs'] = len(job['settings']['inputs'])

    # number of outputs, summed over all output groups
    num_outputs = 0
    for og in job['settings']['outputGroups']:
        num_outputs += len(og['outputs'])
    job['analysis']['num_outputs'] = num_outputs

    # calculate total frames in inputs and job
    # (renamed from 'input', which shadowed the builtin)
    for job_input in job['settings']['inputs']:
        # mediainfo reports the frame rate as a string, e.g. '25.000'
        input_fps = job_input['mediainfo']['File']['track'][0]['Frame_rate'][0]
        if 'InputClippings' in job_input:
            # clipped input: frame count is the clip's timecode range
            for clip in job_input['InputClippings']:
                start_tc = timecode.Timecode(input_fps, clip['StartTimecode'])
                end_tc = timecode.Timecode(input_fps, clip['EndTimecode'])
                input_duration = end_tc - start_tc
                job_input['frameCount'] = int(input_duration.tc_frames())
                # BUGFIX: duration in seconds is frames / fps, not
                # frames * fps — and since input_fps is a string, the old
                # multiplication actually produced a repeated string.
                job_input['duration'] = job_input['frameCount'] / float(input_fps)
        else:
            # whole input: take frame count and duration from mediainfo
            job_input['frameCount'] = int(
                job_input['mediainfo']['File']['track'][0]['Frame_count'])
            job_input['duration'] = float(
                job_input['mediainfo']['File']['track'][0]['Duration'][0])
        job['analysis']['frameCount'] += job_input['frameCount']

    # codec of the last input examined (matches original behaviour)
    job['analysis']['codec'] = job_input['mediainfo']['File']['track'][0][
        'Video_Format_List']
def wizard():
    """Interactive entry point: asks for a clip folder, a frame rate and a
    handle size, then writes an EDL where each clip's source in/out is
    trimmed inward by the handles.

    Relies on module globals visible in this file's scope: ``edlPath``
    (output EDL path), ``dropFrame`` (set True for 29.97/59.94),
    ``timecode``, ``exiftool``, ``title_edl``, ``time`` and ``os``.
    """
    global dropFrame
    print(
        "Welcome to Handle Wizard!\nThis script will generate an EDL of all the shots in your directory with new start and end timecodes based on the number of frame handles."
    )
    time.sleep(1)
    if os.path.isfile(edlPath):  #Check if Handle Wizard EDL file exists
        print(
            "You already have handleWizard.edl on your desktop. This script will overwrite that file. Do you want to continue? y/n"
        )
        choice = input(">>> ")
        if choice == 'y' or choice == 'yes':
            os.remove(edlPath)  #Deletes old EDL
            edl = open(edlPath, 'w+')  #Creates new EDL
            edl.write("\n")
            edl.close()
            print("Continuing...")
            time.sleep(1)
        else:
            print("Exiting Handle Wizard...")
            time.sleep(1)
            exit(1)
    else:
        edl = open(edlPath, 'w+')  #Creates new EDL
        edl.close()
    print(
        "Drag and drop folder or enter path. Be careful not to add an extra space at the end!"
    )
    dir = input(">>> ")
    if len(dir) > 0:
        fps = input("""Select Frame Rate:
A. 23.98
B. 24
C. 25
D. 29.97
E. 30
F. 50
G. 59.94
H. 60
>>> """)
        if len(fps) > 0 and fps.isalpha():
            # map the menu letter onto the frame-rate string expected by
            # the timecode library; drop-frame rates also set the global
            fps = fps.lower()
            if fps == 'a':
                fps = "23.98"
            elif fps == 'b':
                fps = "24"
            elif fps == 'c':
                fps = "25"
            elif fps == 'd':
                fps = "29.97"
                dropFrame = True
            elif fps == 'e':
                fps = "30"
            elif fps == 'f':
                fps = "50"
            elif fps == 'g':
                fps = "59.94"
                dropFrame = True
            elif fps == 'h':
                fps = "60"
            else:
                # NOTE(review): after the recursive retry returns,
                # execution falls through below with fps still holding
                # the invalid letter — confirm this is intended
                print("Type the letter for the correct frame rate.")
                time.sleep(1)
                wizard()
        handles = input("Number of frame handles: ")
        handles = timecode.Timecode(
            fps,
            str("00:00:00:" + handles))  #Convert frame handles to SMPTE Timecode
        clip_num = 0
        print("Working on it...")
        for root, folders, files in os.walk(dir):
            for file in files:
                clip = (dir + ("/") + str(file))
                clip_num += 1
                with exiftool.ExifTool(
                ) as et:  #Generates new Source In and Outs
                    startTC = et.get_tag('StartTimecode', clip)  #get Start TC
                    duration = et.get_tag('Duration', clip)  #get Duration
                    startTC = timecode.Timecode(
                        fps, str(startTC)
                    )  #Converts Start TC to be readable by Timecode library
                    duration = (
                        '00:00:' + str(duration)
                    )  #Converts duration from seconds to HH:MM:SS.ss for Timecode library
                    duration = timecode.Timecode(
                        fps, str(duration)
                    )  #Converts duration from HH:MM:SS.ss to HH:MM:SS:FF
                    endTC = startTC + duration  #Calculates End TC by adding the converted Start TC to the Duration TC using Timecode library
                    newStartTC = startTC + (handles - 1)
                    newEndTC = endTC - (handles)
                    #Generate Record TC's for EDL
                    # each clip is parked on its own record hour
                    if clip_num == 1:
                        recTCIN = timecode.Timecode(fps, '01:00:00:00')
                    else:
                        # NOTE(review): "0" + str(clip_num) yields e.g.
                        # "010:00:00:00" once clip_num reaches 10 — confirm
                        recTCIN = ("0" + str(clip_num) + ":00:00:00")
                        recTCIN = timecode.Timecode(fps, str(recTCIN))
                    recTCOUT = (recTCIN + duration) - ((handles * 2) - 1)
                    #Export to EDL
                    edl = open(edlPath, 'r+')
                    new_lines = ("00000" + str(clip_num) + " " + file +
                                 " V C " + str(newStartTC) + " " +
                                 str(newEndTC) + " " + str(recTCIN) + " " +
                                 str(recTCOUT) + "\n* FROM CLIP NAME: " +
                                 file + "\n* SOURCE FILE: " + file + "\n")
                    with open(edlPath, 'a') as edl:
                        edl.write("\n")
                        edl.write(new_lines)
                    edl.close()
        title_edl()
    else:
        print("Enter folder path")
def __init__(self, filepath):
    """Probe *filepath* with ffprobe and populate media attributes
    (frame rate, duration, timecode, audio characteristics) used for
    the XMEML export.

    Bails out early (leaving the instance partially initialised) when
    the file is missing or has no audio/video streams.
    """
    # Establish its filepath
    if os.path.isfile(filepath):
        self.filepath = filepath
        self.filename = os.path.basename(self.filepath)
        # URL form with forward slashes, percent-encoded
        self.pathurl = 'file://localhost/' + quote(
            self.filepath.replace(os.sep, '/')
        )
        self.name = os.path.splitext(self.filename)[0]
    else:
        print(filepath, ': this file does not exist.')
        return None
    # Probe it and its probe result to the object
    # NOTE(review): shell=True combined with a list argv drops the
    # ffprobe arguments on POSIX (they become shell args); this likely
    # only behaves as intended on Windows — confirm target platform
    data = subprocess.run(
        [
            'ffprobe',
            self.filepath,
            '-print_format', 'json',
            '-show_format',
            '-show_streams',
            '-v', 'quiet'
        ],
        shell=True,
        capture_output=True
    )
    # Save the probe data
    self.probe = json.loads(data.stdout)
    video_streams = [
        stream for stream in self.probe['streams']
        if stream['codec_type'] == 'video'
    ]
    audio_streams = [
        stream for stream in self.probe['streams']
        if stream['codec_type'] == 'audio'
    ]
    if len(video_streams) >= 1:
        # Consider it video
        self.mediaType = 'video'
    elif len(video_streams) == 0 and len(audio_streams) >= 1:
        # Consider it audio
        self.mediaType = 'audio'
    elif len(video_streams) == 0 and len(audio_streams) == 0:
        self.mediaType = None
        print(self.filepath, ': this file contains neither video or audio streams')
        return None
    print(self.name, self.mediaType)
    # But does it have embedded audio?
    self.audioStreams = len(audio_streams)
    if self.mediaType == 'video':
        # Work with only the first video stream, if multiple
        video_stream = video_streams[0]
        # NOTE(review): ffprobe JSON values are strings; nb_frames is
        # kept as-is here (not int()) — confirm downstream expectations
        self.frameCount = video_stream['nb_frames']
        # Trim off "1/" before FPS, and make it an integer.
        self.frameRate = int(video_stream['codec_time_base'][2:])
        self.duration = video_stream['duration_ts']
        # Create a timebase object, with which we can make TC/frame conversions
        self._timebase = timecode.Timecode(self.frameRate)
        found_timecode = None
        # Search for the timecode inside the video stream
        # (assumes the stream has a 'tags' dict — TODO confirm)
        if 'timecode' in video_stream['tags']:
            found_timecode = video_stream['tags']['timecode']
        # If it's not there, check outside in the Format
        elif 'timecode' in self.probe['format']['tags']:
            found_timecode = self.probe['format']['tags']['timecode']
        if found_timecode:
            try:
                self._timebase.validateTC(found_timecode)
                self.timecode = found_timecode
                self.startFrame = self._timebase.toFrames(found_timecode)
                self.timecode_displayformat = DEFAULT_VIDEO_TIMECODE_DISPLAYFORMAT
            except ValueError:
                # Well, it had something, but it didn't validate
                print(self.filepath, ': this file doesn\'t have embedded timecode.')
                self.startFrame = 0
                self.timecode = '00:00:00:00'
                self.timecode_displayformat = DEFAULT_VIDEO_TIMECODE_DISPLAYFORMAT
        else:
            # Couldn't find it.
            print(self.filepath, ': this file doesn\'t have embedded timecode.')
            self.startFrame = 0
            self.timecode = '00:00:00:00'
            self.timecode_displayformat = DEFAULT_VIDEO_TIMECODE_DISPLAYFORMAT
        # More video attributes, copied through verbatim from ffprobe
        for attrib in [ 'codec_name', 'width', 'height' ]:
            setattr(self, attrib, video_stream[attrib])
        # More complicated attributes
        if 'sample_aspect_ratio' in video_stream:
            if video_stream['sample_aspect_ratio'] == '1:1':
                self.pixelaspectratio = 'square'
            else:
                # Otherwise stick the ratio there in it
                self.pixelaspectratio = video_stream['sample_aspect_ratio']
        if 'field_order' in video_stream:
            if video_stream['field_order'] == 'progressive':
                self.fielddominance = 'none'
            else:
                # This could potentially be problematic and not be the correct value.
                # Whatever, I'm not forward thinking enough to facilitate interlaced media.
                self.fielddominance = video_stream['field_order']
        else:
            self.fielddominance = 'none'
        # Need to connect these to Ffprobe and be accurate
        self.alphatype = 'none'
        self.anamorphic = 'FALSE'
    if self.audioStreams > 0:
        # If there is any audio at all, catalog it
        # Base most of the attributes on the very first audio stream
        # Ignore subsequent audio streams (note, this doesn't refer to channels)
        audio_stream = audio_streams[0]
        self.audio_sample_rate = int(audio_stream['sample_rate'])
        self.audio_channels = audio_stream['channels']
        self.audio_bit_depth = audio_stream['bits_per_raw_sample']
    else:
        # Otherwise, be explicit
        self.audio_channels = 0
    if self.mediaType == 'audio':
        # Establish the timecode & duration, only for audio files
        # Not video with embedded audio
        BEXT_DATA = {}
        if 'comment' in self.probe['format']['tags']:
            comment = self.probe['format']['tags']['comment']
            if comment[0] == 'z':
                # If it starts with z, it's probably BEXT chunks
                # Create keys and values for the BEXT data
                for k, v in [ line.split('=') for line in comment.splitlines() ]:
                    BEXT_DATA[k] = v
                if 'zSPEED' in BEXT_DATA.keys():
                    # Regex match in the format: ##.###AA (e.g. 25.000ND)
                    match = re.match(
                        r"(\d*?\.\d*)(\w*)",
                        BEXT_DATA['zSPEED'],
                        re.I
                    )
                    if match:
                        rate, dropframe = match.groups()
                        # Try end up with a clean integer. Only float if it's a decimal.
                        if float(rate).is_integer():
                            framerate = int( float(rate) )
                        else:
                            framerate = float(rate)
                        # Check if it's a valid frame rate and then assign it
                        if framerate in VALID_FRAME_RATES:
                            self.frameRate = framerate
        if not hasattr(self, 'frameRate'):
            # If we didn't manage to find the frame rate
            self.frameRate = DEFAULT_VIDEO_FRAME_RATE
        # Search for an audio time-of-day timecode
        # Time_reference is usually the number of samples since midnight.
        if 'time_reference' in self.probe['format']['tags']:
            self._timebase = timecode.Timecode(self.frameRate)
            # NOTE(review): relies on self.audio_sample_rate having been
            # set above, i.e. the file has at least one audio stream —
            # confirm this always holds for mediaType == 'audio'
            samples_since_midnight = int(self.probe['format']['tags']['time_reference'])
            seconds_since_midnight = samples_since_midnight / self.audio_sample_rate
            self.startFrame = int(seconds_since_midnight * self.frameRate)
            self.timecode = self._timebase.toTC(self.startFrame)
        else:
            # No timecode found at all
            self.startFrame = 0
            self.timecode = '00:00:00:00'
            self.timecode_displayformat = DEFAULT_VIDEO_TIMECODE_DISPLAYFORMAT
        # Audio duration is: File duration (seconds, float) * Video frame rate
        # e.g. 60 second recording * 25 FPS = 1,500 frames
        # Finish with int() because we always need a whole number of frames.
        self.duration = int( float(audio_stream['duration']) * self.frameRate )
from urllib.parse import quote # APPLICATION ASSETS import timecode from ffprobe_xml_structure import * # DEFAULTS DEFAULT_AUDIO_SAMPLE_RATE = 48000 DEFAULT_AUDIO_BIT_DEPTH = 16 DEFAULT_AUDIO_CHANNELS = 2 DEFAULT_VIDEO_FRAME_RATE = 25 DEFAULT_VIDEO_TIMECODE_DISPLAYFORMAT = 'NDF' TIMEBASE = timecode.Timecode(DEFAULT_VIDEO_FRAME_RATE) VALID_FRAME_RATES = [ 23.976, 23.98, 24, 25, 29.97, 30 ] DEFAULT_SEQUENCE_COLOURDEPTH = 24 # bit DEFAULT_SEQUENCE_AUDIO_OUTPUT_CHANNELS = 2 DEFAULT_LABEL_COLOUR_VIDEO = 'Iris' DEFAULT_LABEL_COLOUR_AUDIO = 'Caribbean' DEFAULT_LABEL_COLOUR_SEQUENCE = 'Rose' DEFAULT_LABEL_COLOUR_BIN = 'Mango' DEFAULT_XMEML_VERSION = '4' # PROTOTYPE FILE DATA # TEMPORARY TEMP_LIST_OF_FILES = [
transcode_plate_glob = os.path.join(transcode_dir, '%s.*.exr'%version_name) transcode_plate_frames = sorted(glob.glob(transcode_plate_glob)) rowdict['3.2K Transcode Exists?'] = 'YES' if len(transcode_plate_frames) != framecount and len(transcode_plate_frames) != (framecount + 1): rowdict['Plate Message'] = 'ERROR: Transcoded plate exists but image sequence is incomplete.' else: if B_EXR_LIBRARY_EXISTS: slate_file = OpenEXR.InputFile(transcode_plate_frames[0]) slate_tc_obj = slate_file.header().get('timeCode') tmp_slate_frame = int(os.path.basename(transcode_plate_frames[0]).split('.')[-2]) # timecode.Timecode('24', start_timecode='00:01:22:23') if slate_tc_obj: slate_tc_str = "%02d:%02d:%02d:%02d" % ( slate_tc_obj.hours, slate_tc_obj.minutes, slate_tc_obj.seconds, slate_tc_obj.frame) logging.info('Found frame %s with timecode %s.'%(os.path.basename(transcode_plate_frames[0]), slate_tc_str)) tmp_slate_frame = timecode.Timecode('24', start_timecode = slate_tc_str).frame_number start_file = OpenEXR.InputFile(transcode_plate_frames[1]) start_tc_obj = start_file.header().get('timeCode') tmp_start_frame = int(os.path.basename(transcode_plate_frames[2]).split('.')[-2]) if start_tc_obj: start_tc_str = "%02d:%02d:%02d:%02d" % ( start_tc_obj.hours, start_tc_obj.minutes, start_tc_obj.seconds, start_tc_obj.frame) logging.info('Found frame %s with timecode %s.' % ( os.path.basename(transcode_plate_frames[1]), start_tc_str)) tmp_start_frame = timecode.Timecode('24', start_timecode = start_tc_str).frame_number if tmp_start_frame != (tmp_slate_frame + 1): rowdict['Plate Message'] = 'ERROR: Transcoded plate has bad timecode values.' else: logging.warning('No transcoded plate exists for shot %s, plate %s.'%(version_shot, version_name)) rowdict['Plate Message'] = 'ERROR: No transcoded plate exists.'
def generate_sequence_structure(self):
    """Generates a Sequence structure suitable for XML<->EDL conversion

    Builds the Sequence/Media/Video/Track/Clip hierarchy from the first
    Maya sequencer found on this node.

    :return: Sequence, or None when no sequencer exists
    """
    import timecode
    from anima.env.mayaEnv import Maya
    m = Maya()
    fps = m.get_fps()

    # export only the first sequence, ignore others
    sequencers = self.sequences.get()
    if len(sequencers) == 0:
        return None

    sequencer = sequencers[0]
    time = pm.PyNode('time1')

    seq = Sequence()
    seq.name = str(sequencer.get_sequence_name())
    seq.rate = Rate(timebase=str(fps), ntsc=False)
    # sequence start timecode comes from Maya's production start frame
    # (+1 because the Timecode frames argument is 1-based)
    seq.timecode = str(timecode.Timecode(
        framerate=seq.rate.timebase,
        frames=time.timecodeProductionStart.get() + 1
    ))
    seq.duration = sequencer.duration

    media = Media()
    video = Video()
    media.video = video

    for shot in sequencer.shots.get():
        clip = Clip()
        clip.id = str(shot.full_shot_name)
        clip.name = str(shot.full_shot_name)
        # clip length includes a handle on each side
        clip.duration = shot.duration + 2 * shot.handle.get()
        clip.enabled = True
        clip.start = shot.sequenceStartFrame.get()
        clip.end = shot.sequenceEndFrame.get() + 1

        # clips always start from 0 and includes the shot handle
        clip.in_ = shot.handle.get()  # handle at start
        clip.out = shot.handle.get() + shot.duration  # handle at end
        clip.type = 'Video'  # always video for now

        f = File()
        f.name = os.path.splitext(
            os.path.basename(str(shot.output.get()))
        )[0]
        f.duration = shot.duration + 2 * shot.handle.get()
        f.pathurl = str('file://localhost/%s' % shot.output.get())
        clip.file = f

        # place the clip on its (0-based) video track, growing the list
        # on demand
        # NOTE(review): a missing track only appends ONE new Track, so a
        # gap in track numbers would land the clip on the wrong index —
        # confirm track numbers are always contiguous
        track_number = shot.track.get() - 1  # tracks should start from 0
        try:
            track = video.tracks[track_number]
        except IndexError:
            track = Track()
            video.tracks.append(track)
        track.clips.append(clip)

        # set video resolution (last shot wins)
        video.width = shot.wResolution.get()
        video.height = shot.hResolution.get()

    seq.media = media
    return seq
sequence_total_duration = file_last['startFrame'] + file_last['duration'] - file_first['startFrame'] # Quickly set up the sequence sequence.set('id', 'AUTOSEQUENCE_SEQUENCE_1') setElem(sequence, 'duration', str(sequence_total_duration)) # FRAMERATE.................. for media in list_of_files: # TEMP ATTRIBUTES DURING TESTING filename = os.path.basename(media['filepath']) filepath = media['filepath'] framerate = media['framerate'] startTC = media['startTC'] duration = media['duration'] startFrame = timecode.Timecode(framerate).toFrames(startTC) print(startFrame) index += 1 masterID = str(index) + '_' + filename masterID_component = masterID + '_COMPONENT_1' # Create a new master clip, blank from the template master_clip_root = ET.fromstring(pr_xml_video_masterclip) # Set its name and other attributes setElem(master_clip_root, 'name', filename) setElem(master_clip_root, 'masterclipid', masterID) master_clip_root.set('id', masterID) setElem(master_clip_root.find('rate'), 'timebase', str(framerate))
import pydpx_meta
import timecode
import glob

# collect the sample DPX frames in frame order
dpx_paths = sorted(glob.glob("/root/test/*.dpx"))

# show the timecode currently stored in each frame header
print("-------------------")
for path in dpx_paths:
    header = pydpx_meta.DpxHeader(path)
    print(header.tv_header.time_code)
print("-------------------")

# rewrite every header with a timecode that advances one frame per file;
# base rate and start timecode come from the first frame's header
# (adding a "00:00:00:00" Timecode advances by exactly one frame)
first = pydpx_meta.DpxHeader(dpx_paths[0])
tc = timecode.Timecode(first.tv_header.frame_rate_for_tc,
                       first.tv_header.time_code)
one_frame = timecode.Timecode(first.tv_header.frame_rate_for_tc,
                              "00:00:00:00")
for path in dpx_paths:
    header = pydpx_meta.DpxHeader(path)
    tc += one_frame
    header.tv_header.time_code = tc
    print(header.tv_header.time_code)