def getTabSeparatedRecord (self):
    """Return this event's fields as one tab separated record string.

    The final field is the event duration in frames, derived from the
    record in/out timecodes.  The module-level ``newline`` terminator is
    appended to the result.
    """
    duration_frames = (Timecode.timeCode2frameCount(self.record_out)
                       - Timecode.timeCode2frameCount(self.record_in))
    fields = [
        self.event_id,
        self.reel_id,
        self.eventType,
        self.cutType,
        self.source_in,
        self.source_out,
        self.record_in,
        self.record_out,
        self.slateNumber,
        self.takeNumber,
        str(duration_frames),
    ]
    return "\t".join(fields) + newline
def addHeadsAndTails (self, handleLenght, sourceOrRecord):
    """Widen the selected timecode range by ``handleLenght`` frames on each side.

    The in point is moved ``handleLenght`` frames earlier and the out point
    ``handleLenght`` frames later.  ``sourceOrRecord`` selects which pair of
    timecodes is adjusted: "source" or "record"; any other value is a no-op.
    """
    def _moved(timecode, frame_delta):
        # Convert to a frame count, apply the delta, convert back.
        return Timecode.frameCount2timeCode(
            Timecode.timeCode2frameCount(timecode) + frame_delta)

    if sourceOrRecord == "source":
        self.source_in = _moved(self.source_in, -handleLenght)
        self.source_out = _moved(self.source_out, handleLenght)
    elif sourceOrRecord == "record":
        self.record_in = _moved(self.record_in, -handleLenght)
        self.record_out = _moved(self.record_out, handleLenght)
def shiftTimeCode (self, shiftAmount, sourceOrRecord):
    """Shift the selected in/out timecode pair by ``shiftAmount`` frames.

    Both the in and the out point move by the same (signed) amount.
    ``sourceOrRecord`` selects the pair: "source" or "record"; any other
    value leaves the event untouched.
    """
    def _shifted(timecode):
        # Round-trip through a frame count to apply the shift.
        return Timecode.frameCount2timeCode(
            Timecode.timeCode2frameCount(timecode) + shiftAmount)

    if sourceOrRecord == "source":
        self.source_in = _shifted(self.source_in)
        self.source_out = _shifted(self.source_out)
    elif sourceOrRecord == "record":
        self.record_in = _shifted(self.record_in)
        self.record_out = _shifted(self.record_out)
def getTimecodeSync(self, timecode, tcAttrName, attrs, fps, timecodeFps, timecodeMultiplier, offset=0):
    """Read a sync timecode from the location named in ``attrs`` and accumulate its offset.

    Parameters:
        timecode           -- reference timecode to compare against (may be None)
        tcAttrName         -- key into ``attrs`` naming the location whose
                              'timecode' attribute is read
        attrs              -- attribute dictionary for this operation
        fps                -- retained for backward compatibility; no longer used
        timecodeFps        -- frame rate used for the timecode subtraction
        timecodeMultiplier -- scale applied to the frame-count difference
        offset             -- starting offset the scaled difference is added to

    Returns:
        (tcSyncTime, offset): ``tcSyncTime`` is the timecode read from the
        location (None when unavailable); ``offset`` includes the scaled
        difference.  On a calculation error the offset returned is -1.
    """
    tcSyncTime = None
    if timecode is not None and tcAttrName in attrs and attrs[tcAttrName]:
        tcSyncTime = self.attr('timecode', atLocation=attrs[tcAttrName])
        if tcSyncTime is not None:
            # NOTE(review): the original also computed
            # Timecode.TCFtoInt(tcSyncTime, fps) into an unused local
            # (tcSyncValue); that dead computation has been removed, so a
            # malformed tcSyncTime no longer raises outside the try block.
            try:
                diff = Timecode.TCSub(tcSyncTime, timecode, timecodeFps)
                offset += Timecode.TCFtoInt(diff, timecodeFps) * timecodeMultiplier
            except Exception as e:
                self.logger.error('Error calculating timecode difference: %s' % str(e))
                return tcSyncTime, -1
    return tcSyncTime, offset
def _parse_stream(self):
    """Parse one AVI stream definition (a LIST/strl chunk) from the file.

    Scans forward for the next LIST/strl chunk, skipping LIST/odml chunks.
    Reads the mandatory stream header (strh) and, for video streams
    ('vids'), the bitmap info header (strf); the optional codec data
    (strd) and stream name (strn) chunks are consumed when present.
    Video streams are appended to self.video_streams; every stream's info
    is appended to self._stream_data.

    Returns True when a stream definition was parsed, False when no
    further LIST/strl chunk exists (the non-matching chunk is put back).
    """
    while True:
        strl = self._next_chunk()
        # search for next LIST/strl chunk
        if strl.fcc == b"LIST" and strl.sub_fcc == b"strl":
            break
        # skip LIST/odml chunks
        elif strl.fcc == b"LIST" and strl.sub_fcc == b"odml":
            self._skip_chunk(strl)
        # back up if anything else is found
        else:
            self._put_back(strl)
            return False
    self._log.write("Stream definition #{0}".format(len(self._stream_data)))
    # The stream header chunk is mandatory inside a strl list.
    strh = self._require_chunk(b"strh")
    stream_header = self._read_struct_chunk(strh, StreamHeader)
    self._log.write("Stream header")
    self._log.writeobj(stream_header)
    bitmap_info = None
    strf = self._require_chunk(b"strf")
    # Only video streams ('vids') carry a BitmapInfoHeader; the format
    # chunk of any other stream type (e.g. audio) is skipped unparsed.
    if stream_header.fccType == b"vids":
        bitmap_info = self._read_struct_chunk(strf, BitmapInfoHeader)
        self._log.write("Bitmap info header")
        self._log.writeobj(bitmap_info)
    else:
        self._skip_chunk(strf)
    codec_data = None
    c = self._next_chunk()
    # Optional codec-private data chunk.
    if c.fcc == b"strd":
        codec_data = self._read_chunk_content(c)
        self._log.write("Codec data: {0} bytes", len(codec_data))
        c = self._next_chunk()
    stream_name = None
    # Optional NUL-terminated stream name chunk; anything else is pushed
    # back for the caller to handle.
    if c.fcc == b"strn":
        stream_name = _from_asciiz(self._read_chunk_content(c))
        self._log.write("Stream name: {0!r}", stream_name)
    else:
        self._put_back(c)
    if bitmap_info is not None:
        # Build the video stream description from the parsed headers.
        vs = InputVideoStream(self)
        vs.stream_num = len(self._stream_data)
        vs.width = bitmap_info.Width
        vs.height = bitmap_info.Height
        # Rate/Scale is the AVI fractional frame rate.
        vs.frame_rate = Timecode.interpret_frame_rate(
            stream_header.Rate / float(stream_header.Scale))
        vs.codec = stream_header.fccHandler
        vs.codec_data = codec_data
        vs.suggested_buffer_size = stream_header.SuggestedBufferSize
        vs.bit_depth = bitmap_info.BitCount
        vs.compression = bitmap_info.Compression
        vs.size_image = bitmap_info.SizeImage
        self.video_streams.append(vs)
    # Record info for every stream, video or not, so stream numbering
    # stays aligned with the file.
    self._stream_data.append(StreamInfo(
        stream_header, bitmap_info, codec_data, stream_name))
    return True
def microseconds_per_frame(self):
    """Return the duration of a single frame in microseconds, rounded."""
    frames_per_second = Timecode.interpret_frame_rate(self.frame_rate)
    return round(1e6 / frames_per_second)
def timecode_to_frame(self, timecode):
    """Convert ``timecode`` to a frame number at this stream's frame rate."""
    frame = Timecode.parse_timecode(timecode, self.frame_rate)
    return frame
def parseRawEDL2 (edl):
    """
    Another version of parsing a 'raw' EDL object.
    The edl must be sorted by event for this to work correctly.
    It stores clipname and key clip name data in the event objects.
    """
    # Index of the previously processed event; -1 means "no previous event".
    last_event_index = -1
    from_clip = None
    to_clip = None
    key_clip = None
    l = len (edl.eventList)
    for i in range (l):
        #for event in edl.eventList:
        event = edl.eventList [i]
        # Fetch the previous event, or a blank placeholder on the first pass.
        if last_event_index != -1 and last_event_index < l:
            last_event = edl.eventList [last_event_index]
        else:
            last_event = AN_EDL_EVENT (None, None, None, None)
        if debug > 3:
            print "process event: %s - id in list: %s" % (event.getEventID(), edl.eventList[i].getEventID())
            print event.printAsString ()
        # Harvest clip names from the event's comment lines.
        for comment in event.commentList:
            # Check if we have the clipname
            if re.match (r'\* FROM CLIP NAME', comment):
                # Get the clipname part
                from_clip = string.strip (string.split (comment, ':') [1])
            if re.match (r'\* TO CLIP NAME', comment):
                # Get the clipname part
                to_clip = string.strip (string.split (comment, ':') [1])
            # Check if we have the key clip name, with more info
            if re.match (r'\* KEY CLIP NAME', comment):
                # Get the name part seperated by a ':'
                # NOTE(review): key_clip is collected here but never used
                # later in this function - confirm whether that is intended.
                key_clip = string.strip (string.split (comment, ':') [1])
        if event.cutType != "C":
            # Non-cut event (transition such as a dissolve/wipe).
            # Check for a black event - which doesn't need a clipname
            if not event.blackevent:
                # Set the to 'to clip' to use as clipname for this event
                if to_clip:
                    event.setClipName (to_clip)
                    event.setToClipName (to_clip)
                else:
                    print "Warning: event %s has no 'to clip' to use in comment list %s" % (event.event_id, event.commentList)
            # A repeated event id means this line is the second half of the
            # same transition event as the previous line.
            if event.event_id == last_event.event_id:
                # Check for a black event - which doesn't need a clipname
                if not last_event.blackevent:
                    if from_clip:
                        # Set the 'from clip' to use as clipname for the last event
                        last_event.setClipName (from_clip)
                        # Compute correct source out for the last event based on effect duration
                        if debug > 3:
                            print "Source out original: %s" % last_event.getSourceOut ()
                        # Extend the previous event's source out by the
                        # transition's duration (in frames).
                        source_out = Timecode.timeCodeAddFrames (last_event.getSourceOut (),
                            event.effectDuration, fps=edl.fps)
                        last_event.setSourceOut (source_out)
                        # Set the 'from clip' for the current event
                        event.setFromClipName (from_clip)
                    else:
                        print "Warning: event %s has no 'from clip' to use in comment list %s" % (event.event_id, event.commentList)
        else:
            # We have a regular cut event
            if from_clip:
                # Set the 'from clip' to use as clipname
                event.setClipName (from_clip)
            else:
                print "Warning: event %s has no 'from clip' to use in comment list %s" % (event.event_id, event.commentList)
        # Set processed event to be last_event
        last_event_index += 1
def parseEDL2object2 (edl_file, filename, shiftEventCount = 0, fillGaps = False, gvg = 0, edlobject = None):
    """
    Parse an EDL text file into an AN_EDL object.

    Reads the header (TITLE:, FCM:, free text) until the first event
    number, then parses the event body: event lines, motion-effect lines
    (M2), comment lines (*), and source reel lines (>>> SOURCE).

    Parameters:
        edl_file        -- open file object positioned at the start of the EDL
        filename        -- name stored on a newly created AN_EDL
        shiftEventCount -- accepted for compatibility; not used in this body
        fillGaps        -- accepted for compatibility; not used in this body
        gvg             -- when 1, event numbers are 4 digits instead of 3
        edlobject       -- optional existing AN_EDL to append events into

    Returns the populated AN_EDL object.
    """
    # Keep track of position in file before each readline
    last_line = 0
    stop = False
    new_event = None
    last_event = None
    # Keep track of lowest and highest record time codes
    # NOTE(review): lowestRecordTC/highestRecordTC, new_event, firstline and
    # step are initialised but never updated below - confirm dead code.
    lowestRecordTC = None
    highestRecordTC = None
    firstline = TRUE
    firstevent = TRUE
    eventindex = 0
    #lastEvent = 0
    last_event = AN_EDL_EVENT (None, None, None, None)
    lastRecOut = 0
    step = 0
    ne = ""
    le = ""
    #print "debug: %s" % debug
    # Event numbers are 3 digits by default, 4 for GVG-style EDLs.
    lookfor = r'\d\d\d'
    if gvg == 1:
        lookfor = r'\d\d\d\d'
    # Create an empty edl
    if edlobject:
        edl = edlobject
    else:
        edl = AN_EDL (filename, "")
    # Read the header, title and whatever upto first event number
    while not stop:
        last_line = edl_file.tell ()
        l = edl_file.readline ().strip ()
        if l.startswith ("TITLE:"):
            junk, essence = l.split (":")
            edl.title = essence.strip ()
        elif l.startswith ("FCM:"):
            junk, essence = l.split (":")
            edl.fcm = essence.strip ()
        elif re.match (lookfor, l):
            # found event number - quit parsing header
            edl_file.seek (last_line)
            stop = True
        else:
            # Addition comments/text in header before first event
            edl.header.append (l.strip ())
    #return edl
    stop = False
    # Read the edl body
    while not stop:
        last_line = edl_file.tell ()
        l = edl_file.readline ()
        if l == '':
            # End of file.
            break
        else:
            l = l.strip()
        # First look for an event number
        if re.match (lookfor, l):
            # We found an event - process this event line
            # Split the line into separate elements
            ls = l.split ()
            # Fields: event id, reel id, track/event type, cut type.
            ne = AN_EDL_EVENT (ls[0], ls[1], ls[2], ls[3])
            le = ""
            # Check for blackevents
            if ls[1] == "BL":
                ne.blackevent = True
            else:
                ne.blackevent = False
            # Transition events carry an extra duration field, which shifts
            # the position of the timecode columns by one.
            if ls[3] == "D":
                if debug > 2:
                    print "Found dissolve"
                ne.duration = ls[4]
                ne.isEffectElement = True
                ne.effectDuration = int(ls[4])
                indexShift = 1
            elif ls[3].startswith ("W"):
                if debug > 2:
                    print "Found wipe"
                ne.duration = ls[4]
                ne.isEffectElement = True
                ne.effectDuration = int(ls[4])
                indexShift = 1
            elif ls[3] == "K":
                # Key event: also one extra field before the timecodes.
                indexShift = 1
            else:
                indexShift = 0
            ne.source_in = ls[4 + indexShift]    # Source timecode inpoint
            ne.source_out = ls[5 + indexShift]   # Source timecode outpoint
            ne.record_in = ls[6 + indexShift]    # Record timecode inpoint
            ne.record_out = ls[7 + indexShift]   # Record timecode outpoint
            if debug > 3:
                print "Source In: %s Out: %s Record In: %s Out:%s" % (ne.source_in, ne.source_out, ne.record_in, ne.record_out)
            edl.eventList.append (ne)
            # Compare Last record out and new record out
            # For a regular cut the should be equal
            # If the Last out point is before the current one we have a gap
            # If the Last out is later - we have overlap and will need to trim
            if debug > 3:
                print "Check last and new record out...",ls[6 + indexShift], lastRecOut
            if firstevent == FALSE:
                # compare last record out to record in for gaps
                if Timecode.timeCodeCompare (ls[6 + indexShift], lastRecOut) != 0:
                    # Insert additional in/out for black event
                    # Take last record out for new rec. in and current
                    # record in for new rec. out.
                    # and don't forget to riple the event count
                    # NOTE(review): only the debug print exists here - the
                    # gap fill described above is not implemented; confirm
                    # whether fillGaps was meant to drive this.
                    if debug > 3:
                        print "Found time gap in record timecode"
            lastRecOut = ls[7 + indexShift]
            if firstevent:
                firstevent = FALSE
            # Point last event to the just created and filled event object
            last_event = ne
            # By using the event object itself, the event index becomes obsolete
            eventindex = eventindex + 1
            ne = ""
        # Look for a motion effect line
        elif re.match (r'\M2 ', l):
            # True if we found a comment
            # NOTE(review): the escape '\M' in this pattern matches a
            # literal 'M' under the Python 2 re module - confirm intent.
            if debug > 2:
                print "Found Motion effect"
            last_event.hasSpeedChange = True
            # Third field of an M2 line is the new playback speed (fps).
            last_event.newFPS = float (l.split ()[2])
        # Look for a comment line
        elif re.match (r'\* ', l):
            # True if we found a comment
            if debug > 2:
                print "Eventindex: %s" % eventindex
            # Attach the comment to the most recently parsed event.
            ei = eventindex - 1
            edl.eventList [ei].addCommentLine (l)
        elif re.match (r'>>> SOURCE', l):
            # NOTE(review): 'ls' here is the split of the LAST EVENT line,
            # not of this '>>> SOURCE' line - this looks like a bug; verify
            # against A_EDL_SOURCE_ID's expected input.
            sourcereel = A_EDL_SOURCE_ID (ls)
            edl.sourceReelList.append (sourcereel)
            if debug > 3:
                print "Found source reel line", sourcereel.reel_1, sourcereel.reel_2, sourcereel.reelCode
        else:
            # Any other text is appended to the last event's comment list;
            # failures (e.g. empty event list) are deliberately ignored.
            try:
                if (le ==""):
                    le = eventindex - 1
                edl.eventList [le].commentList.append (l)
            except:
                pass
    return (edl)
def initialise(self, interface, attrs):
    """Load a directory of movie files, align them by timecode and set up calibration.

    Scans attrs['directory'] for .avi/.mov/mp4 files (optionally filtered by
    attrs['prefixFilename']), opens each with MovieReader, computes per-camera
    frame offsets from embedded timecodes when available, and loads or
    fabricates calibration mats.  Populates self.movies, self.camera_ids,
    self.mats, self.timecodeOffsets and self.attrs.

    Returns True on success; False (or bare None on some early-exit paths)
    on failure.
    """
    directory = self.resolvePath(attrs['directory'])
    if not directory:
        return False
    prefix = attrs['prefix']
    prefixFilename = self.resolvePath(attrs['prefixFilename'])
    # NOTE(review): this path returns None rather than False - confirm
    # callers treat both the same.
    if prefix and not prefixFilename:
        return
    calibration = attrs['calibration']
    calibrationFilename = self.resolvePath(attrs['calibrationFilename'])
    calibrationLocation = self.resolvePath(attrs['calibrationLocation'])
    if calibration and (not calibrationFilename and not calibrationLocation):
        return False
    movieFilenames = []
    try:
        for file in os.listdir(directory):
            if prefixFilename and not file.startswith(prefixFilename):
                continue
            # NOTE(review): 'mp4' has no leading dot, so any name merely
            # ending in 'mp4' matches - confirm intent.
            if file.endswith('.avi') or file.endswith(
                    '.mov') or file.endswith('mp4'):
                movieFilenames.append(os.path.join(directory, file))
    except WindowsError as e:
        # NOTE(review): WindowsError only exists on Windows; on other
        # platforms a listdir failure would raise NameError here.
        self.logger.error('Could not find videos: % s' % str(e))
    if not movieFilenames:
        # TODO: Here we'll have to clear the cameras etc.
        return False
    # Windows will produce a wonky order, i.e. 1, 10, 11, .., 2, 3, ..
    # Use natural sorting to rectify
    movieFilenames.sort(key=self.alphaNumKey)
    self.camera_ids = []
    self.camera_names = []
    self.movies = []
    self.mats = []
    vheights = []
    vwidths = []
    timecodes = []
    hasTimecode = False
    useTimecode = attrs['useTimecode'] if 'useTimecode' in attrs else True
    offset = attrs['offset']
    # Per-movie frame offsets: explicit list, or the single offset repeated.
    if 'offsets' in attrs and attrs['offsets']:
        # SECURITY(review): eval of an attribute string - safe only if
        # attrs comes from trusted configuration.
        offsets = eval(attrs['offsets'])
    else:
        offsets = [offset] * len(movieFilenames)
    for ci, mf in enumerate(movieFilenames):
        self.logger.info('Loading MovieReader: %s' % mf)
        movieData = MovieReader.open_file(mf, audio=False,
                                          frame_offset=offsets[ci])
        # Only keep movies that actually produced a video buffer.
        if movieData['vbuffer'] is not None:
            self.movies.append(movieData)
            self.timecodeOffsets.append(0)
            if 'timecode' in movieData and movieData['timecode']:
                hasTimecode = True
                timecodes.append(movieData['timecode'])
    # Make sure we have all the cameras before continuing
    if len(self.movies) != len(movieFilenames):
        self.logger.error('Could not load all movies in sequence')
        return
    # Make sure we have as many time codes as movies (if we have any)
    if hasTimecode and len(self.movies) != len(timecodes):
        self.logger.error('Not all movie files have a time code')
        return
    # See if we can get the offsets using the time codes
    if hasTimecode and useTimecode:
        print 'Video timecodes:', timecodes
        fps_all = [round(m['fps']) for m in self.movies]
        print 'FPS:', fps_all
        timecodeValues = [
            Timecode.TCFtoInt(tc, fps)
            for tc, fps in zip(timecodes, fps_all)
        ]
        # Camera indices ordered from the latest timecode downwards.
        tcOrderDesc = [
            timecodes.index(tc) for tc in sorted(timecodes, reverse=True)
        ]
        # Set the first offset to 0
        firstTcIndex = tcOrderDesc[0]
        self.timecodeOffsets[firstTcIndex] = 0
        largestTc = timecodes[firstTcIndex]
        offsetStartIndex = 1
        # We can also get the timecode destination from an incoming location, e.g. 2D detections
        if 'timecodeLocation' in attrs and attrs['timecodeLocation']:
            tcSyncTime = interface.attr(
                'timecode', atLocation=attrs['timecodeLocation'])
            if tcSyncTime is not None:
                tcSyncValue = Timecode.TCFtoInt(tcSyncTime, fps_all[0])
                # The external sync timecode must not precede the videos.
                if tcSyncValue < timecodeValues[firstTcIndex]:
                    self.logger.error(
                        'Sync timecode %s is smaller than video timecodes (%s).'
                        % (tcSyncTime, largestTc))
                    return
                largestTc = tcSyncTime
                offsetStartIndex = 0
        self.timecode = largestTc
        self.logger.info('Setting timecode to: %s' % (largestTc))
        # Calculate the offset for each camera to get it up to speed with the target timecode
        # TODO: Replace hard coded timecode fps and multiplier
        timecodeFps, timecodeMultiplier = 25., 2.
        for tcInd in tcOrderDesc[offsetStartIndex:]:
            diff = Timecode.TCSub(largestTc, timecodes[tcInd], timecodeFps)
            self.timecodeOffsets[tcInd] = Timecode.TCFtoInt(
                diff, timecodeFps) * timecodeMultiplier
    if self.timecodeOffsets:
        print 'Video timecode offsets:', self.timecodeOffsets
    self.camera_ids = [
        'Camera %d' % ci for ci in xrange(len(movieFilenames))
    ]
    self.movies = self.movies
    if not calibrationLocation:
        calibrationLocation = interface.root()
    # Prefer an explicit calibration file, then mats published at the
    # calibration location, and finally uninitialised placeholder mats.
    if calibrationFilename or interface.hasAttr(
            'mats', atLocation=calibrationLocation):
        if calibrationFilename:
            # TODO: Detect filetype, e.g. .cal and .xcp and handle accordingly
            try:
                self.mats, rawCalData = OptitrackReader.load_CAL(
                    calibrationFilename)
                if not self.mats:
                    return False
            except IOError as e:
                self.logger.error('Could not load calibration file: %s'
                                  % str(e))
                return False
        else:
            self.mats = interface.attr('mats',
                                       atLocation=calibrationLocation)
            if not self.mats:
                self.logger.error('Could not find calibration mats: %s'
                                  % calibrationLocation)
                return False
    else:
        from GCore import Calibrate
        for ci, (cid, md) in enumerate(zip(self.camera_ids, self.movies)):
            if md is not None:
                self.mats.append(
                    Calibrate.makeUninitialisedMat(
                        ci, (md['vheight'], md['vwidth'])))
    for md in self.movies:
        vheights.append(md['vheight'])
        vwidths.append(md['vwidth'])
    Ps = interface.getPsFromMats(self.mats)
    # SECURITY(review): eval of attrs['colour'] - trusted config only.
    self.attrs = {
        'vheight': vheights,
        'vwidth': vwidths,
        'camera_ids': self.camera_ids,
        'Ps': Ps,
        'mats': self.mats,
        'colour': eval(attrs['colour'])
    }
    if self.camera_names:
        self.attrs['camera_names'] = self.camera_names
    self.initialised = True
    return True