Beispiel #1
0
 def getRenderCam(self, shot):
     """Return the camera connected to *shot*, or 'perspShape' as a fallback.

     :param str shot: name of a Maya shot node.
     :rtype: str
     """
     # cc (currentCamera) query returns the camera linked to the shot node.
     camera = cmds.shot(shot, q=True, cc=True)
     if camera:
         # Fix: reuse the value already queried instead of issuing the
         # identical cmds.shot query a second time.
         return camera
     else:
         self.error('No camera linked to %s' % shot)
         # Fall back to the default perspective camera shape.
         return 'perspShape'
Beispiel #2
0
    def connectLoc(self, arg):
        """Constrain each selected '<name>_loc' locator's target back to it.

        For every selected locator named '<target>_loc', parent- and
        scale-constrain '<target>' to the locator, parent the locator under
        'ARLoc_Grp', and remove the locator's keys strictly inside each
        sequencer shot's frame range.

        :param arg: UI callback argument (unused).
        """
        locName = mc.ls(sl=True)
        self.listSel = []
        for loc in locName:
            # Fix: print() form is valid on Python 2 and 3 and prints the
            # same thing for a single argument.
            print(loc)
            # '<target>_loc' -> '<target>'
            gpu_name = loc.split('_loc')
            self.listSel.append(gpu_name[0])

        mc.select(clear=True)
        # NOTE: the loop variable rebinds the instance attribute self.sel
        # (behaviour kept from the original).
        for self.sel in self.listSel:
            self.locName = self.sel + "_loc"
            self.parentCon = mc.parentConstraint(self.locName,
                                                 self.sel,
                                                 maintainOffset=False)
            self.scaleCon = mc.scaleConstraint(self.locName,
                                               self.sel,
                                               maintainOffset=False)
            mc.parent(self.locName, 'ARLoc_Grp')
            for shotSq in mc.sequenceManager(listShots=True):
                self.currentShotStart = mc.shot(shotSq, q=True, st=True)
                self.currentShotEnd = mc.shot(shotSq, q=True, et=True)

                # Remove keys strictly inside the shot range, keeping the
                # boundary frames intact.
                mc.cutKey(self.locName,
                          time=((self.currentShotStart + 1),
                                (self.currentShotEnd - 1)),
                          option='keys')
Beispiel #3
0
    def _applyStartFrameOverride(self):
        """
        If a start frame override has been set, adjust the sequence start
        frames for all audio and video clips so that the earliest shot/audio
        we created lands on the specified frame.
        """
        if self.startFrameOverride is None or self.minFrameInFile is None:
            return

        # Subtracting this offset moves the earliest clip onto the override.
        offset = self.minFrameInFile - self.startFrameOverride

        # Fix: .values() works on both Python 2 and 3; .itervalues() is
        # Python-2-only.
        for clip in self.clip_dict.values():
            if clip is None:
                # TODO: raise warning here. We failed to create a matching
                # shot/audio node for some reason.
                continue
            # Fix: renamed from 'type', which shadowed the builtin.
            node_type = cmds.nodeType(clip)
            if node_type == "shot":
                start = float(cmds.shot(clip, q=1,
                                        sequenceStartTime=1)) - offset
                end = float(cmds.shot(clip, q=1,
                                      sequenceEndTime=1)) - offset
                cmds.shot(clip,
                          e=1,
                          sequenceStartTime=start,
                          sequenceEndTime=end)
            elif node_type == "audio":
                start = float(cmds.sound(clip, q=1, offset=1)) - offset
                cmds.sound(clip, e=1, offset=start)
Beispiel #4
0
def shiftSequencer(frame=0, mute=False):
    """Shift every shot in the camera sequencer by *frame* frames.

    :param float frame: offset to apply (positive shifts later).
    :param bool mute: mute state applied to every shot after shifting.
    """
    shots = mc.ls(type='shot')
    infoDict = dict()

    if shots:
        # Group shots by their sequence start frame.
        for shot in shots:
            sequenceStartFrame = mc.getAttr('%s.sequenceStartFrame' % shot)
            infoDict.setdefault(sequenceStartFrame, []).append(shot)

        # Positive moves are processed from the last shot backwards so a
        # shifted shot never lands on one not yet moved (and vice versa).
        # Fix: the original left shotDict unbound when frame == 0, raising
        # NameError; sorted(..., reverse=...) covers every case. .items()
        # also replaces the Python-2-only .iteritems().
        shotDict = sorted(infoDict.items(), reverse=frame > 0)

        for key, shotNames in shotDict:
            for shotName in shotNames:
                shiftShot(shotName, frame=frame)
                mc.shot(shotName, e=True, mute=mute)
Beispiel #5
0
    def connectLoc (self,arg):
        """Reconnect namespaced targets to their '<ns>_<name>_loc' locators.

        Rebuilds each target's 'namespace:name' from the flattened locator
        name, constrains the target to the locator, parents the locator
        under 'ARLoc_Grp' and strips the locator's keys inside every
        sequencer shot's frame range.

        :param arg: UI callback argument (unused).
        """
        locName = mc.ls(sl=True)
        self.listSel = []
        for loc in locName:
            print loc
            #gpu_name = loc.split('_loc')
            # 2017-01-06: namespaces are flattened with '_' in locator names,
            # so recover the namespace by dropping the last 5 '_' segments.
            gpuNameSpace = loc.rsplit('_',5)[0]
            # Rebuild 'namespace:node' from the flattened locator name by
            # stripping the namespace prefix and the '_loc' suffix.
            nameSplit = loc.split(gpuNameSpace)[1].split('_',1)[1].split('_loc')[0]
            gpu_name = '%s:%s'%(gpuNameSpace,nameSplit)
            self.listSel.append(gpu_name)

        #self.listSel = mc.ls(selection = True)

        mc.select(clear=True)
        # NOTE: the loop variable rebinds the instance attribute self.sel.
        for self.sel in self.listSel:
            #self.locName = self.sel+"_loc"

            # Locator names use '_' where the node name uses ':' (2017-01-06
            # namespace change).
            self.locName = self.sel.replace(':','_')+"_loc"
            #mc.cutKey(self.sel, option='keys')
            self.parentCon = mc.parentConstraint(self.locName,self.sel,maintainOffset = False) #True
            self.scaleCon = mc.scaleConstraint(self.locName,self.sel,maintainOffset = False)
            mc.parent(self.locName,'ARLoc_Grp')
            for shotSq in mc.sequenceManager(listShots = True):
                self.currentShotStart = mc.shot( shotSq,q=True,st=True)
                self.currentShotEnd = mc.shot (shotSq,q=True,et=True )

                # Remove keys strictly inside the shot range, keeping the
                # boundary frames.
                mc.cutKey(self.locName,time=((self.currentShotStart+1), (self.currentShotEnd-1)), option='keys')
Beispiel #6
0
    def _writeTrack(self, seq_elem, isVideo, nodeList, trackNumber):
        """
        Write the video/audio track. nodeList is a list of all shot/audio
        nodes in the track.

        :param seq_elem: parent sequence element produced by the translator.
        :param bool isVideo: True for a video track, False for audio.
        :param list nodeList: Maya shot/audio node names on this track.
        :param int trackNumber: index recorded in the track info.
        """
        track_info = {"type": ("audio", "video")[isVideo]}

        numLocked = 0
        numEnabled = 0

        # Fix: list.sort(cmpFunc) is Python-2-only; sorted() with cmp_to_key
        # works on 2.7 and 3.x, and also leaves the caller's list untouched.
        from functools import cmp_to_key
        comparator = videoClipCompare if isVideo else audioClipCompare
        sortedNodeList = sorted(nodeList, key=cmp_to_key(comparator))

        for clip in sortedNodeList:
            if isVideo:
                numLocked = numLocked + int(cmds.shot(clip, q=1, lock=1))
                numEnabled = numEnabled + int(not cmds.shot(clip, q=1, mute=1))
            else:
                numEnabled = numEnabled + int(
                    not cmds.sound(clip, q=1, mute=1))

        # The track counts as locked/enabled only if every clip on it is.
        track_info["locked"] = (numLocked == len(nodeList))
        track_info["enabled"] = (numEnabled == len(nodeList))
        track_info["trackNumber"] = trackNumber

        track_elem = self.translator.writeTrack(seq_elem, track_info)

        for clip in sortedNodeList:
            self._writeClip(track_elem, isVideo, clip)
Beispiel #7
0
    def autoTranfer(self, arg):
        """Bake selected objects' motion onto locators, then drive the
        objects from those locators.

        For each selected node a '<name>_loc' locator is created, constrained
        to the node, baked over the animation range, and then the constraint
        direction is reversed so the locator drives the node. Locator keys
        inside each sequencer shot's range are removed at the end.

        :param arg: UI callback argument (unused).
        """
        self.listSel = mc.ls(selection=True)

        if not mc.objExists('ARLoc_Grp'):
            mc.group(em=True, name='ARLoc_Grp')
        self.bakeLoc = []
        self.delPar = []
        self.delScale = []
        self.selList = []
        for self.sel in self.listSel:
            # Namespace ':' is flattened to '_' in locator names (2017-01-06
            # namespace change).
            self.locName = self.sel.replace(':', '_') + "_loc"
            self.loc = mc.spaceLocator(p=(0, 0, 0), name=self.locName)
            self.parentLoc = mc.parentConstraint(self.sel, self.loc,
                                                 maintainOffset=False)
            self.scaleLoc = mc.scaleConstraint(self.sel, self.loc,
                                               maintainOffset=False)
            self.bakeLoc.append(self.loc[0])  # spaceLocator returns a list
            self.delPar.append(self.parentLoc[0])  # constraint cmds return lists
            # Fix: the scale constraints were never collected, so
            # mc.delete(self.delScale) below operated on an empty list and
            # the scale constraints survived the bake.
            self.delScale.append(self.scaleLoc[0])
            self.selList.append(self.sel)

        print(self.delPar)
        self.animNodes = mc.ls(type='animCurve')
        self.firstKey = mc.findKeyframe(self.animNodes, which='first')
        self.lastKey = mc.findKeyframe(self.animNodes, which='last')

        # Clamp: keys before frame 101 are ignored (presumably pre-roll --
        # confirm the convention).
        if self.firstKey < 101:
            self.firstKey = 101

        # isolate viewport for faster baking
        mayaTools.isolateObj(True)

        # bake locator
        mc.bakeResults(self.bakeLoc, simulation=True,
                       time=(self.firstKey, self.lastKey))

        # restore viewport back
        mayaTools.isolateObj(False)

        mc.delete(self.delPar)
        mc.delete(self.delScale)
        mc.cutKey(self.selList, option='keys')
        for self.sel in self.listSel:
            self.locName = self.sel.replace(':', '_') + "_loc"
            # Reverse the constraint: the baked locator now drives the node.
            self.parentCon = mc.parentConstraint(self.locName, self.sel,
                                                 maintainOffset=False)
            self.scaleCon = mc.scaleConstraint(self.locName, self.sel,
                                               maintainOffset=False)
            mc.parent(self.locName, 'ARLoc_Grp')
            for shotSq in mc.sequenceManager(listShots=True):
                self.currentShotStart = mc.shot(shotSq, q=True, st=True)
                self.currentShotEnd = mc.shot(shotSq, q=True, et=True)

                # Drop keys strictly inside each shot's range.
                mc.cutKey(self.locName,
                          time=((self.currentShotStart + 1),
                                (self.currentShotEnd - 1)),
                          option='keys')
Beispiel #8
0
def get_camera():
    """Resolve the camera to operate on, plus the model panel showing it.

    Priority: first selected camera (or a selected curve rig containing
    one), then the current sequencer shot's camera, then an arbitrary scene
    camera. Returns a Camera wrapper carrying the transform, focal length,
    near clip plane and the matching model panel.
    """
    sel = mc.ls(selection=True)
    cam_shot = mc.sequenceManager(currentShot=True, query=True)
    cam_list = []
    cam_panel = []

    if sel:
        for c in sel:
            # Selected transform that directly owns a camera shape.
            if mc.listRelatives(c, type='camera'):
                cam_list.append(c)

            # Selected control curve whose hierarchy contains a camera:
            # record the camera shape's parent transform instead.
            elif mc.listRelatives(c, type='nurbsCurve') and mc.listRelatives(
                    c, allDescendents=True, type='camera'):
                cam_shape = mc.listRelatives(c,
                                             allDescendents=True,
                                             type='camera')[0]
                parent_node = mc.listRelatives(cam_shape, parent=True)[0]
                cam_list.append(parent_node)

    if cam_list:
        cam = cam_list[0]

    elif not cam_list and cam_shot:
        # If the shot's camera node has no shape children, climb to its
        # parent transform; otherwise use the queried node as-is.
        if not mc.listRelatives(mc.shot(
                cam_shot, currentCamera=True, query=True),
                                shapes=True):
            cam = mc.listRelatives(mc.shot(cam_shot,
                                           currentCamera=True,
                                           query=True),
                                   parent=True)[0]
        else:
            cam = mc.shot(cam_shot, currentCamera=True, query=True)

    else:
        all_cam = mc.ls(type='camera')
        # NOTE(review): index 1 skips the first listed camera shape --
        # presumably to avoid a default camera; confirm this is intended.
        cam = mc.listRelatives(all_cam[1], parent=True)[0]

    # Find the model panel looking through the chosen camera, matched by
    # either the transform name or its shape name.
    for p in mc.getPanel(type="modelPanel"):
        if mc.modelPanel(p, query=True, camera=True) == cam:
            cam_panel = p

        elif mc.modelPanel(p, query=True,
                           camera=True) == mc.listRelatives(cam,
                                                            shapes=True)[0]:
            cam_panel = p

        else:
            continue

    if not cam_panel:
        mc.warning('No Panel with this camera. Put one and refresh')

    camera = Camera(cam, mc.getAttr(cam + '.focalLength'),
                    mc.getAttr(cam + '.nearClipPlane'), cam_panel)
    return camera
Beispiel #9
0
def bakeSequenceScale(s):
    """Duplicate shot *s*'s camera and retime it to the sequence duration.

    The duplicate is keyed at the shot's local start/end, stripped of keys
    outside that range, then its keys are scaled so the animation spans
    startTime .. startTime + sequenceDuration - 1.

    :param str s: Maya shot node name.
    :return: name of the duplicated (baked) camera.
    """

    print "baking shot camera: ", s

    #create shot render cam
    renderCam = mc.shot(s, cc=True, q=True)
    # un=True duplicates upstream nodes feeding the camera as well.
    copiedRenderCam = mc.duplicate(renderCam,
                                   name=renderCam + "_baked_" + s,
                                   un=True)[0]
    print "copied render cam for ", s, " : ", copiedRenderCam

    #shot sequence vars
    seq_startTime = mc.shot(s, sst=True, q=True)
    seq_endTime = mc.shot(s, set=True, q=True)
    seq_duration = mc.shot(s, sequenceDuration=True, q=True)

    #get shot info
    #this assumes start time is never subframe
    startTime = mc.shot(s, st=True, q=True)
    #get actual end time, api endtime doesnt return subframe.
    # Jump the sequencer to the sequence end and read back the (possibly
    # subframe) current time.
    mc.sequenceManager(currentTime=seq_endTime)
    endTime = mc.currentTime(q=True)

    print renderCam, ":"
    print "camera time:", startTime, "=>", endTime

    #set current time to start time
    # Anchor keys at both ends of the shot (hi="both" keys the hierarchy).
    mc.setKeyframe(copiedRenderCam, hi="both", t=startTime)
    print "Created initial keys"
    mc.setKeyframe(copiedRenderCam, hi="both", t=endTime)
    print "Created ending keys"

    #remove any keyframes that's not in this frame range
    print "cleanup keyframes."
    mc.cutKey(copiedRenderCam,
              clear=True,
              time=(":" + str(startTime - 0.01), ),
              option="keys")
    mc.cutKey(copiedRenderCam,
              clear=True,
              time=(str(endTime + 0.01) + ":", ),
              option="keys")

    #set end time to scale
    scaleEndTime = startTime + seq_duration - 1

    print "scaling to: ", startTime, " => ", scaleEndTime

    mc.scaleKey(copiedRenderCam,
                time=(startTime, endTime),
                newStartTime=startTime,
                newEndTime=scaleEndTime,
                timePivot=0)

    return copiedRenderCam
Beispiel #10
0
def updateShotAttrs(dsShot, task, seqID, seqName, projName, epiName):
    """Push shot frame-range/task info onto the dsMetaData node, resolving
    Shotgun IDs for the shot and its pipeline-step task.

    :param str dsShot: Maya shot node name (possibly namespaced).
    :param str task: Shotgun pipeline-step name to match.
    :param seqID: unused; kept for caller compatibility.
    :param str seqName: sequence name for the Shotgun lookup.
    :param str projName: project name for the Shotgun lookup.
    :param str epiName: episode name for the Shotgun lookup.
    """
    dsTask = task
    startVal = cmds.shot(dsShot, q=True, st=True)
    endVal = cmds.shot(dsShot, q=True, et=True)
    shotDur = cmds.shot(dsShot, q=True, sd=True)
    camVal = cmds.shot(dsShot, q=True, cc=True)

    ## TALK TO SHOTGUN ##
    seqObj = sgTools.sgGetSequence(seqName, projName, epiName, ['shots', 'id'])
    shotObj = seqObj['shots']

    # Strip any namespace so the Maya name matches the Shotgun shot name.
    if re.search(":", dsShot):
        sSplit = dsShot.split(":")
        dsShot = sSplit[-1]

    for shot in shotObj:
        if str(shot['name']) == str(dsShot):
            shotID = shot['id']
            break
        else:
            shotID = 0

    ## TALK TO SHOTGUN ##
    shotTasks = sgTools.sgGetShotTasks(shotID, ["step", "content"])

    for t in shotTasks:
        if str(t['step']['name']) == str(task):
            shotTaskID = t['id']
            taskID = t['step']['id']
            break
        else:
            # Fix: the original literal 000000 is an octal zero in Python 2
            # and a SyntaxError in Python 3; use plain 0.
            shotTaskID = 0

    ## TALK TO SHOTGUN ##
    try:
        userOBJ = sgTools.sgGetTask(shotTaskID, ['task_assignees', 'entity'])
        user = userOBJ['task_assignees'][0]['name']
        version = "v000"
    except Exception:
        # No assignee found (or lookup failed): fall back to placeholders.
        user = "******"
        version = "none"
    try:
        cmds.setAttr('dsMetaData.' + str(dsShot) + '_shotID', shotID)
        cmds.setAttr('dsMetaData.' + str(dsShot) + '_shotStart', startVal)
        cmds.setAttr('dsMetaData.' + str(dsShot) + '_shotEnd', endVal)
        cmds.setAttr('dsMetaData.' + str(dsShot) + '_shotTaskName', dsTask, type="string")
        cmds.setAttr('dsMetaData.' + str(dsShot) + '_shotTaskID', shotTaskID)
        cmds.setAttr('dsMetaData.' + str(dsShot) + '_User', user, type="string")
    except Exception:
        # Best-effort: the dsMetaData node/attributes may not exist yet.
        pass
Beispiel #11
0
def _build_shot(item, track_no, track_range, existing_shot=None):
    """Create (or, when *existing_shot* is given, edit) a sequencer shot
    for the OTIO clip *item* on track *track_no*."""
    # A fresh camera is only needed when the shot is created from scratch.
    camera = None
    if existing_shot is None:
        base_name = item.name.split('.')[0]
        camera = cmds.camera(name=base_name + '_cam')[0]
    trimmed = item.trimmed_range()
    cmds.shot(existing_shot or item.name.split('.')[0],
              e=existing_shot is not None,
              shotName=item.name,
              track=track_no,
              currentCamera=camera,
              startTime=trimmed.start_time.value,
              endTime=trimmed.end_time_inclusive().value,
              sequenceStartTime=track_range.start_time.value,
              sequenceEndTime=track_range.end_time_inclusive().value)
Beispiel #12
0
def clear_sequencer_track_shots(index):
    """
    Remove all shots found on the track corresponding to the given index,
    then remove the track itself.
    :param int index:
    """
    shots = [
        shot for shot in mc.ls(type="shot")
        if mc.getAttr(shot + ".track") == index
    ]
    # Shots must be unlocked before they can be deleted.
    for shot in shots:
        mc.shot(shot, edit=True, lock=False)
    # Fix: mc.delete raises on an empty argument list, so only delete when
    # the track actually contained shots.
    if shots:
        mc.delete(shots)
    mc.shotTrack(removeTrack=index)
Beispiel #13
0
def createCamSequencer(shotData, handle=25):
    """Create one sequencer shot per entry of *shotData*.

    :param list shotData: dicts with 'code', 'cam_name', 'sg_cut_in' and
        'sg_cut_out' keys.
    :param int handle: extra offset inserted before each successive shot
        (0/None disables the offset).
    """
    i = 0
    for shot in shotData:
        cutIn = shot['sg_cut_in']
        cutOut = shot['sg_cut_out']
        if handle:
            # Each successive shot is pushed later by one more handle length.
            offset = i * handle
            # Fix: print() form works identically on Python 2 and 3 for a
            # single argument (the statement form is py2-only).
            print(cutIn)
            print(cutOut)
            cutIn = cutIn + offset
            cutOut = cutOut + offset
            i = i + 1
        print(shot['cam_name'])
        cmds.shot(shot['code'], startTime=cutIn, endTime=cutOut,
                  sequenceStartTime=cutIn, sequenceEndTime=cutOut,
                  currentCamera=shot['cam_name'])
Beispiel #14
0
 def do_create_sequence(self, shot_name):
     """Create 'cam_<shot_name>' plus a sequencer shot 'shot_<shot_name>'.

     Aborts with a message box if the camera already exists. Relies on the
     new camera being the active selection for the renames below.

     :param str shot_name: bare shot identifier (no 'cam_'/'shot_' prefix).
     """
     camera_name = 'cam_%s' % shot_name
     shot_name = 'shot_%s' % shot_name
     if mc.objExists(camera_name):
         QMessageBox.information(self, 'error', '%s exists' % camera_name)
         return
     new_cam = mc.camera()
     # filmFit 1 -- see Maya cameraShape.filmFit enum for the exact mode.
     mc.setAttr(new_cam[1] + '.filmFit', 1)
     mc.parent(new_cam[0], 'camera')
     # The freshly parented camera is the current selection; rename the
     # transform, then its shape to match.
     mc.rename(mc.ls(sl=True)[0], camera_name)
     mc.rename(
         mc.listRelatives(mc.ls(sl=True)[0], c=1, shapes=1)[0],
         camera_name + 'Shape')
     mc.shot(shot_name, currentCamera=camera_name)
def _build_shot(item, track_no, track_range, existing_shot=None):
    """Build or update the Maya sequencer shot backing OTIO clip *item*."""
    clip_base = item.name.split('.')[0]
    editing = existing_shot is not None
    # Only a brand-new shot gets its own camera.
    camera = None if editing else cmds.camera(name=clip_base + '_cam')[0]
    cmds.shot(
        existing_shot or clip_base,
        e=editing,
        shotName=item.name,
        track=track_no,
        currentCamera=camera,
        startTime=item.trimmed_range().start_time.value,
        endTime=item.trimmed_range().end_time_inclusive().value,
        sequenceStartTime=track_range.start_time.value,
        sequenceEndTime=track_range.end_time_inclusive().value
    )
Beispiel #16
0
def _video_url_for_shot(shot):
    """Return the playblast .mov path for *shot*, located in a 'playblasts'
    folder next to the current scene file."""
    scene_path = os.path.normpath(cmds.file(q=True, sn=True))
    scene_base = os.path.basename(os.path.splitext(scene_path)[0])
    shot_name = cmds.shot(shot, q=True, shotName=True)
    movie_name = '{base_name}_{shot_name}.mov'.format(
        base_name=scene_base, shot_name=shot_name)
    return os.path.join(os.path.dirname(scene_path), 'playblasts', movie_name)
Beispiel #17
0
 def set_ae_view(self, index):
     """Sync the AE camera combo and scene selection to the row at *index*.

     :param index: Qt model index of the clicked shot row.
     """
     if index.isValid():
         node = self.dependency_model.invisibleRootItem()[index.row()]
         # Combo entries are camera shape names: '<shotCamera>Shape'.
         idx = self.ae_view.camera_combo.findText(str(cmds.shot(node.nodeName(), q=True, cc=True)) + 'Shape')
         self.ae_view.camera_combo.setCurrentIndex(idx)
         pm.select(node)
     # Always retarget the data mapper, even for an invalid index.
     self.data_mapper.setCurrentModelIndex(index)
Beispiel #18
0
def split_shot(shot, frame, padding=0, name=None):
    """
    Split shot at given frame. Adapt animation and other shot if padding is set.
    :param str shot: representing maya shot node.
    :param float frame: split time.
    :param float padding: time range to add between the split in maya timeline.
    :param str name: name for the new shot node (Maya picks one when None).
    :rtype: str representing the maya shot node created.
    """
    start_frame = cmds.getAttr(shot + ".startFrame")
    end_frame = cmds.getAttr(shot + ".endFrame")
    if not start_frame < frame < end_frame:
        raise ValueError("{} not in shot {}".format(frame, shot))
    # Split the shot: the original keeps [start, frame - 1].
    old_sequence_end_frame = cmds.getAttr(shot + ".sequenceEndFrame")
    cmds.setAttr(shot + ".endFrame", frame - 1)
    # Re-read after trimming: shortening endFrame moves sequenceEndFrame.
    sequence_end_frame = cmds.getAttr(shot + ".sequenceEndFrame")
    # NOTE(review): the new shot spans frame + 1 .. end_frame + 1, so the
    # split frame itself belongs to neither half and the total length grows
    # by one -- confirm this off-by-one is intended.
    new_shot = cmds.shot(name,
                         shotName=cmds.getAttr(shot + ".shotName"),
                         startTime=frame + 1,
                         endTime=end_frame + 1,
                         sequenceStartTime=sequence_end_frame + 1,
                         sequenceEndTime=old_sequence_end_frame)
    # Deal with shot padding.
    if not padding:
        return new_shot
    # Hold animation across the split and shift later shots to open a gap.
    curves = cmds.ls(type=ANIMATION_CURVES_TYPES)
    if curves:
        hold_animation_curves(curves, frame, padding)
    to_shift = filter_locked_shots(cmds.ls(type="shot"))
    shift_shots(to_shift, padding, after=frame)
    return new_shot
Beispiel #19
0
 def getShots(self):
     """Populate self.shList with every sequencer shot, flagging shots that
     do not follow the 's0000' naming standard; create a default 's0010'
     shot when the scene has none.
     """
     # Fix: renamed from 'list', which shadowed the builtin.
     shotList = []
     for shot in cmds.ls(type="shot"):
         if re.match("(s[0-9][0-9][0-9][0-9])", shot):
             self.shList.addItem(shot)
             shotList.append(shot)
         else:
             # Misnamed shots are shown greyed-out (no item flags).
             item = QtGui.QListWidgetItem(shot)
             item.setFlags(QtCore.Qt.NoItemFlags)
             self.shList.addItem(item)
             shotList.append(shot)
     if shotList:
         # NOTE(review): this only re-checks the *last* shot from the loop
         # above; earlier shots are not inspected here. Kept as-is to
         # preserve existing behaviour -- confirm intent.
         if not re.match("(s[0-9][0-9][0-9][0-9])", shot):
             for shot in shotList:
                 self.error('Shot: %s is not named correctly.' % shot)
             self.error('\nCouldnt find any shots with the correct name standard: s0000')
             self.error('Please make sure that you have named your shots correctly\n')

         """for i in range(self.shList.count()):
             item = self.shList.item(i)
             print str(item.flags())"""
     else:
         # No shots at all: create a default one over the render range.
         st = cmds.getAttr("defaultRenderGlobals.startFrame")
         et = cmds.getAttr("defaultRenderGlobals.endFrame")
         shot = cmds.shot('s0010', sst=st, set=et, cc='persp')
         self.shList.addItem('s0010')
Beispiel #20
0
def filter_locked_shots(shots):
    """
    Filter out all shots locked.
    :param list[str] shots: Maya shot nodes.
    :rtype: list[str]
    :return: Maya shot nodes.
    """
    unlocked = []
    for shot_node in shots:
        # Keep only shots whose lock flag is off.
        if not cmds.shot(shot_node, query=True, lock=True):
            unlocked.append(shot_node)
    return unlocked
Beispiel #21
0
def _read_track(shots):
    """Build an OTIO video track from an ordered list of Maya shot nodes,
    inserting gaps where consecutive shots are not contiguous."""
    track = otio.schema.Track(kind=otio.schema.track.TrackKind.Video)

    previous_end = 0
    for shot_node in shots:
        seq_start = int(cmds.shot(shot_node, q=True, sequenceStartTime=True))
        seq_end = int(cmds.shot(shot_node, q=True, sequenceEndTime=True))

        # Insert a gap when this clip does not butt up against the last one.
        gap_length = seq_start - previous_end
        previous_end = seq_end + 1
        if gap_length:
            track.append(_get_gap(gap_length))

        track.append(_read_shot(shot_node))

    return track
def _read_track(shots):
    """Assemble an OTIO video track, padding non-contiguous shots with gaps."""
    video_track = otio.schema.Track(kind=otio.schema.track.TrackKind.Video)

    next_expected_start = 0
    for current in shots:
        clip_start = int(cmds.shot(current, q=True, sequenceStartTime=True))
        clip_end = int(cmds.shot(current, q=True, sequenceEndTime=True))

        # Gap between the previous clip's end and this clip's start.
        gap = clip_start - next_expected_start
        next_expected_start = clip_end + 1
        if gap:
            video_track.append(_get_gap(gap))

        video_track.append(_read_shot(current))

    return video_track
Beispiel #23
0
def _read_shot(shot):
    """Convert a Maya sequencer shot into an OTIO clip with a media reference."""
    rate = FPS.get(cmds.currentUnit(q=True, time=True), 25)
    start = int(cmds.shot(shot, q=True, startTime=True))
    # Treat the end as exclusive (hence the +1 on the queried end time).
    end = int(cmds.shot(shot, q=True, endTime=True)) + 1
    duration = end - start

    available = otio.opentime.TimeRange(
        otio.opentime.RationalTime(value=start, rate=rate),
        otio.opentime.RationalTime(value=duration, rate=rate))

    video_reference = otio.schema.ExternalReference(
        target_url=_video_url_for_shot(shot),
        available_range=available)

    # The source range mirrors the available range: the whole shot is used.
    source = otio.opentime.TimeRange(
        otio.opentime.RationalTime(value=start, rate=rate),
        otio.opentime.RationalTime(value=duration, rate=rate))

    return otio.schema.Clip(name=cmds.shot(shot, q=True, shotName=True),
                            media_reference=video_reference,
                            source_range=source)
Beispiel #24
0
def list_used_sequencer_track_indexes():
    """
    List all the camera sequencer tracks indexes containing at least one shot.
    :rtype: list[int]
    """
    indexes = set()
    for shot_node in mc.ls(type="shot"):
        indexes.add(mc.shot(shot_node, track=True, query=True))
    return list(indexes)
Beispiel #25
0
def camDict():
    """Return one info dict per sequencer shot.

    :rtype: list[dict]
    :return: dicts with 'shot', 'camera', 'startFrame' and 'endFrame' keys.
    """
    # Fix: renamed locals that shadowed the builtins 'list' and 'dict'.
    shotInfoList = []
    # sequenceManager returns None when there are no shots; guard iteration.
    seqShots = cmds.sequenceManager(listShots=True) or []

    for each in seqShots:
        startFrame = cmds.getAttr(each + ".startFrame")
        endFrame = cmds.getAttr(each + ".endFrame")
        info = {'shot': each,
                'camera': cmds.shot(each, q=True, cc=True),
                'startFrame': int(startFrame),
                'endFrame': int(endFrame)}
        shotInfoList.append(info)
    return shotInfoList
Beispiel #26
0
def correct_cam(shotName):
    """Return (camera name, start, end) for *shotName*; the frame values are
    0 unless the shot's current camera matches the '<shot>_cam' convention."""
    cam_name = '%s_cam' % shotName
    start_frame = 0
    end_frame = 0

    if mc.shot(shotName, q=True, currentCamera=True) == cam_name:
        start_frame = mc.getAttr('%s.startFrame' % shotName)
        end_frame = mc.getAttr('%s.endFrame' % shotName)

    return cam_name, start_frame, end_frame
Beispiel #27
0
    def _writeClip(self, track_elem, isVideo, clip):
        """Write one clip element (video shot or audio node) under track_elem.

        Collects timing/enabled info from the Maya node, writes the clip via
        the translator, then writes a file element for its media (a dummy one
        when no media path exists).

        :param track_elem: parent track element from the translator.
        :param bool isVideo: True for shot nodes, False for sound nodes.
        :param str clip: Maya shot/audio node name.
        """

        # Note: we'll make the clip name and id the same as the maya node-name
        # used for linking of audio and video
        clip_info = {"name": clip, "id": clip}

        if isVideo:
            seqStartTime = cmds.shot(clip, q=1, sequenceStartTime=1)
            clip_info["duration"] = cmds.shot(clip, q=1, sequenceDuration=1)
            clip_info["start"] = seqStartTime

            # Final frame in editorial is always exclusive (bug 342715), but it
            # is not in Maya, so we must add 1 at the end here when exporting
            seqEndTime = cmds.shot(clip, q=1, sequenceEndTime=1) + 1
            clip_info["end"] = seqEndTime
            clip_info["enabled"] = not cmds.shot(clip, q=1, mute=1)

            # in/out are expressed relative to the clip's zero offset.
            inTime = cmds.shot(clip, q=1, clipZeroOffset=1)
            clip_info["in"] = inTime
            clip_info["out"] = inTime + seqEndTime - seqStartTime

            # TODO: pre/post hold

        else:
            # Audio: position is the sequencer offset plus leading silence.
            seqOffset = cmds.sound(clip, q=1, offset=1)
            silence = float(cmds.getAttr((clip + ".silence")))
            mediaIn = cmds.sound(clip, q=1, sourceStart=1)
            mediaOut = cmds.sound(clip, q=1, sourceEnd=1)
            clip_info["start"] = (seqOffset + silence)
            clip_info["end"] = seqOffset + silence + mediaOut - mediaIn
            clip_info["in"] = mediaIn
            clip_info["out"] = mediaOut
            clip_info["duration"] = mediaOut - mediaIn
            clip_info["enabled"] = not cmds.sound(clip, q=1, mute=1)

        clip_elem = self.translator.writeClip(track_elem, clip_info)

        # Note: we won't be able to open this up unless we have a file.
        # So even if there's no file, create a dummy one
        file_info = {"name": clip}

        if isVideo:
            imagePlane = cmds.shot(clip, q=1, clip=1)
            if imagePlane is not None:
                try:
                    # Using Maya API to get absolute path for the image plane media
                    node = _nameToNode(imagePlane)
                    file_info["pathurl"] = OpenMayaRender.MRenderUtil(
                    ).exactImagePlaneFileName(node)
                except:
                    # Fall back to the raw imageName attribute if the API
                    # lookup fails.
                    file_info["pathurl"] = cmds.getAttr(imagePlane +
                                                        ".imageName")
                file_info["duration"] = cmds.shot(clip, q=1, clipDuration=1)
        else:
            file_info["pathurl"] = cmds.sound(clip, q=1, file=1)
            file_info["duration"] = cmds.sound(clip, q=1, length=1)

        file_elem = self.translator.writeFile(clip_elem, file_info)
def _video_url_for_shot(shot):
    """Compute the playblast movie path associated with *shot*."""
    current_file = os.path.normpath(cmds.file(q=True, sn=True))
    base_name = os.path.basename(os.path.splitext(current_file)[0])
    # '<sceneBase>_<shotName>.mov' inside the scene's 'playblasts' folder.
    file_name = '{base_name}_{shot_name}.mov'.format(
        base_name=base_name,
        shot_name=cmds.shot(shot, q=True, shotName=True))
    return os.path.join(os.path.dirname(current_file), 'playblasts', file_name)
Beispiel #29
0
 def setData(self, index, value, role=QtCore.Qt.EditRole):
     """Qt model hook: write *value* into the shot row at *index*.

     Column 0 renames the node, 1/2 set the start/end frame, 3 sets the
     shot's current camera.

     :return: True on success, False otherwise.
     """
     if not index.isValid():
         return False
     status = False
     node = self.__node_list[index.row()]
     if role == QtCore.Qt.EditRole:
         try:
             if index.column() == 0:
                 node.rename(value)
             elif index.column() == 1:
                 node.startFrame.set(value)
             elif index.column() == 2:
                 node.endFrame.set(value)
             elif index.column() == 3:
                 cmds.shot(node.nodeName(), e=True, cc=value)
             status = True
         except Exception as e:
             # Fix: BaseException.message is Python-2-only (removed in 3);
             # print the exception itself instead.
             print(e)
             status = False
     return status
Beispiel #30
0
def shiftShot(shotName):
    """Shift *shotName* (keys and sequencer) so it starts at
    config.shotStartFrame, then fit the playback range to the shot.

    :param str shotName: Maya shot node name.
    """
    # assume that startFrame and sequenceStartFrame are the same
    startFrame = mc.getAttr('%s.startFrame' % shotName)
    endFrame = mc.getAttr('%s.endFrame' % shotName)
    duration = endFrame - startFrame + 1
    shiftFrameCount = config.shotStartFrame - startFrame

    if not shiftFrameCount == 0:
        maya_utils.shiftKey(frame=shiftFrameCount)
        logger.info('Shift key %s frames success' % shiftFrameCount)

        # shiftSequencer mutes the shots it moves; unmute this one after.
        maya_utils.shiftSequencer(frame=shiftFrameCount, mute=True)
        logger.info('Shift sequencer %s frames success' % shiftFrameCount)
        mc.shot(shotName, e=True, mute=False)

        # Match both playback and animation ranges to the shifted shot.
        endRange = config.shotStartFrame + duration - 1
        mc.playbackOptions(min=config.shotStartFrame, max=endRange)
        mc.playbackOptions(ast=config.shotStartFrame, aet=endRange)
        logger.info('Set range to %s-%s' % (config.shotStartFrame, endRange))
		def getStereoCams(sht):
			"""Return [leftCam, rightCam] for shot *sht*'s stereo rig.

			Walks up from the shot's camera shape to its grandparent and
			scans that node's children for a stereoRigTransform, reading its
			leftCam/rightCam connections. Returns empty strings when no rig
			is found.
			"""
			leftCam = ""
			rightCam = ""
			prevCamShape = cmds.shot(sht,q=True,cc=True)
			prevCam = cmds.listRelatives(prevCamShape,p=True)
			prevCamParent = cmds.listRelatives(prevCam,p=True)
			for obj in cmds.listRelatives(prevCamParent):
				if cmds.objectType(obj) == 'stereoRigTransform':
					leftCam = str(cmds.listConnections(obj+".leftCam",source=True)[0])
					rightCam = str(cmds.listConnections(obj+".rightCam",source=True)[0])
			return[leftCam,rightCam]
Beispiel #32
0
def cleanCam(list):
    """Duplicate each shot camera into an unlocked, baked '<cam>_Clean' copy
    parented under 'exportCam_Grp', then point the shot at the clean copy.

    :param list list: dicts from camDict() with 'camera', 'startFrame',
        'endFrame' and 'shot' keys. (Parameter name kept for backward
        compatibility although it shadows the builtin.)
    """
    transformAttrs = ["tx", "ty", "tz", "rx", "ry", "rz"]
    shapeAttrs = ["hfa", "vfa", "fl", "lsr", "fs", "fd", "sa", "coi"]
    for each in list:
        # Fix: the original used 'dict' as a local, shadowing the builtin.
        info = each
        cam = re.sub(r'Shape', '', info['camera'])
        start = info['startFrame']
        end = info['endFrame']
        shotName = info['shot']
        newCam = cam + "_Clean"
        newCamShape = cam + "_CleanShape"
        cmds.duplicate(cam, n=newCam, rc=True)
        cmds.parent(newCam, "exportCam_Grp")
        # Unlock transform channels on the duplicate so it can be driven.
        for attr in transformAttrs:
            cmds.setAttr('%s.%s' % (newCam, attr), lock=False)
        # Unlock the camera-shape channels and live-connect them to the
        # original so the bake captures any animation on them.
        for attr in shapeAttrs:
            cmds.setAttr('%s.%s' % (newCamShape, attr), lock=False)
        for attr in shapeAttrs:
            cmds.connectAttr('%s.%s' % (cam, attr), '%s.%s' % (newCamShape, attr))
        cmds.parentConstraint(cam, newCam)
        cmds.bakeResults(newCam, t=(start, end), sb=1, at=transformAttrs)
        cmds.bakeResults(newCamShape, t=(start, end), sb=1, at=shapeAttrs)
        # Remove the constraint now that the motion is baked down.
        cmds.delete(newCam + "_parentConstraint1")
        cmds.shot(shotName, e=True, cc=newCam)
def _read_shot(shot):
    """Build an OTIO Clip for a single Maya shot node.

    The available range of the media reference and the clip's source
    range are identical: the shot's start frame and its (end - start + 1)
    frame duration, expressed at the scene's frame rate.
    """
    fps = FPS.get(cmds.currentUnit(q=True, time=True), 25)
    first = int(cmds.shot(shot, q=True, startTime=True))
    last = int(cmds.shot(shot, q=True, endTime=True)) + 1

    def _frame_range():
        # Shared helper: both ranges cover the same span.
        return otio.opentime.TimeRange(
            otio.opentime.RationalTime(value=first, rate=fps),
            otio.opentime.RationalTime(value=last - first, rate=fps),
        )

    media = otio.schema.ExternalReference(
        target_url=_video_url_for_shot(shot),
        available_range=_frame_range(),
    )

    return otio.schema.Clip(
        name=cmds.shot(shot, q=True, shotName=True),
        media_reference=media,
        source_range=_frame_range(),
    )
Beispiel #34
0
	def getSequencerData(self):
		"""Return [[cameraShape, startFrame, endFrame], ...] for every
		shot in the camera sequencer; an empty list when there are none."""
		shotNodes = ma.sequenceManager(lsh = True)
		shotData = []
		try:
			for shotNode in shotNodes:
				shotData.append([
					str(ma.shot(shotNode, q = True, cc = True)),
					int(ma.shot(shotNode, q = True, st = True)),
					int(ma.shot(shotNode, q = True, et = True)),
				])
		except TypeError:
			# sequenceManager returns None when the sequencer is empty;
			# iterating it raises TypeError, which we treat as "no shots".
			pass
		return shotData
Beispiel #35
0
def shift_shots(shots, offset, before=None, after=None):
    """
    Shift given shots in the maya timeline by the offset.
    :param list|str shots: maya shot node name(s).
    :param float offset: frames added to each shot's start/end (may be negative).
    :param float before: only shift shots that start before this time.
    :param float after: only shift shots that end after this time.
    """
    if not offset:
        return
    # The docstring allows a single shot name; iterating a bare string
    # would loop over characters, so normalise it to a one-element list.
    if isinstance(shots, str):
        shots = [shots]
    # Compare against None so a filter value of 0 (frame zero) is honoured.
    if before is not None:
        shots = [s for s in shots if cmds.getAttr(s + ".startFrame") < before]
    if after is not None:
        shots = [s for s in shots if cmds.getAttr(s + ".endFrame") > after]
    # When shifting forward, move the last shot first so a shot never
    # collides with a not-yet-moved neighbour.
    if offset > 0:
        shots = reversed(shots)

    for shot in shots:
        start_frame = cmds.getAttr(shot + ".startFrame") + offset
        end_frame = cmds.getAttr(shot + ".endFrame") + offset
        cmds.shot(shot, edit=True, startTime=start_frame, endTime=end_frame)
Beispiel #36
0
    def _applyLinks(self):
        """Associate each linked audio clip with its shot node.

        Maya currently supports linking exactly one audio clip to one
        shot, so for every link entry we pick out the shot node and the
        audio node (via self.clip_dict) and connect the pair.
        """
        for link in self.translator.getLinks():
            audio_node = None
            shot_node = None
            for clip in link.itervalues():
                node = self.clip_dict.get(clip)
                if node is None:
                    continue
                node_type = cmds.nodeType(node)
                if node_type == "shot":
                    shot_node = node
                elif node_type == "audio":
                    audio_node = node
            # Only link when both halves of the pair were resolved.
            if audio_node is not None and shot_node is not None:
                cmds.shot(shot_node, e=1, linkAudio=audio_node)
Beispiel #37
0
def playbackToShot():
    """Create a sequencer shot from the playback range.

    The shot name is the first "_" token of the scene file name
    (expected format: q####_anim_hero.ma) and the camera is the one in
    the panel that currently has focus. Only runs when no shot node
    matching s#### exists yet.
    """
    existing = [s for s in cmds.ls(type="shot")
                if re.search("s[0-9][0-9][0-9][0-9]", s)]

    scene_path = cmds.file(q=True, l=True)[0]
    dsShot = scene_path.split("/")[-1].split("_")[0]

    if not existing:
        startVal = int(cmds.playbackOptions(q=True, minTime=True))
        endVal = int(cmds.playbackOptions(q=True, maxTime=True))
        # Camera comes from whichever model panel has focus.
        pbPanel = cmds.getPanel(withFocus=True)
        activeCam = cmds.modelEditor(pbPanel, q=True, cam=True)
        myShot = cmds.shot(dsShot, st=startVal, et=endVal)
        cmds.shot(myShot, e=True, sst=startVal, set=endVal)
        cmds.shot(myShot, e=True, cc=activeCam)
		def setAudioToCorrectPath():
			# Relink every sequencer shot's audio node to the latest
			# published wav resolved through the "shot_published_audio"
			# template; returns the list of relinked audio node names.
			# NOTE(review): this tab-indented nested def sits under a
			# space-indented function -- looks like a paste artifact;
			# confirm the intended scope.
			scenePath = cmds.file(q=True,sceneName=True)
			scene_template = tk.template_from_path(scenePath)
			flds = scene_template.get_fields(scenePath)
			audio_template = tk.templates["shot_published_audio"]

			tank = sgtk.tank_from_entity('Project', 66)

			allShots = cmds.ls(type="shot")
			allAudio = cmds.ls(type="audio")
			reportList = []
			returnList = []
			for seqShot in allShots:
				audio = cmds.shot(seqShot,q=True,audio=True)
				audioFile = cmds.getAttr(audio+".filename")# "W:/RTS/1_PREPROD/13_ANIMATIC/q340/splitshots/wav new 01/q340_s260_snd_v001.wav";
				#print audioFile
				flds['Shot'] = flds['Sequence']+"_"+seqShot
				audioOutputFile = audio_template.apply_fields(flds)
				#audioOutputPath = str.replace(str(audioOutputPath),"\\","/")
				#print audioFile
				# Remap the old animatic drive location to the current share.
				audioFile = str.replace(str(audioFile),"Z:/Richard The Stork","W:/RTS")
				audioOutputPath = str.rsplit(str(audioOutputFile),"\\",1)[0]
				print audioOutputPath
				if os.path.exists(audioOutputPath):
					# findLastVersion returns 0 when the folder is empty.
					audioOutputFile = findLastVersion(audioOutputPath,True,True)
					if audioOutputFile != 0:
						newAudio = str.rsplit(audioOutputFile,"/",1)[-1]
						newAudio = str.split(newAudio,".")[0]
						print newAudio
						# Swap the old audio node for a reference to the
						# published file and reconnect it to the shot.
						cmds.delete(audio)
						ref = cmds.file( audioOutputFile, r=True, type="audio",mergeNamespacesOnClash=False, namespace="audio")
						#
						offset = cmds.getAttr(seqShot+".sequenceStartFrame")
						cmds.setAttr(newAudio+".offset", offset)
						cmds.connectAttr(newAudio+".message", seqShot+".audio")
						
						# Report mismatches; trim audio that overruns the shot.
						shotEnd =  cmds.getAttr(seqShot +".sequenceEndFrame")
						audioEnd = cmds.getAttr(newAudio+".endFrame")
						if audioEnd < shotEnd:
							reportList += [newAudio + "  is shorter than shot !!!"]
						if audioEnd > shotEnd:
							reportList += [newAudio + "  was longer than shot. now cut to match!!!"]
							cmds.setAttr(newAudio+".endFrame",shotEnd+1)

						returnList += [newAudio]
				else:
					print "skipped ", audio
			for report in reportList:
				print report
			return returnList
Beispiel #39
0
    def setPopup(self):
        """Refresh the UI fields for the shot selected in the shot list."""
        self.CamPopupCombo.clear()
        selected = self.shList.item(self.shList.currentRow()).text()
        self.priotyCombo.setCurrentIndex(0)
        # Shot node name is the first "_" token of the list entry.
        sName = str(selected.split("_")[0])
        self.StartFrameInput.setText(str(cmds.shot(sName, q=True, st=True)))
        self.EndFrameInput.setText(str(cmds.shot(sName, q=True, et=True)))
        # The shot's own camera goes first in the combo box.
        camShot = cmds.shot(sName, q=True, cc=True)
        self.CamPopupCombo.addItem(camShot)

        self.renderer_LE.setText(str(cmds.getAttr("defaultRenderGlobals.ren")))
        self.frameExt_LE.setText(str(self.getExtension()))

        # Offer every other scene camera except the default viewport ones.
        defaultCams = ['frontShape', 'sideShape', 'perspShape', 'topShape']
        for cam in cmds.ls(type="camera"):
            if cam != camShot and cam not in defaultCams:
                self.CamPopupCombo.addItem(cam)
Beispiel #40
0
 def getStereoCams(sht):
     """Return [leftCam, rightCam] for the stereo rig driving *sht*'s
     camera; both entries are empty strings when no sibling of the
     camera's grandparent is a stereoRigTransform."""
     left = ""
     right = ""
     camShape = cmds.shot(sht, q=True, cc=True)
     camXform = cmds.listRelatives(camShape, p=True)
     rigRoot = cmds.listRelatives(camXform, p=True)
     for child in cmds.listRelatives(rigRoot):
         if cmds.objectType(child) != 'stereoRigTransform':
             continue
         left = str(cmds.listConnections(child + ".leftCam", source=True)[0])
         right = str(cmds.listConnections(child + ".rightCam", source=True)[0])
     return [left, right]
Beispiel #41
0
def camSeqRender(sh):
    """Render every frame of sequencer shot *sh* through the render
    view, then move the temp image folder to a per-camera directory.

    NOTE(review): if *sh* is not among the sequence manager's shots,
    ``s`` is never assigned and the code below raises NameError --
    confirm callers always pass a valid shot name.
    """
    allshots = mc.sequenceManager(lsh=True)
    print allshots
    # Linear scan keeps only the matching shot node.
    for rs in allshots:
        if rs == sh:
            s=rs

    print s
    
    renderCam = mc.shot(s, cc=True, q=True)
    startTime = mc.shot(s, st=True, q=True)
    endTime = mc.shot(s, et=True, q=True)
    
    #get it for nuke 
    seq_startTime = mc.shot(s, sst=True, q=True)
    seq_endTime = mc.shot(s, set=True, q=True)
    
    offsetTime = int(seq_startTime-startTime)
    
    print s, startTime, endTime,"(",seq_startTime, seq_endTime, ")", renderCam

    print "start rendering: ", renderCam
    
    camTransform = mc.listRelatives(renderCam, p=True)[0]

    #mel.eval("lookThroughModelPanel %s modelPanel4;"%(renderCam))

    # Step through the shot, re-running the previous render per frame.
    mel.eval('currentTime %s ;'%(startTime))
    while(startTime <= endTime):
        mel.eval('renderWindowRender redoPreviousRender renderView;')
        startTime += 1
        mel.eval('currentTime %s ;'%(startTime))
    
    # NOTE(review): shell=True with interpolated env/path strings --
    # assumes trusted input; a list argv with shell=False would be safer.
    callString = "mv /X/projects/luna/SHOTS/"+os.getenv("SHOT")+"/chichang/images/elements/tmp/ /X/projects/luna/SHOTS/"+os.getenv("SHOT")+"/chichang/images/elements/"+camTransform+"_"+str(offsetTime)+"/"
    mycmd=subprocess.Popen(callString, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, error=mycmd.communicate()
Beispiel #42
0
def read_sequence():
    """Build an OTIO timeline from all shot nodes, one track per
    sequencer track number, highest-numbered track first."""
    fps = FPS.get(cmds.currentUnit(q=True, time=True), 25)
    per_track = {}
    # Group shot nodes by their sequencer track number.
    for shot_node in cmds.ls(type='shot') or []:
        track_no = cmds.shot(shot_node, q=True, track=True)
        per_track.setdefault(track_no, []).append(shot_node)

    timeline = otio.schema.Timeline()
    timeline.global_start_time = otio.opentime.RationalTime(0, fps)

    for track_no in sorted(per_track, reverse=True):
        timeline.tracks.append(_read_track(per_track[track_no]))

    return timeline
def read_sequence():
    """Build an OTIO timeline from all shot nodes, one track per
    sequencer track number, highest-numbered track first.

    NOTE(review): duplicate definition -- it shadows the identical
    read_sequence defined earlier in this file.
    """
    rate = FPS.get(cmds.currentUnit(q=True, time=True), 25)
    shots = cmds.ls(type='shot') or []

    grouped = {}
    for shot in shots:
        key = cmds.shot(shot, q=True, track=True)
        if key not in grouped:
            grouped[key] = []
        grouped[key].append(shot)

    timeline = otio.schema.Timeline()
    timeline.global_start_time = otio.opentime.RationalTime(0, rate)

    # Highest track number first, to match the compositing order.
    track_numbers = sorted(grouped.keys())
    track_numbers.reverse()
    for key in track_numbers:
        timeline.tracks.append(_read_track(grouped[key]))

    return timeline
Beispiel #44
0
def run(pre_roll=0, ao=True):
    """Playblast the selected shots, or every sequencer shot when
    nothing is selected.

    :param pre_roll: frames subtracted from each shot's sequence start;
        when non-zero the end is also extended by 1.
    :param ao: ambient-occlusion switch forwarded to setPlayblast.
    :returns: list of [movie_path, version_file] pairs for blasts that
        actually produced a file on disk.
    """

    movpaths = []
    shot_selected = cmds.ls(sl=True, type='shot')

    # Fall back to all sequencer shots when no shot node is selected.
    shot_list = shot_selected if shot_selected else cmds.sequenceManager(listShots=True)

    for shot_node in shot_list:
        start = cmds.getAttr(shot_node + '.sequenceStartFrame') - pre_roll
        end   = cmds.getAttr(shot_node + '.sequenceEndFrame') + (1 if pre_roll else 0)
        camera = cmds.shot(shot_node, q=True, currentCamera=True)

        print shot_node, start, end, camera, ao
        cmds.currentTime(start, e=True)
        movie_path,version_file = playblastControl.setPlayblast(shot_node, start, end, camera, AOswitch=ao)

        # Only report blasts that actually wrote a movie file.
        if os.path.exists(movie_path):
            movpaths.append([movie_path,version_file])

    return movpaths
Beispiel #45
0
def GetShotDuration():
    """Return the duration (sd flag) of the shot named by the SHOT
    environment variable."""
    shotName = str(os.getenv('SHOT'))
    return cmds.shot(shotName, q=True, sd=True)
Beispiel #46
0
 def createNewShot(self,s,e,name,cam):
     """Create sequencer shot *name* spanning s..e in both shot time
     and sequence time, filmed through camera *cam*."""
     cmds.shot(
         name,
         st=s, et=e,    # shot time range
         sst=s, set=e,  # sequence time range
         cc=cam,
         shotName=name,
     )
Beispiel #47
0
def shot(*args, **kwargs):
    """Thin wrapper over cmds.shot that converts create/edit results to
    PyNodes; query-mode results are returned untouched."""
    result = cmds.shot(*args, **kwargs)
    querying = kwargs.get('query', kwargs.get('q', False))
    if querying:
        return result
    return _factories.maybeConvert(result, _general.PyNode)
    def execute(self, **kwargs):
        """
        Main hook entry point
        :returns:       A list of any items that were found to be published.  
                        Each item in the list should be a dictionary containing 
                        the following keys:
                        {
                            type:   String
                                    This should match a scene_item_type defined in
                                    one of the outputs in the configuration and is 
                                    used to determine the outputs that should be 
                                    published for the item
                                    
                            name:   String
                                    Name to use for the item in the UI
                            
                            description:    String
                                            Description of the item to use in the UI
                                            
                            selected:       Bool
                                            Initial selected state of item in the UI.  
                                            Items are selected by default.
                                            
                            required:       Bool
                                            Required state of item in the UI.  If True then
                                            item will not be deselectable.  Items are not
                                            required by default.
                                            
                            other_params:   Dictionary
                                            Optional dictionary that will be passed to the
                                            pre-publish and publish hooks
                        }
        """

        def isCamSelected(shotName):
            # True when one of the currently selected cameras maps to
            # *shotName*: the shot id is the last "_" token of the
            # namespace-stripped, top-level camera name.
            camList = []
            selectetShots = []
            for cam in cmds.ls(sl=True):
                par = cmds.listRelatives(cam, parent=True, fullPath=True)
                if par != None:
                    cam = str.split(str(par[0]), "|")[1]
                if cam not in camList:
                    camList += [cam]
                    noNamespace = str.split(str(cam), ":")[0]
                    selectetShots += [str.split(noNamespace, "_")[-1]]
            result = False
            if shotName in selectetShots:
                result = True
            return result

        items = []

        # get the main scene:
        scene_name = cmds.file(query=True, sn=True)
        if not scene_name:
            raise TankError("Please Save your file before Publishing")

        scene_path = os.path.abspath(scene_name)
        name = os.path.basename(scene_path)

        # create the primary item - this will match the primary output 'scene_item_type':
        items.append({"type": "work_file", "name": name})

        tk = self.parent.tank
        scenePath = cmds.file(q=True, sceneName=True)
        scene_template = tk.template_from_path(scenePath)
        flds = scene_template.get_fields(scenePath)

        # get shotgun info about what shot are needed in this sequence
        fields = ["id"]
        sequence_id = self.parent.shotgun.find("Sequence", [["code", "is", flds["Sequence"]]], fields)[0]["id"]
        fields = ["id", "code", "sg_asset_type", "sg_cut_in", "sg_cut_out", "sg_status_list"]
        filters = [["sg_sequence", "is", {"type": "Sequence", "id": sequence_id}]]
        assets = self.parent.shotgun.find("Shot", filters, fields)
        sg_shots = []
        # Keep only non-omitted shots; the shot id is the second "_" token
        # of the Shotgun shot code.
        for sht in assets:
            if sht["sg_status_list"] != "omt":
                sg_shots += [str.split(sht["code"], "_")[1]]

        # define used cameras and shots in the camera sequencer
        shots = cmds.ls(type="shot")
        shotCams = []
        unUsedCams = []
        # Global publish settings, deselected by default.
        items.append(
            {
                "type": "setting",
                "name": "NO overscan",
                "description": "set overscan Value to 1(no extra space used)",
                "selected": False,
            }
        )
        items.append(
            {
                "type": "setting",
                "name": "set Cut in",
                "description": "set Cut in to 1001 for each individual shot",
                "selected": False,
            }
        )
        # Stereo step gets an extra left-eye-only render option.
        if flds["Step"] == "s3d":
            items.append(
                {
                    "type": "setting",
                    "name": "Only render LEFT(main) cam",
                    "description": "set Cut in to 1001 for each individual shot",
                    "selected": False,
                }
            )
        # One item per sequencer shot; preselect based on camera selection
        # and flag shots that Shotgun does not know about.
        for sht in shots:
            shotCam = cmds.shot(sht, q=True, currentCamera=True)
            shotCams += [shotCam]
            select = True
            if cmds.ls(sl=True) != []:
                select = isCamSelected(sht)

            if sht not in sg_shots:
                items.append(
                    {
                        "type": "shot",
                        "name": sht,
                        "description": "!!! shot not in shotgun  ->  " + shotCam,
                        "selected": select,
                    }
                )
            else:
                items.append({"type": "shot", "name": sht, "description": "    " + shotCam, "selected": select})

            # print shotCam

        # Shotgun shots with no matching scene shot are reported, unselected.
        for sg_shot in sg_shots:
            if sg_shot not in shots:
                print(sg_shot + " cam is not in scene...")
                items.append(
                    {
                        "type": "shot",
                        "name": sg_shot,
                        "description": "!!! missing shot? check shotgun  ->  ",
                        "selected": False,
                    }
                )

        return items
Beispiel #49
0
def GetShotFrame():
    """Current time expressed as a 1-based frame number within the shot
    named by the SHOT environment variable."""
    shotName = str(os.getenv('SHOT'))
    shotStart = cmds.shot(shotName, q=True, st=True)
    return cmds.currentTime(query=True) - shotStart + 1
Beispiel #50
0
 def getDuration(self, shot):
     """Return (sequenceStartTime, sequenceEndTime) for *shot*."""
     seq_start = cmds.shot(shot, q=True, sst=True)
     seq_end = cmds.shot(shot, q=True, set=True)
     return seq_start, seq_end
	def execute(self, tasks, work_template, comment, thumbnail_path, sg_task, primary_task, primary_publish_path, progress_cb, **kwargs):
		"""
		Main hook entry point
		:param tasks:				   List of secondary tasks to be published.  Each task is a 
										dictionary containing the following keys:
										{
											item:   Dictionary
													This is the item returned by the scan hook 
													{   
														name:		   String
														description:	String
														type:		   String
														other_params:   Dictionary
													}
												   
											output: Dictionary
													This is the output as defined in the configuration - the 
													primary output will always be named 'primary' 
													{
														name:			 String
														publish_template: template
														tank_type:		String
													}
										}
						
		:param work_template:		   template
										This is the template defined in the config that
										represents the current work file
			   
		:param comment:				 String
										The comment provided for the publish
						
		:param thumbnail:			   Path string
										The default thumbnail provided for the publish
						
		:param sg_task:				 Dictionary (shotgun entity description)
										The shotgun task to use for the publish	
						
		:param primary_publish_path:	Path string
										This is the path of the primary published file as returned
										by the primary publish hook
						
		:param progress_cb:			 Function
										A progress callback to log progress during pre-publish.  Call:
										
											progress_cb(percentage, msg)
											 
										to report progress to the UI
						
		:param primary_task:			The primary task that was published by the primary publish hook.  Passed
										in here for reference.  This is a dictionary in the same format as the
										secondary tasks above.
		
		:returns:					   A list of any tasks that had problems that need to be reported 
										in the UI.  Each item in the list should be a dictionary containing 
										the following keys:
										{
											task:   Dictionary
													This is the task that was passed into the hook and
													should not be modified
													{
														item:...
														output:...
													}
													
											errors: List
													A list of error messages (strings) to report	
										}
		"""
		def FindFirstImageOfSequence(FolderPath):
			ImgsList=[]
			for file in (os.listdir(FolderPath)):
				SeqImgName = str.split(str(file),".")[1]
				ImgsList.append(SeqImgName)
			First_elmnt=ImgsList[0]
			return First_elmnt
			
		def FindFirstImageOfSequence(FolderPath):
			ImgsList=[]
			for file in (os.listdir(FolderPath)):
				if file.endswith(".png"):
					SeqImgName = str.split(str(file),".")[1]
					ImgsList.append(int(SeqImgName))
				First_elmnt=ImgsList[0]
			return First_elmnt

		def FindLastImageOfSequence(FolderPath):
			ImgsList=[]
			for file in (os.listdir(FolderPath)):
				if file.endswith(".png"):
					SeqImgName = str.split(str(file),".")[1]
					ImgsList.append(int(SeqImgName))
				Last_elmnt=ImgsList[-1]
			return Last_elmnt
			
		def FindLengthOfSequence(FolderPath):
			ImgsList=[]
			for file in (os.listdir(FolderPath)):
				if file.endswith(".png"):
					SeqImgName = str.split(str(file),".")[1]
					ImgsList.append(int(SeqImgName))
				Length_seq=len(ImgsList)
			return Length_seq
			
		def MakeListOfSequence(FolderPath):
			ImgsList=[]
			for file in (os.listdir(FolderPath)):
				if file.endswith(".png"):
					SeqImgName = str.split(str(file),".")[1]
					ImgsList.append(int(SeqImgName))
			return ImgsList

		def FindMissingFramesFromSequence(SequenceSet,inputStart,inputEnd):
			# my_list= list(range( int(FindFirstImageOfSequence(os.path.dirname(RenderPath)))	, int(FindLastImageOfSequence(os.path.dirname(RenderPath)))	 ))
			my_list= list(range( inputStart, inputEnd))
			MissingFrames =  set(my_list)-set(SequenceSet)
			return sorted(MissingFrames)
			
		def combineMediaFiles(fileList,output,concatTxt=None, ffmpeg_path = "ffmpeg"):
			rootPath = str.split(str(fileList[0]),"/q")[0]
			mediaType = str.rsplit(str(fileList[0]),".",1)[1]
			mediaFilePresent = False
			mediaListFile = rootPath+'/tmp_'+mediaType+'List.txt'
			if concatTxt != None:
				mediaListFile = concatTxt
			with open(mediaListFile, 'w') as mediaTxtFile:
				for mediaFile in fileList:
					if os.path.exists(mediaFile):
						mediaFilePresent = True
						#print mediaFile
						shotPath = str.split(str(mediaFile),"/")[-1]
						if 'Sequences' in mediaFile:
							shotPath = str.split(str(mediaFile),"Sequences")[1][1:]
						if concatTxt != None:
							shotPath = str.split(str(mediaFile),os.path.dirname(concatTxt))[1][1:]
						mediaTxtFile.write("file '" +shotPath+"'")
						mediaTxtFile.write('\r\n')
					else:
						print("AUDIO FILE NOT FOUND :  " + str(mediaFile))
						# results.append({"task":"audio stuff", "errors":("AUDIO FILE NOT FOUND :  " + str(mediaFile))})
			if mediaFilePresent:
				# command = os.path.normpath(ffmpeg_path + ' -f concat -i '+mediaListFile+' -c copy '+output + " -y")
				# command = os.path.normpath(ffmpeg_path + ' -f concat -r 24 -i '+mediaListFile+' -vcodec mjpeg -r 24 -qscale 1 -pix_fmt yuvj420p -acodec pcm_s16le -ar 48000 -ac 2 '+output + " -y")
				command = os.path.normpath(ffmpeg_path + ' -f concat -r 24 -i '+mediaListFile+' -vcodec mjpeg -r 24 -qscale 1 -pix_fmt yuvj420p '+output + " -y")
				command = str.replace(str(command), "\\" , "/")
				#print command
				value = subprocess.call(command, creationflags=CREATE_NO_WINDOW, shell=False)
				return output
			else:
				return None
		
		def findLastVersion(FolderPath,returnFile=False,returnFilePath=False):
			if os.path.exists(FolderPath):
				fileList=os.listdir(FolderPath)
			else:
				return 0
			if fileList != []:
				fileList.sort()
				lastVersion = fileList[-1]
				version = int(re.findall('\d+', lastVersion)[-1])
				if returnFilePath:
					return FolderPath+"/"+lastVersion
				if returnFile:
					return lastVersion
				return version
				#return str(FolderPath+"/"+lastVersion)
			else:
				return 0
		
		def orderMovs(movList,orderList):
			tmp = ""

		def setAudioToCorrectPath():
			scenePath = cmds.file(q=True,sceneName=True)
			scene_template = tk.template_from_path(scenePath)
			flds = scene_template.get_fields(scenePath)
			audio_template = tk.templates["shot_published_audio"]

			tank = sgtk.tank_from_entity('Project', 66)

			allShots = cmds.ls(type="shot")
			allAudio = cmds.ls(type="audio")
			reportList = []
			returnList = []
			for seqShot in allShots:
				audio = cmds.shot(seqShot,q=True,audio=True)
				audioFile = cmds.getAttr(audio+".filename")# "W:/RTS/1_PREPROD/13_ANIMATIC/q340/splitshots/wav new 01/q340_s260_snd_v001.wav";
				#print audioFile
				flds['Shot'] = flds['Sequence']+"_"+seqShot
				audioOutputFile = audio_template.apply_fields(flds)
				#audioOutputPath = str.replace(str(audioOutputPath),"\\","/")
				#print audioFile
				audioFile = str.replace(str(audioFile),"Z:/Richard The Stork","W:/RTS")
				audioOutputPath = str.rsplit(str(audioOutputFile),"\\",1)[0]
				print audioOutputPath
				if os.path.exists(audioOutputPath):
					audioOutputFile = findLastVersion(audioOutputPath,True,True)
					if audioOutputFile != 0:
						newAudio = str.rsplit(audioOutputFile,"/",1)[-1]
						newAudio = str.split(newAudio,".")[0]
						print newAudio
						cmds.delete(audio)
						ref = cmds.file( audioOutputFile, r=True, type="audio",mergeNamespacesOnClash=False, namespace="audio")
						#
						offset = cmds.getAttr(seqShot+".sequenceStartFrame")
						cmds.setAttr(newAudio+".offset", offset)
						cmds.connectAttr(newAudio+".message", seqShot+".audio")
						
						shotEnd =  cmds.getAttr(seqShot +".sequenceEndFrame")
						audioEnd = cmds.getAttr(newAudio+".endFrame")
						if audioEnd < shotEnd:
							reportList += [newAudio + "  is shorter than shot !!!"]
						if audioEnd > shotEnd:
							reportList += [newAudio + "  was longer than shot. now cut to match!!!"]
							cmds.setAttr(newAudio+".endFrame",shotEnd+1)

						returnList += [newAudio]
				else:
					print "skipped ", audio
			for report in reportList:
				print report
			return returnList
		
		def getStereoCams(sht):
			leftCam = ""
			rightCam = ""
			prevCamShape = cmds.shot(sht,q=True,cc=True)
			prevCam = cmds.listRelatives(prevCamShape,p=True)
			prevCamParent = cmds.listRelatives(prevCam,p=True)
			for obj in cmds.listRelatives(prevCamParent):
				if cmds.objectType(obj) == 'stereoRigTransform':
					leftCam = str(cmds.listConnections(obj+".leftCam",source=True)[0])
					rightCam = str(cmds.listConnections(obj+".rightCam",source=True)[0])
			return[leftCam,rightCam]

		wtd_fw = self.load_framework("tk-framework-wtd_v0.x.x")
		ffmpeg = wtd_fw.import_module("pipeline.ffmpeg")
		# ffmpeg.test()
		
		def _register_publish(path, name, sg_task, publish_version, tank_type, comment, thumbnail_path, context = None):
			"""
			Helper method to register publish using the 
			specified publish info.
			"""
			ctx = self.parent.tank.context_from_path(str(path))
			# construct args:
			args = {"tk": self.parent.tank,"sg_status_list": "cmpt","context": context,"comment": comment,"path": path,"name": name,"version_number": publish_version,"thumbnail_path": thumbnail_path,"sg_task": sg_task,"published_file_type":tank_type,"user": ctx.user,"created_by": ctx.user}
			print "-------------------"
			for a in args:
				print a , args[a]
			# print args
			# register publish;
			sg_data = tank.util.register_publish(**args)
			print 'Register in shotgun done!'
			
			return sg_data


		def orderShots(shotDictList):
			valueOrderList = []
			valueOrderListSecondary = []
			listIndex = 0
			for sht in shotDictList:
				orderNr = str("00000"+str(sht['sg_cut_order']))[-4:]
				addValue = str(listIndex)
				if sht['sg_status_list'] == 'omt':
					addValue = 'omt'
				if sht['parent_shots'] == []:
					valueOrderList += [orderNr+">>"+addValue]
				else:
					valueOrderListSecondary += [orderNr+">>"+addValue]
				listIndex += 1
			valueOrderList = sorted(valueOrderList)+sorted(valueOrderListSecondary)
			orderedList = []
			for sht in valueOrderList:
				addValue = str.split(sht,'>>')[1]
				if addValue != "omt":
					orderedList+=[shotDictList[int(addValue)]]
			return orderedList

		def checkSoundCut():
			allShots = cmds.ls(type="shot")
			allAudio = cmds.ls(type="audio")
			sequenceList = []
			for seqShot in allShots:
				#print "---", seqShot
				shotStart =  int(cmds.getAttr(seqShot +".sequenceStartFrame"))
				shotEnd  =   int(cmds.getAttr(seqShot +".sequenceEndFrame"))
				#print shotStart
				#print shotEnd
				sequenceList.append({"shot": seqShot})
				audioList = []
				audioIn = []
				audioOut = []
				for aud in allAudio:
					
					add = False
					aIn = 0
					aOut= 0
					audioStart   =  int(cmds.getAttr(aud+".offset" ))
					audioEnd = int(cmds.getAttr(aud+".endFrame"))-1
					audioOriginalDuration = int(cmds.getAttr(aud+".duration"))
					audioDuration  =  audioEnd-audioStart
					
						
					if shotStart < audioStart < shotEnd:
						add = True
						if audioEnd > shotEnd:
							aOut = shotEnd - audioStart
					if shotStart < audioEnd < shotEnd:
						add = True
						aIn = audioDuration-(audioEnd-shotStart)
					if audioStart < shotStart < audioEnd:
						add = True
						aIn = shotStart - audioStart
					if audioStart < shotEnd < audioEnd:
						add = True
						aOut = audioDuration-(audioEnd-shotEnd)+1
					
					if add:
						audioList.append([aud,aIn,aOut])

				sequenceList[-1]["audioList"] = audioList
			
			tmpFolder = "C:/temp"
			if not os.path.exists(tmpFolder):
				os.makedirs(tmpFolder)

			scenePath = cmds.file(q=True,sceneName=True)
			scene_template = tk.template_from_path(scenePath)
			audio_template = tk.templates["shot_published_audio"]
			flds = scene_template.get_fields(scenePath)
			flds['Step'] = 'snd'
			soundCheckList = ["These audio cuts dont match the camera cuts."]
			for audio in sequenceList:
				if audio['audioList'] != []:
					#print audio['shot']
					flds['Shot'] = flds['Sequence']+"_"+str(audio['shot'])
					i=0
					newAudio =[]
					soundCheckList += [""]
					soundCheckList += ["audio files overlapping shot " + audio['shot']]
					for aud in audio['audioList']:
						inSec = float(aud[1])/24
						outSec = float(aud[2])/24
						print aud[0],inSec,outSec
						# print "------>>>>>>>>>", aud
						soundCheckList += [aud[0] +"   cut in offset = "+ str(aud[1]) +"   cut out offset = "+ str(aud[2])]

			return sequenceList
		def MakeSoundCuts(ffmpegPath,Input,Output,Position,Duration ):
			time01= Position
			time02= Duration
			if os.path.isfile(Output):
				os.remove(Output)
			subprocess.call('%s -i "%s" -ss "%s" -t "%s" -acodec copy "%s"' %(ffmpegPath,Input,time01,time02,Output))
		def fixSound(sequenceList):
			tmpFolder = "C:/temp"
			if not os.path.exists(tmpFolder):
				os.makedirs(tmpFolder)
			scenePath = cmds.file(q=True,sceneName=True)
			scene_template = tk.template_from_path(scenePath)
			audio_template = tk.templates["shot_published_audio"]
			flds = scene_template.get_fields(scenePath)
			flds['Step'] = 'snd'
			for audio in sequenceList:
				if audio['audioList'] != []:
					print audio['shot']
					flds['Shot'] = flds['Sequence']+"_"+str(audio['shot'])
					i=0
					newAudio =[]
					for aud in audio['audioList']:
						inSec = float(aud[1])/24
						outSec = float(aud[2])/24
						print aud[0],inSec,outSec
						if outSec == 0.0:
							outSec = 10000
						input = cmds.getAttr(aud[0]+'.filename')
						output = tmpFolder+"/"+audio['shot']+"_part"+str(i)+str.split(str(input),"/")[-1]
						i+=1
						MakeSoundCuts(ffmpegPath,input,output,inSec,outSec)
						newAudio +=[output]
					audioOutput = audio_template.apply_fields(flds)
					
					# version UP
					latestVersion = findLastVersion(os.path.dirname(audioOutput))+1
					flds['version'] = latestVersion
					audioOutput = audio_template.apply_fields(flds)
					# combine
					mergedAudio = combineMediaFiles(newAudio,audioOutput,tmpFolder+"/tmp_wavList.txt",ffmpegPath)
					
					ver = str(findLastVersion(os.path.dirname(audioOutput),True))
					newAudioName = str.rsplit(ver,"_",1)[0]
					cmds.file( audioOutput, i=True, type="audio",mergeNamespacesOnClash=False, namespace=flds['Shot']+"_audio",resetError=True)
					crappyAudioName = str.split(ver,".")[0]
					cmds.rename(crappyAudioName,newAudioName)
					cutIn = cmds.getAttr(audio['shot']+".startFrame")
					cutOut = cmds.getAttr(audio['shot']+".endFrame")
					cutDuration = cutOut-cutIn
					cmds.setAttr(newAudioName+".offset", cutIn)
					cmds.setAttr(newAudioName+".sourceEnd",cutDuration+1)
					cmds.connectAttr(newAudioName+".message", audio['shot']+".audio",f=True)
					print "-----------------------------------------------------------------------------________________-------------------------------------------------------------------------"
					# PUBLISH
					file_template = tk.template_from_path(audioOutput)
					flds = file_template.get_fields(audioOutput)
					print audioOutput
					ctx = tk.context_from_path(audioOutput)

					print ctx

					sg_task = tk.shotgun.find("Task",[['content', 'is',"Sound"],["entity",'is',ctx.entity]], ['id'])
					try:
						sg_task = sg_task[0]
					except indexError:
						print "SKIPPED - are the folders already created on shotgun?????"
						errors.append("SKIPPED - are the folders already created on shotgun?????")
					if sg_task != []:
						_register_publish(audioOutput,newAudioName,sg_task,flds['version'],"Audio", "publish","",ctx)
					else:
						print "SKIPPED - are the folders already created on shotgun?????"
						errors.append("SKIPPED - are the folders already created on shotgun?????")
						#popup('error',"skipped creation of "+newAudioName+" - are the folders already created on shotgun??")

			for audio in sequenceList:
				if audio['audioList'] != []:
					for aud in audio['audioList']:
						if cmds.objExists(aud[0]):
							print "------------------",aud[0],"------------------------"
							cmds.delete(aud[0])


		# ------------------------------------------------------------------
		# Gather scene/shot info, resolve output templates, and repair the
		# per-shot audio before playblasting.
		# ------------------------------------------------------------------
		shots = cmds.ls(type="shot")
		shotCams = []
		unUsedCams = []

		# eye suffixes used when the step is stereo (s3d)
		sides=["L","R"]

		pbShots = []
		CutInList = []
		parentShotList = []
		
		# toggles set from "setting" items in the task loop below:
		# disable overscan / reset the cut-in
		noOverscan = False
		resetCutIn = False

		# template stuff...
		# tk = tank.tank_from_path("W:/RTS/Tank/config")
		tk = self.parent.tank
		scenePath = cmds.file(q=True,sceneName=True)
		scene_template = tk.template_from_path(scenePath)
		flds = scene_template.get_fields(scenePath)
		# hard-coded playblast resolution -- TODO confirm against project spec
		flds['width'] = 1724
		flds['height'] = 936
		pb_template = tk.templates["maya_seq_playblast_publish"]
		pb_template_current = tk.templates["maya_seq_playblast_current"]
		pbArea_template = tk.templates["maya_seq_playblast_publish_area"]
		audio_template = tk.templates["shot_published_audio"]
		mov_template = tk.templates["maya_seq_playblast_publish_currentshots_mov"]
		concatMovTxt = tk.templates["maya_seq_playblast_publish_concatlist"]
		pbMov = tk.templates["maya_seq_playblast_publish_mov"]
		pbMp4 = tk.templates["maya_seq_playblast_review_mp4"]

		# get extra shot info through shotgun
		fields = ['id']
		sequence_id = self.parent.shotgun.find('Sequence',[['code', 'is',flds['Sequence'] ]], fields)[0]['id']
		fields = ['id', 'code', 'sg_asset_type','sg_cut_order','sg_cut_in','sg_cut_out','sg_cut_duration','sg_status_list','parent_shots']
		filters = [['sg_sequence', 'is', {'type':'Sequence','id':sequence_id}]]
		assets= self.parent.shotgun.find("Shot",filters,fields)
		results = []
		errors = []
		# FFMPEG_PATH may point at the folder or the exe; normalise to a
		# double-quoted ffmpeg.exe path so it can be embedded in shell strings
		ffmpegPath = '"'+os.environ.get('FFMPEG_PATH')
		if "ffmpeg.exe" not in ffmpegPath:
			ffmpegPath += "\\ffmpeg.exe"
		ffmpegPath += '"'
		# find audio whose cuts don't match the camera cuts, then rebuild it
		soundFixList = checkSoundCut()
		print soundFixList
		fixSound(soundFixList)

		# Walk the scan-scene tasks: collect the shots to playblast (with their
		# Shotgun cut-in / parent-shot data) and pick up option toggles.
		for task in tasks:
			item = task["item"]
			output = task["output"]
			errors = []
			
			#get shots from scan scene
			if item["type"] == "shot":
				shotTask = [item["name"]][0]
				pbShots += [shotTask]
			# get corresponding cut values from shotgun
				for sht in assets:
					# Shotgun shot code is "<sequence>_<shot>"; match on the
					# shot part only
					shot_from_shotgun = str.split(sht['code'],"_")[1]
					if shot_from_shotgun == shotTask:
						CutInList += [sht['sg_cut_in']]
						parentShotList += [sht['parent_shots']]
			
			# set extra settings
			if item["type"] == "setting":
				if item["name"]=="overscan":
					noOverscan = True
				if item["name"]=="set Cut in":
					resetCutIn = True

			# if there is anything to report then add to result
			if len(errors) > 0:
				# add result:
				results.append({"task":task, "errors":errors})

		# temporarily hide cams and curves
		modPan = cmds.getPanel(type="modelPanel")
		for pan in modPan:
			cmds.modelEditor( pan,e=True, alo= False, polymeshes =True )
			cmds.modelEditor( pan,e=True,displayAppearance="smoothShaded")
			cmds.modelEditor( pan,e=True,displayTextures=True)
			allobjs = cmds.ls(type= "transform")
			boundingboxObjsList = []


			for i in allobjs:
				if cmds.getAttr(i+".overrideEnabled"):
					if cmds.getAttr(i+".overrideLevelOfDetail") == 1:
						boundingboxObjsList.append(i)
						cmds.setAttr(i+".overrideLevelOfDetail",0)
						
		currentselection= cmds.ls(sl=True)
		cmds.select(cl=True)
		
		cmds.headsUpDisplay(lv=False)

		CamsList = cmds.listCameras()
		for Cam in CamsList:
			cmds.camera(Cam, e=True, dr=True, dgm=True,ovr=1.3)
		

		#Get USER
		USER = sgtk.util.get_current_user(tk)
		if USER == None:
			USER = {'email': '*****@*****.**',
			 'id': 63,
			 'image': 'https://sg-media-usor-01.s3.amazonaws.com/7df0575d53fc3b61c36343837da18effb72bb6ff/86f714413d0a2c68382b706e8e45991d41a0ffed/thumb_t.jpg?AWSAccessKeyId=AKIAIFHY52V77FIVWKLQ&Expires=1415784134&Signature=%2Ff4qeNQMq4oHscIKePb1IrtRPZQ%3D',
			 'login': '******',
			 'name': 'WTD RND',
			 'type': 'HumanUser'}


		# audio stuff
		'''
		stepVersion = flds['version']
		step = flds['Step']
		audioList = []
		for sht in shots:
			#print sht
			flds['Shot'] = (flds['Sequence']+"_"+sht)
			flds['version'] = findLastVersion(os.path.dirname(audio_template.apply_fields(flds)))
			flds['Step'] = 'snd'
			print flds['version']
			if flds['version'] > 0:
				audioList += [str.replace(str(audio_template.apply_fields(flds)),"\\","/")]
		flds['Shot'] = flds['Sequence']
		flds['version'] = stepVersion #set version back
		flds['Step'] = step
		
		audioOutput = pbArea_template.apply_fields(flds)+"/"+flds['Sequence']+"_"+flds['Step']+".wav"
		if audioList != []:
			combinedAudio = combineMediaFiles(audioList,audioOutput, ffmpeg_path = ffmpegPath)
		print ("combined audio at  " + audioOutput)
		'''


		# replacedAudio = setAudioToCorrectPath()
		# for aud in replacedAudio:
		# 	results.append({"task":{'item': aud , 'output': 'replaced' }})

		# Per-shot playblast: blast PNGs per shot (per eye for stereo), burn
		# slates onto each frame, copy frames to the "current" folder and
		# encode a per-shot mov with audio.
		Test = True;
		#Test = False;
		if Test:
			j = 0
			RenderPath = ""
			for pbShot in pbShots:
				# j indexes the per-shot lists built in the task loop
				CutIn = CutInList[j]
				if parentShotList[j] != []:
					# parent shot name is "<seq>_<shot>"; keep the shot part
					parentShot = str.split(parentShotList[j][0]['name'],"_")[-1]
				j += 1
				
				sequenceName = flds ['Sequence']
				shotName = pbShot
				
				# ... correct this in the templates?
				flds['Shot'] = flds['Sequence']+"_"+pbShot

				

				#get camera name from sequence shot 
				shotCam = cmds.shot(pbShot, q=True, currentCamera=True)

				# overscanValue = cmds.getAttr(shotCam+".overscan")
				cmds.setAttr(shotCam+".overscan", 1.3)
				if noOverscan:
					cmds.setAttr(shotCam+".overscan", 1)


				shotCams = [shotCam]
				previewCam = shotCam
				if flds['Step'] == 's3d':
					# stereo step blasts once per eye
					shotCams = getStereoCams(pbShot)
				s = 0
				for shotCam in shotCams:
					side = sides[s]
					s += 1
					if flds['Step'] == 's3d':
						flds['eye'] = side

					cmds.shot(pbShot, e=True, currentCamera=shotCam)
					focal = cmds.getAttr(shotCam+'.focalLength')
					# make outputPaths from templates
					RenderPath = pb_template.apply_fields(flds)
					pbPath = str.split(str(RenderPath),".")[0]
					renderPathCurrent = pb_template_current.apply_fields(flds)
					pbPathCurrent = str.split(str(renderPathCurrent),".")[0]
					if not os.path.exists(os.path.dirname(pbPathCurrent)):
						os.makedirs(os.path.dirname(pbPathCurrent))
					pbPathCurrentMov = mov_template.apply_fields(flds)
					if not os.path.exists(os.path.dirname(pbPathCurrentMov)):
						os.makedirs(os.path.dirname(pbPathCurrentMov))

					# report progress:
					progress_cb(0, "Publishing", task)

					# blast the shot's sequence-time frame range to PNGs
					shotStart = cmds.shot(pbShot,q=True,sequenceStartTime=True)
					shotEnd = cmds.shot(pbShot,q=True,sequenceEndTime=True)
					progress_cb(25, "Making playblast %s" %pbShot)
					cmds.playblast(indexFromZero=False,filename=(pbPath),fmt="iff",compression="png",wh=(flds['width'], flds['height']),startTime=shotStart,endTime=shotEnd,sequenceTime=1,forceOverwrite=True, clearCache=1,showOrnaments=1,percent=100,offScreen=True,viewer=False,useTraxSounds=True)
					progress_cb(50, "Placing Slates %s" %pbShot)
					
					Film = "Richard the Stork"
					#GET CURRENT DATE
					today = datetime.date.today()
					todaystr = today.isoformat()
					
					"""
						Adding Slates to playblast files
					"""
					# burn slate info (version, film, cut/frame counters, shot,
					# focal length, user, date) into each frame, then copy it to
					# the per-shot "current" folder
					for i in range(int(shotStart),int(shotEnd)+1):
						FirstPartName = RenderPath.split( '%04d' )[0]
						EndPartName = '%04d' % i + RenderPath.split( '%04d' )[-1]
						ImageFullName = FirstPartName + EndPartName
						pbFileCurrent = pbPathCurrent+"."+EndPartName
						print 'fld ===>',flds
						print 'USER===>',USER
						ffmpeg.ffmpegMakingSlates(inputFilePath= ImageFullName, outputFilePath= ImageFullName, topleft = flds ['Sequence']+"_"+flds['Step']+"_v"+str('%03d' % (flds['version'])), topmiddle = Film, topright = str(int(CutIn))+"-"+str('%04d' %(i-int(shotStart)+CutIn))+"-"+str('%04d' %(int(shotEnd)-int(shotStart)+CutIn))+"  "+str('%04d' %(i-int(shotStart)+1))+"-"+str('%04d' %(int(shotEnd)-int(shotStart)+1)), bottomleft = shotName+" - focal_Length "+ str(focal), bottommiddle = USER['name'], bottomright = todaystr , ffmpegPath =ffmpegPath, font = "C:/Windows/Fonts/arial.ttf"  )
						print("COPYING PNG "+ImageFullName+"  TO  "+pbFileCurrent+"  FOR SHOT  " + shotName)
						shutil.copy2(ImageFullName, pbFileCurrent)
					
					# prefer the latest published audio; fall back to the audio
					# node attached to the shot in Maya
					shotAudio = audio_template.apply_fields(flds)
					shotAudio = findLastVersion(os.path.dirname(shotAudio),True,True)
					if shotAudio == 0:
						print " NO PUBLISHED AUDIO FOUND"
						# NOTE(review): loop var `aud` is never used and both
						# iterations query pbShot; also `parentShot` is unbound
						# when parent_shots was empty -- verify intent
						for aud in [parentShot,pbShot]:
							try:
								audio = cmds.shot(pbShot,q=True,audio=True)
								shotAudio = '"'+cmds.getAttr(audio+".filename")+'"'
								shotAudio = str.replace(str(shotAudio),"Z:/Richard The Stork/",'W:/RTS/')
								print "used audio from maya :  ", shotAudio
							except:
								shotAudio = ''
					print ffmpeg.ffmpegMakingMovie(inputFilePath=renderPathCurrent, outputFilePath=pbPathCurrentMov, audioPath=shotAudio, start_frame=int(shotStart),end_frame=int(shotEnd), framerate=24 , encodeOptions='libx264',ffmpegPath=ffmpegPath)
				# end_frame=shotEnd
				# restore the shot's original camera
				cmds.shot(pbShot, e=True, currentCamera=previewCam)
			
			# restore the user's selection stashed before blasting
			if currentselection != []:
				cmds.select(currentselection)
			
			# layout has no eyes; stereo iterates L/R
			if flds['Step'] == 'lay':
				sides = ['']
			for side in sides:
				if flds['Step'] == 's3d':
					flds['eye'] = side
					
				
				RenderPath = pb_template.apply_fields(flds)

				# restore the bounding-box LOD overrides changed earlier
				for i in boundingboxObjsList:
					cmds.setAttr(i+".overrideEnabled",True)
					cmds.setAttr(i+".overrideLevelOfDetail",1)
				# inspect the rendered frame sequence for gaps
				sequenceTest= MakeListOfSequence(os.path.dirname(RenderPath))
				FistImg= int(FindFirstImageOfSequence(os.path.dirname(RenderPath))) 
				LastImg= int(FindLastImageOfSequence(os.path.dirname(RenderPath)))

				FramesMissingList= FindMissingFramesFromSequence( sequenceTest ,FistImg ,LastImg )
				
				"""
					Copy empty frames
				"""
				# blackFrame = False
				# blackFrameName = ""
				# for n in FramesMissingList:
					# if blackFrame == False:
						# blackFrameName = FirstPartName+str('%04d' % n)+".png"
						# value = subprocess.call('%s -f lavfi -i color=c=black:s="%s" -vframes 1 "%s"' %(ffmpegPath,(str(flds['width'])+"x"+ str(flds['height'])),FirstPartName+str('%04d' % n)+".png"))
						# print '%s -f lavfi -i color=c=black:s="%s" -vframes 1 "%s"' %(ffmpegPath,(str(flds['width'])+"x"+ str(flds['height'])),FirstPartName+str('%04d' % n)+".png")
						# blackFrame = True
					
					# newFrameName = FirstPartName+str('%04d' % n)+".png"
					# if blackFrameName != newFrameName:
						# shutil.copy2(blackFrameName, newFrameName)	

				FirstImageNumber= FindFirstImageOfSequence(os.path.dirname(RenderPath))
				# NOTE(review): FirstImageNumberSecond is never used below
				FirstImageNumberSecond= FirstImageNumber/24

				'''
				makeSeqMov
				'''
				# resolve concat list, sequence mov and review mp4 paths
				concatTxt = concatMovTxt.apply_fields(flds)
				pbMovPath = pbMov.apply_fields(flds)
				pbMp4Path = pbMp4.apply_fields(flds)
				pbMp4Path = str.replace(str(pbMp4Path),'\\','/')

				pbMovFile =  str.split(str(pbMovPath),os.path.dirname(pbMovPath))[1][1:]

				# movList = []
				# for mov in os.listdir(os.path.dirname(pbPathCurrentMov)):
				# 	movList += [os.path.dirname(pbPathCurrentMov)+"/"+mov]
				# print movList

				# collect the per-shot movs in Shotgun cut order
				assetsOrdered = orderShots(assets)
				movList = []
				for ass in assetsOrdered:
					for mov in os.listdir(os.path.dirname(pbPathCurrentMov)):
						movName = str.split(str(mov),".")[0]
						if ass['code'] == movName:
							movList += [os.path.dirname(pbPathCurrentMov)+"/"+mov]


				makeSeqMov = True
				if makeSeqMov:
					if not os.path.exists(os.path.dirname(pbMovPath)):
						self.parent.ensure_folder_exists(os.path.dirname(pbMovPath))
						# os.makedirs(os.path.dirname(pbMovPath))
					
					if not os.path.exists(os.path.dirname(pbMp4Path)):
						print "creating", pbMp4Path
						self.parent.ensure_folder_exists(os.path.dirname(pbMp4Path))
						print "created", pbMp4Path
						# os.makedirs(os.path.dirname(pbMp4Path))
					"""
						SEQUENCE MOV and MP4 Creation
					"""
					print "Making mov and mp4: \n", pbMovPath, ' --- ', pbMp4Path
					print combineMediaFiles(movList,pbMovPath,concatTxt,ffmpegPath)
					
					# wait (max 10s) for the concatenated mov to land on disk
					amount = 0
					while not os.path.exists(pbMovPath) and amount < 10:
						time.sleep(1)
						amount += 1
						
					print ffmpeg.ffmpegMakingMovie(pbMovPath,pbMp4Path,encodeOptions="libx264",ffmpegPath=ffmpegPath)
							
						
					# ----------------------------------------------
					# UPLOAD MP4
					# ----------------------------------------------
					
					upload = True
					if upload:
						user = self.parent.context.user
						scenePath = cmds.file(q=True,sceneName=True)
						ctx = self.parent.tank.context_from_path(scenePath)
						fields = ['id']
						sg_task = self.parent.shotgun.find("Task",[['content', 'is',ctx.step['name']],["entity",'is',ctx.entity]], fields)
						
						# Version entity for review; project id is hard-coded
						data = {'project': {'type':'Project','id':66},
								'entity': {'type':'Sequence', 'id':int(sequence_id)},
								'code': flds ['Sequence']+"_"+flds['Step']+"_v"+str('%03d' % (flds['version'])),
								'sg_path_to_frames':os.path.dirname(RenderPath),
								'sg_path_to_movie':pbMovPath,
								'user': user,
								'created_by': user,
								'updated_by': user,
								'sg_task': sg_task[0]
								}

						if not os.path.exists(os.path.dirname(pbMp4Path)):
							os.makedirs(os.path.dirname(pbMp4Path))
						
						result = tk.shotgun.create('Version', data)
						print "---> UPLOADING ",pbMp4Path
						executed = tk.shotgun.upload("Version",result['id'],pbMp4Path,'sg_uploaded_movie')
						print executed
				
					# PUBLISH
					# NOTE(review): sg_task and ctx are only bound inside the
					# `if upload:` branch above -- this raises NameError if
					# upload is ever set to False; verify
					if sg_task != []:
						version = findLastVersion(os.path.dirname(pbMovPath))
						#sg_task = sg_task[0]
						print sg_task
						_register_publish(pbMovPath,pbMovFile,sg_task,version,"Movie", "published playblast mov","",ctx)
						print "PUBLISHED"
					else:
						print "SKIPPED PUBLISH"
				
					
				# print "TODO : make mov of whole sequence with audio"
		return results