def getInfo(self, color, colorID, keyframingOption):
    '''
    Gather information about the current selection, the current time and any
    time range highlighted in the timeline. Convert the color argument into a
    tuple if it is not one already. Call addColoredKey(), addNonColoredKey()
    or both, depending on the gathered information and the user preferences.

    :param color: rgb values corresponding to one of the ui buttons' colors
    :param colorID: string representing one of the ui buttons. Used to update
                    the keytick's color in the timeline.
    :param keyframingOption: int representing the keyframing option
    '''
    # Selection
    sel = list(pmc.selected())
    if not sel:
        pmc.confirmDialog(title='Color key error',
                          message='Nothing selected! Please select something.',
                          button='OK',
                          defaultButton='OK')
        return

    # Color
    if not isinstance(color, tuple):
        color = color.getRgb()

    # Current frame
    currentFrame = int(pmc.currentTime(q=True))

    # Highlighted time range
    playBackSlider = mel.eval('$tmpVar=$gPlayBackSlider')
    if pmc.timeControl(playBackSlider, q=True, rangeVisible=True):
        rangeHighlighted = pmc.timeControl(playBackSlider, q=True, rangeArray=True)
        lowerBound = int(rangeHighlighted[0])
        upperBound = int(rangeHighlighted[1])

        # If a time range is highlighted, call addColoredKey() only
        self.addColoredKey(color, colorID, sel, currentFrame, lowerBound, upperBound)
    else:
        onlyKeyedAttrPb = []

        # Only call addNonColoredKey() if the keyframing preference is set to
        # keyframe all attributes or keyframe only keyed attributes
        if keyframingOption != -4:
            onlyKeyedAttrPb = self.addNonColoredKey(sel)

        # If the user preference is set to key only keyed attributes and none of the
        # attributes of a node have ever been keyframed, warn the user.
        if onlyKeyedAttrPb:
            for node in onlyKeyedAttrPb:
                pmc.warning('Your keyframing preference is currently set to keyframe '
                            'only keyed attributes. None of the attributes of %s have '
                            'ever been keyframed, skipped. See script editor for more.' % node)
        # Every other case
        else:
            self.addColoredKey(color, colorID, sel, currentFrame, currentFrame, currentFrame)
def togglePlaybackSnap():
    """Toggle playback snapping on and off"""
    gPlayBackSlider = pm.getMelGlobal('string', 'gPlayBackSlider')
    new_value = not bool(pm.timeControl(gPlayBackSlider, q=1, snap=1))
    pm.timeControl(gPlayBackSlider, e=1, snap=new_value)
    print 'Playback Snapping: %s' % ('Off', 'On')[new_value]
def getTimeRange(self, *args):
    # get time range
    aPlayBackSliderPython = pm.mel.eval('$tmpVar=$gPlayBackSlider')
    timeRange = pm.timeControl(aPlayBackSliderPython, q=True, rangeArray=True)
    if timeRange[1] - timeRange[0] < 2.0:
        timeRange = [pm.playbackOptions(q=True, minTime=True),
                     pm.playbackOptions(q=True, maxTime=True)]
    pm.intField(self.startFrameIntField, e=True, value=timeRange[0])
    pm.intField(self.endFrameIntField, e=True, value=timeRange[1])
def get_frame_range():
    """
    Gets the selected frame range.
    If nothing is selected, gets the time slider start/end.
    :return: [start_frame, end_frame]
    """
    frame_range = [0, 1]
    selected_frame_range_slider = pm.mel.eval('$temp=$gPlayBackSlider')
    if pm.timeControl(selected_frame_range_slider, query=True, rangeVisible=True):
        frame_range = pm.timeControl(selected_frame_range_slider, query=True, rangeArray=True)
    else:
        start_frame = pm.playbackOptions(query=True, min=True)
        end_frame = pm.playbackOptions(query=True, max=True)
        frame_range = [start_frame, end_frame]
    return frame_range
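# --- Usage sketch (not part of the original source) ---
# A minimal example of feeding the range returned by get_frame_range() into a
# playblast. Assumes pymel.core is imported as pm, as in the function above;
# the output path and percent value are arbitrary placeholders.
def playblast_selected_range():
    start_frame, end_frame = get_frame_range()
    return pm.playblast(startTime=start_frame,
                        endTime=end_frame,
                        format='qt',
                        percent=50,
                        viewer=False,
                        forceOverwrite=True,
                        filename='/tmp/range_preview.mov')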
def check_frame_range_selection():
    """Checks whether any range is selected in the time slider.

    Because it breaks playblasting the shot as a whole, the user should not
    have a range selected in the time slider.
    """
    start, end = pm.timeControl(pm.melGlobals['$gPlayBackSlider'],
                                q=1, rangeArray=True)
    if end - start > 1:
        raise PublishError(
            'Please deselect the playback range in <b>TimeSlider</b>!!!')
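# --- Usage sketch (not part of the original source) ---
# One way the check above might be used: run it before publishing a shot and
# surface the PublishError to the user. publish_shot() is a hypothetical
# placeholder for the actual publish routine.
def safe_publish():
    try:
        check_frame_range_selection()
    except PublishError as e:
        pm.confirmDialog(title='Publish Error', message=str(e), button=['OK'])
        return
    publish_shot()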
def _onPress(self):
    '''
    This is called when the tool is first pressed down. Here we'll grab some
    initial values and create the callback.
    '''
    # Grab the initial press position, we'll use this for reference
    self.pressPosition = pmc.draggerContext(animSketchTool.contextname,
                                            query=True, anchorPoint=True)

    # Create the timer callback, this will watch idle events while held down.
    # We'll keep a reference to the id so we can remove it later.
    self.callbackID = om.MTimerMessage.addTimerCallback(
        1 / self.framerate, self._onIdleFrame)

    # Grab the start time, this way we can track the duration of the hold
    self.startTime = time.time()

    # Grab the current frame in the timeline to start from
    self.startFrame = pmc.currentTime(q=True)

    # Grab the start value for each target
    self.targetStartValues = [pmc.getAttr(target) for target in self.targets]

    # Set the initial input value
    if self.singleAxis:
        self.input = 0
    else:
        self.lastInput = self.input
        self.input = self.pressPosition

    # Set a keyframe at the startFrame for each target
    for target in self.targets:
        pmc.setKeyframe(target, t=self.startFrame)

    # Begin the scrub, this way sound will play
    pmc.timeControl(self.playbackSlider, edit=True, beginScrub=True)
def _onRelease(self):
    '''
    This is called when the tool is released. Here we'll remove our callback
    and clean everything up.
    '''
    # Remove the callback
    om.MMessage.removeCallback(self.callbackID)

    # Set the current tool to the move tool
    pmc.setToolTo('moveSuperContext')

    # Simplify the animation curves
    if self.simplify:
        for target in self.targets:
            pmc.filterCurve(target, f='simplify',
                            startTime=self.startFrame,
                            endTime=pmc.currentTime(q=True),
                            timeTolerance=self.tolerance)

    # End the scrub
    pmc.timeControl(self.playbackSlider, edit=True, endScrub=True)
def makePreviwe(self, percent, width, height):
    fileName = self.tempDir + self.getScene() + '.mov'
    # grab the sound node currently displayed in the time slider so the
    # playblast includes audio
    aPlayBackSliderPython = pm.mel.eval('$tmpVar=$gPlayBackSlider')
    soundFile = pm.timeControl(aPlayBackSliderPython, q=True, sound=True)
    previewFile = pm.playblast(fp=4, format='qt', sound=soundFile,
                               forceOverwrite=True, percent=percent,
                               filename=fileName, viewer=0, quality=100,
                               widthHeight=(width, height), compression="PNG")
    qtPath = 'C:\\Program Files (x86)\\QuickTime\\QuickTimePlayer.exe'
    subprocess.Popen([qtPath, previewFile])
def custom_playblast():
    # default paths
    filename = os.path.splitext(os.path.basename(pm.system.sceneName()))[0]
    movieDir = pm.workspace.fileRules['movie'] + "/"
    movieDir = movieDir.replace('\\', '/')  # str.replace returns a new string, so reassign

    # get camera name
    view = OpenMayaUI.M3dView.active3dView()
    camPath = OpenMaya.MDagPath()
    view.getCamera(camPath)  # fills camPath with the camera shape node
    camShapeName = camPath.partialPathName()
    cam = camPath.transform()  # returns MObject
    OpenMaya.MDagPath.getAPathTo(cam, camPath)
    camName = camPath.partialPathName()

    # get render resolution
    resolution = [int(pm.getAttr("defaultResolution.width")),
                  int(pm.getAttr("defaultResolution.height"))]

    # prompt for postfix
    message = "Camera: %s\n\nResolution: %dx%d\n\nFilename:" % (
        camName, resolution[0], resolution[1])
    filename = None
    try:
        filename = pm.system.fileInfo['playblastFilename']
    except:
        pass
    if filename is None:
        # Regex example:
        # Filename in the format "/my/path/sh_0010_ANI_workshop_0010.ma"
        # is converted to "sh_0010_ANI_workshop"
        filename = os.path.splitext(os.path.basename(pm.system.sceneName()))[0]
        pattern = re.compile('(.*?)_[0-9+]')
        match = re.match(pattern, filename)
        if match is not None:
            filename = match.group(1)

    result = pm.promptDialog(title="Playblast",
                             message=message,
                             button=["Playblast", "Cancel"],
                             defaultButton="Playblast",
                             cancelButton="Cancel",
                             dismissString="Cancel",
                             text=filename)
    if result == "Playblast":
        newName = pm.promptDialog(q=True, text=True)
        if newName != "":  # 'is not ""' compared identity, not equality
            filename = newName
        pm.system.fileInfo['playblastFilename'] = filename

        # get active sound in time slider
        aPlayBackSliderPython = maya.mel.eval('$tmpVar=$gPlayBackSlider')
        sound = pm.timeControl(aPlayBackSliderPython, q=True, sound=True)

        # assemble full path and filename
        filename = movieDir + filename + ".mov"

        # disable resolution gate
        resGateEnabled = pm.getAttr(camShapeName + ".displayResolution")
        overscan = pm.getAttr(camShapeName + ".overscan")
        pm.setAttr(camShapeName + ".displayResolution", 1)
        pm.setAttr(camShapeName + ".overscan", 1)

        # playblast!
        pm.animation.playblast(filename=filename,
                               format="qt",
                               compression="H.264",
                               forceOverwrite=True,
                               sequenceTime=False,
                               clearCache=True,
                               showOrnaments=False,
                               offScreen=True,
                               viewer=True,
                               percent=100,
                               quality=100,
                               widthHeight=resolution,
                               sound=sound)

        # restore gate
        pm.setAttr(camShapeName + ".displayResolution", resGateEnabled)
        pm.setAttr(camShapeName + ".overscan", overscan)
def on_playBlast_doit_pushButton_clicked(self):
    """
    :return: None
    """
    width = int(self.size_X_spinBox.value())
    height = int(self.size_Y_spinBox.value())
    quality = self.quality_spinBox.value()
    format = self.format_comboBox.currentText()
    codec = self.codec_comboBox.currentText()
    fps = self.fps_spinBox.value()
    percent = self.scale_doubleSpinBox.value() * 100
    mov_name = self.video_name_lineEdit.text()
    mov_path = self.file_path_lineEdit.text()
    if not (mov_name and mov_path):
        logging.error("Please fill in all the required info!")
        return
    jpg_path = mov_path + "/oct_playblast_cache" + mov_name
    start_frame = self.start_frame_spinBox.value()
    end_frame = self.end_frame_spinBox.value()
    time_duation = (end_frame - start_frame + 1) / float(fps)
    off_screen = self.off_screen_checkBox.isChecked()
    frame = start_frame
    playbackSlider = mel.eval("$temp = $gPlayBackSlider")
    sound_path = "no_sound"

    # copy a local ffmpeg.exe into place if it is missing
    # (the original used strip('/ffmpeg.exe'), which removes characters, not a suffix)
    ffmpeg_dir = os.path.dirname(ffmpeg_path)
    if not os.path.isfile(ffmpeg_path):
        if not os.path.isdir(ffmpeg_dir):
            try:
                os.mkdir(ffmpeg_dir)
            except:
                logging.error(u"Please make sure there is a 'maya' folder in My Documents!")
        shutil.copyfile(file_path + "/bin/ffmpeg.exe", ffmpeg_path)

    # grab the sound node displayed in the time slider, if any
    try:
        soundStr = pm.PyNode(pm.timeControl(playbackSlider, q=1, sound=1, fpn=1))
        sound_path = soundStr.getAttr("filename")
    except:
        pass
    # sound_offset = soundStr.getAttr("offset") / fps

    mov_file = mov_path + "/" + mov_name + ".mov"
    if os.path.isfile(mov_file):
        if_cast = cmds.confirmDialog(t="warning",
                                     m="File: \"%s\" exists, replace it?" % mov_file,
                                     b=['Yes', 'No'])
        if if_cast == "Yes":
            try:
                os.remove(mov_file)
            except WindowsError:
                logging.error(u"This file is currently open and cannot be replaced! "
                              u"Please close it and try again!")
                return
        else:
            return
    if os.path.isdir(jpg_path):
        shutil.rmtree(jpg_path)

    # capture one image per frame
    while frame < (end_frame + 1):
        cmds.currentTime(frame)
        picture = playBlastCMD.capture(panel=self._playblast_panel,
                                       width=width,
                                       height=height,
                                       percent=percent,
                                       filename=jpg_path + "/" + mov_name,
                                       frame=frame,
                                       quality=quality,
                                       off_screen=off_screen,
                                       framePadding=4)
        seq = str(frame).zfill(4)
        if self.draw_hud_checkBox.isChecked():
            self.draw_hud_text(
                self.hud_text(picture.replace("####", "%s" % seq)))
        frame += 1

    start_number = str(start_frame).zfill(4)
    time.sleep(0.5)
    print "Drawing HUD done.\nCompressing Video..."
    pic_format = '.png'
    if self.draw_hud_checkBox.isChecked():
        pic_format = '.jpg'
    self.compress_video(fps=str(fps),
                        time_duation=str(time_duation),
                        start_number=start_number,
                        ffmpeg_path="\"" + ffmpeg_path + "\"",
                        input_path="\"" + jpg_path + "/%s" % (mov_name + ".%04d" + pic_format) + "\"",
                        output_path="\"" + mov_path + "/%s" % (mov_name + ".mov") + "\"",
                        sound="\"" + sound_path + "\"",
                        jpg_path=jpg_path)
def rangeIsSelected():
    return timeControl(melGlobals['gPlayBackSlider'], q=True, rv=True)
def selectedTime(dataType=float):
    '''
    Helper for getting a time range selection.
    '''
    return timeControl(melGlobals['gPlayBackSlider'], q=True, ra=True)
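# --- Usage sketch (not part of the original source) ---
# Combining the two helpers above: use the highlighted range when one is
# selected, otherwise fall back to the full playback range. Assumes the same
# star-import style (timeControl, melGlobals and playbackOptions available at
# module level).
def effectiveTimeRange():
    if rangeIsSelected():
        start, end = selectedTime()
    else:
        start = playbackOptions(q=True, minTime=True)
        end = playbackOptions(q=True, maxTime=True)
    return start, end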
def convert2mov(img_input, mov_output, copyInfoFrom=''):
    '''
    The input images should use %04d or #### for frame padding.
    If copyInfoFrom is given, this function will try to copy the comment
    from that mov.
    '''
    img_input = cf.osPathConvert(img_input)
    mov_output = cf.osPathConvert(mov_output)
    copyInfoFrom = cf.osPathConvert(copyInfoFrom)
    img_dir = os.path.dirname(img_input)
    img_name = os.path.basename(img_input)
    if not os.path.isdir(img_dir):
        print 'Invalid img_input: ' + img_input
        return

    left_img = sorted(glob.glob(img_input.replace('%V', 'left').replace('%04d', '*').replace('####', '*')))
    right_img = sorted(glob.glob(img_input.replace('%V', 'right').replace('%04d', '*').replace('####', '*')))
    if not left_img or not right_img or len(left_img) != len(right_img):
        print 'Failed to find left or right images, or the number of left images does not match the right'
        return

    if not mov_output.endswith('.mov') and os.path.isdir(mov_output):
        mov_output = os.path.join(mov_output, '.'.join(img_name.split('.')[:4]) + '.stereo.mov')

    start_frame = int(left_img[0].split('.')[-2])
    end_frame = int(left_img[-1].split('.')[-2])
    reelname = img_name.split('.')[0]
    rv = cf.getRVPath()

    info = {'comment': None, 'audio': None, 'isStereo': True,
            'reelname': reelname, 'start_frame': start_frame}
    audio_path = ''  # make sure audio_path is defined for the command building below
    if copyInfoFrom and os.path.isfile(copyInfoFrom):
        tmp = get_comment_from_mov(copyInfoFrom, rv['rvls'])
        info['comment'] = tmp['comment']
        if '.wav' in tmp['comment']:
            wav_paths = [a.strip() for a in tmp['comment'].split(' ') if a.endswith('.wav')]
            # the original tested os.path.isfile() on the whole list; test the first entry instead
            if wav_paths and os.path.isfile(wav_paths[0]):
                info['audio'] = wav_paths[0]
                audio_path = wav_paths[0]
    else:
        ma = str(pm.sceneName())
        slider = pm.mel.eval('$tmpVar=$gPlayBackSlider')
        audios = [a for a in pm.ls(type='audio') if reelname in a.name()]
        if audios:
            audio_node = audios[0]
            pm.timeControl(slider, edit=True, displaySound=True, sound=audio_node)
            audio_path = cf.osPathConvert(audio_node.attr('filename').get())
        info['comment'] = (str(start_frame) + '-' + str(end_frame) + ' ' + ma + ' ' +
                           ('' if not os.path.isfile(audio_path) else audio_path + ' ') +
                           str(start_frame))
        if os.path.isfile(audio_path):
            info['audio'] = audio_path

    if audio_path == '':
        cmd_str = ('"' + rv['rvio'] + '" [ ' + img_input.replace('%V', 'left') + ' ' +
                   img_input.replace('%V', 'right') + ' ] -outstereo -o ' + mov_output)
    else:
        cmd_str = ('"' + rv['rvio'] + '" [ ' + img_input.replace('%V', 'left') + ' ' +
                   img_input.replace('%V', 'right') + ' ' + audio_path +
                   ' ] -audiorate 48000 -outstereo -o ' + mov_output)
    cmd_str += ' -outparams comment="%s" timecode=%s' % (info['comment'], str(info['start_frame']))
    print 'Cmd:', cmd_str
    p = subprocess.Popen(cmd_str, shell=rv['rv_shell'])
    (out, err) = p.communicate()
def getSelectedRange(cls):
    playbackSlider = mel.eval("$tempVar = $gPlayBackSlider")
    selectedRange = mc.timeControl(playbackSlider, q=1, rangeArray=1)
    return selectedRange
def main(scene_path='', img_dir='', mov_dir='', shot_name='', pb_offscreen=True,
         cut_in=None, cut_out=None, enable_audio=True, img_only=False, play_on_finish=True,
         display_elements={'dynamics': True, 'nParticles': True}, img_compression='png',
         widthHeight=[2048, 858], camera_grp='|cameras', clean_images=True,
         renderers='base_OpenGL_Renderer'):
    '''
    scene_path should be a path to a maya file with a .ma or .mb extension; if not
    specified, it is filled in with the current scene name automatically. The maya
    file name is used to build the mov name: my_scene.ma --> my_scene.stereo.mov

    Note that the RV player must be installed to convert the images to a mov. If RV
    is not available, you can disable converting by setting img_only to True; the
    images will still be generated.

    If img_dir and mov_dir are absent, the images and the mov are put in the
    subfolders named 'images' and 'data' next to the scene file.
    '''
    # get rv paths if any
    rv = cf.getRVPath(use_old_version=False)
    rvio = rv['rvio']
    opener = rv['opener']
    player = rv['player']
    rvls = rv['rvls']
    rv_shell = rv['rv_shell']
    if not os.path.isfile(rvio):
        pm.warning('Failed to find rvio path, the left and right images will not be converted to a mov file.')

    # Get current maya file path
    file_path = pm.system.Path(scene_path) if scene_path else pm.sceneName()
    file_path = cf.osPathConvert(file_path)
    print 'file_path', file_path
    if not file_path:
        pm.confirmDialog(message='Invalid scene name or scene path!')
        return

    # prepare to get proj and shot info
    tokens = file_path.split('/')
    file_name = tokens[-1]
    if not file_name.endswith('.ma') and not file_name.endswith('.mb'):
        pm.confirmDialog(message=file_name + ': file name should be a maya scene file with .ma or .mb extension')
        return

    # prepare mov name
    mov_file_name = file_name[:-3] + '.stereo.mov'

    if renderers == 'vp2Renderer':
        # if we use the viewport 2 (vp2) renderer, the images contain an alpha channel,
        # so we simply use imconvert to composite them instead of chroma keying in nuke;
        # imconvert cannot read tif images, so we change the default tif to png
        img_compression = 'png'

    # get shot name
    if not shot_name:
        if 'shot' in tokens:
            i = tokens.index('shot')
            shot_name = tokens[i + 2]
    proj_name = dept_name = ''

    img_dir = cf.osPathConvert(img_dir)
    if not img_dir or not os.path.isdir(img_dir):
        img_dir = os.path.dirname(file_path) + '/images/' + file_name + '/'
    img_dir = img_dir + '/' if not img_dir.endswith('/') else img_dir
    if not os.path.isdir(img_dir):
        try:
            os.makedirs(img_dir)
        except:
            pm.confirmDialog(message='Failed to create folder: ' + img_dir)
            return
    else:
        # delete old images
        img_left_list = glob.glob(img_dir + file_name[:-3] + '.left.*.%s' % img_compression)
        img_right_list = glob.glob(img_dir + file_name[:-3] + '.right.*.%s' % img_compression)
        try:
            if clean_images:
                for img in img_left_list + img_right_list:
                    if os.path.isfile(img):
                        os.remove(img)
        except:
            pm.confirmDialog(message='Failed to remove old images at ' + img_dir)
            cmd_str = '"' + opener + '" ' + img_dir
            os.system(cmd_str)
            return

    mov_dir = cf.osPathConvert(mov_dir)
    if not mov_dir or not os.path.isdir(mov_dir):
        mov_dir = os.path.dirname(file_path) + '/data/'
    mov_dir = mov_dir + '/' if not mov_dir.endswith('/') else mov_dir
    if not os.path.isdir(mov_dir):
        try:
            os.makedirs(mov_dir)
        except:
            pm.confirmDialog(message='Failed to create folder: ' + mov_dir)
            return

    # get cut in and cut out
    if cut_in is None or cut_out is None:
        cut_in = pm.animation.playbackOptions(q=True, minTime=True)
        cut_out = pm.animation.playbackOptions(q=True, maxTime=True)
    print 'cut_in', cut_in
    print 'cut_out', cut_out

    # get the audio file if any; we only grab the first audio track
    audios = [a for a in pm.ls(type='audio')]
    audio_path = ''
    if audios and enable_audio:
        audio_node = audios[0]
        slider = pm.mel.eval('$tmpVar=$gPlayBackSlider')
        pm.timeControl(slider, edit=True, displaySound=True, sound=audio_node)
        audio_path = cf.osPathConvert(audio_node.attr('filename').get())
        if not os.path.isfile(audio_path):
            audio_path = ''

    # get image width and height
    if widthHeight[0] == 0 or widthHeight[1] == 0:
        widthHeight = [pm.PyNode('defaultResolution').attr('width').get(),
                       pm.PyNode('defaultResolution').attr('height').get()]

    # playblast!
    pb_stereo(img_dir + file_name[:-3], cut_in, cut_out,
              pb_offscreen=pb_offscreen,
              display_elements=display_elements,
              ext=img_compression,
              widthHeight=widthHeight,
              camera_grp=camera_grp,
              renderers=renderers)
    if img_only:
        print 'Playblasted left and right images at: ', img_dir
        return img_dir

    # prepare the rv converting command string
    # get the left and right image paths, check the image sequences
    img_left = img_dir + file_name[:-3] + '.left.#.%s' % img_compression
    img_right = img_dir + file_name[:-3] + '.right.#.%s' % img_compression
    img_left_list = sorted(glob.glob(img_left.replace('#', '*')))  # glob doesn't recognize the # symbol
    img_right_list = sorted(glob.glob(img_right.replace('#', '*')))
    if not (img_left_list and len(img_left_list) == len(img_right_list)):
        pm.confirmDialog(
            title='Error',
            message='Failed to find images or the number of left images does not match the right: \n' + img_left + '\n' + img_right)
        return
    cut_in_img = int(img_left_list[0].split('.')[-2])
    cut_out_img = int(img_left_list[-1].split('.')[-2])

    # rvio command
    if audio_path == '':
        cmd_str = '"' + rvio + '" [ ' + img_left + ' ' + img_right + ' ] -outstereo -o ' + mov_dir + mov_file_name
    else:
        cmd_str = '"' + rvio + '" [ ' + img_left + ' ' + img_right + ' ' + audio_path + ' ] -audiorate 48000 -outstereo -o ' + mov_dir + mov_file_name
    cmd_str += ' -outfps 24'
    cmd_str += ' -outparams comment="%s-%s %s %s"' % (cut_in_img, cut_out_img, file_path, audio_path)  # store custom contents in the comment attribute
    cmd_str += ' timecode=%s' % cut_in_img  # encode timecode
    if shot_name and rv['version'] >= 6:
        cmd_str += ' reelname=%s' % shot_name  # encode reelname
    os.environ['RV_ENABLE_MIO_FFMPEG'] = '1'  # RV_ENABLE_MIO_FFMPEG needs to be enabled to encode the comment attribute

    # execute the rvio command
    print 'Cmd: \n', cmd_str
    p = subprocess.Popen(cmd_str, shell=rv_shell)
    (out, err) = p.communicate()
    print 'out', out
    print 'err', err
    print 'Done'

    cmd_str = '"' + player + '" -stereo scanline ' + mov_dir + mov_file_name
    if play_on_finish:
        print 'Cmd:', cmd_str
        p = subprocess.Popen(cmd_str, shell=rv_shell)
        (out, err) = p.communicate()
        print 'out', out
        print 'err', err
        print 'Done'
    return