def do_log_channel_msgs(event, message):
    """Log channel traffic into the ff_statserv_channels table.

    Called for (nearly) every incoming IRC message, so it bails out as
    cheaply as possible on anything that is not a channel message we are
    supposed to be logging.

    :param event: hook event descriptor (unused, required by the hook API).
    :param message: parsed IRC message with .command, .source, .parameters.
    """
    global db_cursor, statserv, my_channels
    # This runs on every incoming message, so sanity-check cheaply first.
    if len(message.parameters) < 1:
        return  # definitely not something we're interested in
    # We have to know what channels to log - we'll join anything that
    # we're invited to.
    if message.command == "INVITE":
        if message.parameters[0].lower() == statserv.nick.lower():
            Network.sendMsg(
                IRCMessage(":", statserv.nick, "JOIN", message.parameters[1]))
            # Using a dict because key lookup in a dict is O(1).
            my_channels[message.parameters[1].lower()] = True
        return  # ignore invite messages, even if they're not for us
    # Messages targeted at a channel should always have a "#" character as
    # the first character of their first parameter (the message's target).
    if message.parameters[0][0] != "#":
        return
    # Finally, if we're not supposed to be in a particular channel, we
    # don't log messages for it.
    # BUG FIX: dict.has_key() was removed in Python 3; use `in`/`not in`.
    if message.parameters[0].lower() not in my_channels:
        return
    client = Client.findByNick(message.source)
    # This will not work if the service stamp contains something other
    # than nickserv's group id for that nick.
    cl_groupid = client.servicestamp if (
        client is not None and client.hasMode("r")) else None
    try:
        db_cursor.execute(
            "insert into `ff_statserv_channels`(`time`, `channel`, `nick`, `nickserv_groupid`, `message_type`, `message`) values (%s, %s, %s, %s, %s, %s)",
            (
                time.time(),
                message.parameters[0],
                message.source,
                cl_groupid,
                message.command,
                " ".join(message.parameters[1:])  #hopefully this won't f**k up
            ))
    except Exception as e:
        log.warning("Can't update channel stats: %s", str(e))
def screen_capture(self, local=True):
    """Capture the active Maya 3d viewport to a .png file and return its path.

    :param bool local: if True, write to a local C:/Temp path built from the
        sequence/shot/process codes; otherwise write to the pipeline location
        derived from the movie thumbnail.
    :returns: path of the written image, or None if the target directory
        could not be created.
    """
    # Import api modules
    import maya.OpenMaya as api
    import maya.OpenMayaUI as apiUI

    # Grab the last active 3d viewport
    view = apiUI.M3dView.active3dView()

    # Nudge the current time forward and back to force the viewport to
    # refresh before the color buffer is read.
    import time
    current_time = currentTime(query=True)
    time.sleep(0.25)
    currentTime(current_time + 1, edit=True)
    time.sleep(0.25)
    currentTime(current_time, edit=True)

    # Read the color buffer from the view, and save the MImage to disk
    image = api.MImage()
    view.readColorBuffer(image, True)

    if local:
        file_path = "C:/Temp/%s.%s_%s.png" % (
            self.sequence.code, self.shot.code, self.shotfile.process)
    else:
        file_path = files.local_file(files.dm(self.movie.thumbnail))

    # Make sure the directory is created first
    if not os.path.exists(os.path.dirname(file_path)):
        try:
            os.makedirs(os.path.dirname(file_path))
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and programming errors; os.makedirs
        # failures surface as OSError.
        except OSError:
            log.warning("Couldn't create directory %s" % os.path.dirname(file_path))
            return None

    image.writeToFile(file_path, 'png')
    return file_path
def _get_qss_files(self, paths=None):
    """
    Scan the given paths for Qt stylesheet files.

    :param list paths: list of paths to add to the scan.
    :returns: dictionary of stylesheet names/filenames.
    :rtype: dict
    """
    # BUG FIX: default was a mutable list literal (shared between calls);
    # None is the safe sentinel.
    qss_files = dict()
    if not paths:
        # BUG FIX: previously returned [] here, contradicting the
        # documented dict return type.
        return qss_files
    for path in paths:
        for fn in os.listdir(path):
            bn, fext = os.path.splitext(fn)
            if fext.lower() in ['.qss', '.css']:
                qss_file = os.path.join(path, fn)
                # skip files already registered under another style name
                if qss_file not in qss_files.values():
                    style_name = self._parse_stylesheet_name(qss_file)
                    if style_name is None:
                        log.warning('cannot parse style name from "%s".' % qss_file)
                        style_name = 'no-style'
                    log.debug('adding stylesheet "%s" from "%s".' % (style_name, qss_file))
                    # first stylesheet found for a given name wins
                    if style_name not in qss_files:
                        qss_files[style_name] = qss_file
    return qss_files
def _get_config_files(self, paths=None):
    """
    Scan the given paths for style config files.

    :param list paths: list of paths to add to the scan.
    :returns: dictionary of config names/filenames.
    :rtype: dict
    """
    # BUG FIX: default was a mutable list literal (shared between calls);
    # None is the safe sentinel.
    cfg_files = dict()
    if not paths:
        # BUG FIX: previously returned [] here, contradicting the
        # documented dict return type.
        return cfg_files
    for path in paths:
        for fn in os.listdir(path):
            bn, fext = os.path.splitext(fn)
            if fext.lower() in ['.ini', '.cfg']:
                cfg_file = os.path.join(path, fn)
                # filenames are expected as "<style>-<type>.<ext>"
                names = bn.split('-')
                if len(names) < 2:
                    log.warning('improperly named config file: "%s"' % cfg_file)
                    continue
                style_name, cfg_type = names
                if style_name not in cfg_files:
                    cfg_files[style_name] = dict(fonts=None, palette=None)
                log.debug('adding %s config "%s" from "%s".' % (cfg_type, style_name, cfg_file))
                cfg_files[style_name][cfg_type] = cfg_file
    return cfg_files
def __run_tests__(args, properties):
    """Execute the list of tests described in a json file.

    Loads the test definitions from args.json_path, runs each compatible
    test, writes a Report_<json>.txt summary into args.report_dir, and
    returns True only if every executed test passed.

    :param args: parsed CLI arguments (json_path, scope, report_dir, ...).
    :param properties: runtime properties forwarded to each test run.
    :returns: overall pass/fail flag.
    :rtype: bool
    """
    debug_log(args, 'testing start')
    output = ''  # output string saved in Report_* file
    passed = True  # passed flag
    with open(args.json_path, 'r') as file:
        # open the json file and parse it
        tests = json.load(file)
        test_list = []
        for test in tests:
            # only entries with a 'frequency' key are runnable tests
            if 'frequency' in test:
                test_list.append(Test(test, args.json_path))
    log.info(f'List of tests: {", ".join([x.name for x in test_list])}')
    count = 0
    for test in test_list:  # for each test
        count += 1
        print()  # empty line here
        log.info(f"Test [{count}/{len(test_list)}]")
        log.info(f"saving json file for test `{test.name}`")
        __save_json__(test, args)  # save raw json copy
        log.info(f"drawing graph for test `{test.name}`")
        __draw_graph__(test, properties, args)  # make the graph image
        log.info(f"preparing test `{test.name}`")
        start = datetime.datetime.now().strftime(__DATE_FMT__)  # stats
        output += f'{test.name} - {start}'
        if not test.compatible(TestScope.init(args.scope)):
            output += f' - {start} - SKIPPED\n'
            log.warning(f'test `{test.name}` skipped')
        else:
            log.info(f"Test `{test.name}`")
            log.info(f'-- Author: {test.author}')
            log.info(f'-- Description: {test.description}')
            result = __run_test__(test, args, properties)
            end = datetime.datetime.now().strftime(__DATE_FMT__)
            log.info(f"finish test `{test.name}`")
            passed = passed and (result == Result.PASSED)
            result_str = str(result)
            output += f' - {end} - {result_str}\n'
            if result == Result.PASSED:
                # BUG FIX: "succeded" typo in the log message
                log.success(f"test `{test.name}` succeeded")
            else:
                # BUG FIX: closing backtick was missing from the message
                log.error(f"test `{test.name}` failed")
            if not isinstance(TestScope.init(args.scope), TestScope):
                # copy output files
                __copy_output__(test, args, properties)
    json_name = os.path.split(args.json_path)[-1]
    report_path = os.path.join(args.report_dir, f'Report_{json_name[:-5]}.txt')
    with open(report_path, 'w') as file:
        file.write(output)
    print('final status: ', passed)
    debug_log(args, 'testing end')
    return passed
def do_lusers_update():
    """Write a snapshot of the current LUSERS counters to the database.

    Reads the ffservices.stats counters and inserts one timestamped row
    into ff_statserv_lusers; failures are logged, never raised.
    """
    global db_cursor
    #log.debug(str(ffservices.stats))
    query = "insert into `ff_statserv_lusers`(`time`,`normal_users`,`invisible_users`,`servers`,`opers`,`channels`) values (UNIX_TIMESTAMP(), %s, %s, %s, %s, %s)"
    stat_keys = ("normal", "invisible", "servers", "opers", "channels")
    try:
        db_cursor.execute(query,
                          tuple(ffservices.stats[key] for key in stat_keys))
    except Exception as err:
        log.warning("Can't update LUSERS stats for statserv: %s", str(err))
def do_lusers_update():
    """Record current LUSERS statistics in ff_statserv_lusers.

    Pulls the counters from ffservices.stats and inserts a single row
    timestamped server-side with UNIX_TIMESTAMP(); database errors are
    logged as warnings and swallowed.
    """
    global db_cursor
    #log.debug(str(ffservices.stats))
    try:
        stats = ffservices.stats
        row = (stats["normal"], stats["invisible"], stats["servers"],
               stats["opers"], stats["channels"])
        db_cursor.execute(
            "insert into `ff_statserv_lusers`(`time`,`normal_users`,`invisible_users`,`servers`,`opers`,`channels`) values (UNIX_TIMESTAMP(), %s, %s, %s, %s, %s)",
            row)
    except Exception as e:
        log.warning("Can't update LUSERS stats for statserv: %s", str(e))
def create_result_entry(self, job_id, test_id, test):
    """ Inserting a performance result entry for a givent test and given run.

    Skips tests without statistics, and does nothing if a result row for
    this (job, test) pair already exists.

    Parameters:
    -----------
    - job_id: db job id
    - test_id: db test id
    - test: test object
    """
    if not test.has_statistics():
        log.warning(f'test `{test.name}` has no statistics')
        return
    query = 'SELECT * FROM results WHERE job=%s AND test=%s'
    res = self.execute(query, (job_id, test_id))
    if not res:
        # NOTE(review): MAX(ID)+1 is race-prone under concurrent writers;
        # an AUTO_INCREMENT column would be safer - confirm schema.
        new_id = self.execute(
            'SELECT (MAX(ID)+1) AS id FROM results;')[0]['id']
        log.info(f'inserting results for test `{test.name}`')
        # result codes: 1=passed, 2=skipped, 3=failed, 4=crashed
        result = 1
        if test.is_crashed():
            result = 4
        elif test.is_failed():
            result = 3
        elif test.is_skipped():
            result = 2
        # BUG FIX: io_write/io_read values were swapped relative to the
        # column list. Also fully parameterized the query instead of
        # interpolating values into the SQL string.
        query = '''INSERT INTO results (
                ID, test, job, result, start, duration,
                cpu_time, cpu_usage_avg, cpu_usage_max,
                memory_avg, memory_max, io_write, io_read,
                threads_avg, threads_max, raw_data, output
            ) VALUES (
                %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);'''
        self.execute(query, (
            new_id, test_id, job_id, result, test.start, test.duration,
            test.cpu_time, test.cpu_usage_avg, test.cpu_usage_max,
            test.memory_avg, test.memory_max, test.io_write, test.io_read,
            int(test.threads_avg), test.threads_max,
            test.raw_profile(), test.stdout))
def do_log_channel_msgs(event, message):
    """Insert a row into ff_statserv_channels for each logged channel message.

    Runs on every incoming IRC message; joins channels on INVITE and logs
    only messages targeted at channels we have been invited into.

    :param event: hook event descriptor (unused, required by the hook API).
    :param message: parsed IRC message with .command, .source, .parameters.
    """
    global db_cursor, statserv, my_channels
    # This will end up being called on every incoming message (or at least
    # quite a few of them) so we kind of have to do some extra sanity checks.
    if (len(message.parameters) < 1):
        return  # definitely not something we're interested in
    # We have to know what channels to log - we'll join anything that
    # we're invited to.
    if (message.command == "INVITE"):
        if (message.parameters[0].lower() == statserv.nick.lower()):
            Network.sendMsg(
                IRCMessage(":", statserv.nick, "JOIN", message.parameters[1]))
            my_channels[message.parameters[1].lower(
            )] = True  # using a dict here because key lookup in a dict is probably faster than using in
        return  # ignore invite messages, even if they're not for us
    # Messages targeted at a channel should always have a "#" character as
    # the first character of their first parameter (the message's target).
    if (not message.parameters[0][0] == "#"):
        return
    # Finally, if we're not supposed to be in a particular channel, we
    # don't log messages for it.
    # BUG FIX: dict.has_key() was removed in Python 3; use `not in`.
    if (message.parameters[0].lower() not in my_channels):
        return
    client = Client.findByNick(message.source)
    # This will not work if the service stamp contains something other
    # than nickserv's group id for that nick.
    cl_groupid = client.servicestamp if (client is not None
                                         and client.hasMode("r")) else None
    try:
        db_cursor.execute(
            "insert into `ff_statserv_channels`(`time`, `channel`, `nick`, `nickserv_groupid`, `message_type`, `message`) values (%s, %s, %s, %s, %s, %s)",
            (
                time.time(),
                message.parameters[0],
                message.source,
                cl_groupid,
                message.command,
                " ".join(message.parameters[1:])  #hopefully this won't f**k up
            ))
    except Exception as e:
        log.warning("Can't update channel stats: %s", str(e))
def _get_qss_paths(self, paths=None):
    """
    Read stylesheets from config paths.

    :param list paths: list of paths to add to the scan.
    :returns: array of search paths.
    :rtype: tuple
    """
    # BUG FIX: mutable default argument replaced with None sentinel.
    if paths is None:
        paths = []
    # Accept a single path string as a convenience.
    # BUG FIX: `type(paths) in [str, unicode]` raises NameError on
    # Python 3 (`unicode` no longer exists); isinstance(str) covers it.
    if paths and isinstance(paths, str):
        paths = [
            paths,
        ]
    qss_paths = ()
    qss_paths = qss_paths + (options.SCENEGRAPH_STYLESHEET_PATH, )

    # read external paths
    if 'SCENEGRAPH_STYLESHEET_PATH' in os.environ:
        qpaths = os.getenv('SCENEGRAPH_STYLESHEET_PATH').split(':')
        if paths:
            for p in paths:
                if p not in qpaths:
                    qpaths.append(p)

        for path in qpaths:
            if path not in qss_paths:
                if not os.path.exists(path):
                    log.warning(
                        'stylesheet path "%s" does not exist, skipping.' % path)
                    continue
                log.debug('reading external stylesheet path: "%s".' % path)
                qss_paths = qss_paths + (path, )
    return qss_paths
def _get_config_paths(self, paths=None):
    """
    Read configs from config paths.

    :param list paths: list of paths to add to the scan.
    :returns: array of search paths.
    :rtype: tuple
    """
    # BUG FIX: mutable default argument replaced with None sentinel.
    if paths is None:
        paths = []
    # Accept a single path string as a convenience.
    # BUG FIX: `type(paths) in [str, unicode]` raises NameError on
    # Python 3 (`unicode` no longer exists); isinstance(str) covers it.
    if paths and isinstance(paths, str):
        paths = [
            paths,
        ]
    cfg_paths = ()
    cfg_paths = cfg_paths + (options.SCENEGRAPH_CONFIG_PATH, )

    # read external paths
    if 'SCENEGRAPH_CONFIG_PATH' in os.environ:
        spaths = os.getenv('SCENEGRAPH_CONFIG_PATH').split(':')
        if paths:
            for p in paths:
                if p not in spaths:
                    spaths.append(p)

        for path in spaths:
            if path not in cfg_paths:
                if not os.path.exists(path):
                    log.warning(
                        'config path "%s" does not exist, skipping.' % path)
                    continue
                log.debug('reading config external path: "%s".' % path)
                cfg_paths = cfg_paths + (path, )
    return cfg_paths
def execute(self, percent=100, textured=False, dynamics_enabled=False,
            create_version=False, comment=None, batch=False):
    """Perform a playblast of the current shot through a dedicated window.

    :param int percent: render-scale percentage forwarded to playblast.
    :param bool textured: display textures in the playblast viewport.
    :param bool dynamics_enabled: enable dynamics in the viewport.
    :param bool create_version: publish the shot and resulting movie.
    :param comment: publish comment; when set (or in batch), skips the GUI.
    :param bool batch: suppress interactive playback at the end.
    :returns: None normally; False if the user cancelled the publish.
    """
    debug = 0

    # First, make sure we're in GUI mode - can't playblast otherwise
    from pymel.core.general import about
    if about(batch=True):
        log.warning("Can't perform playblast in batch mode - requires GUI to run")
        return

    # We're in GUI mode, continue with playblast
    log.info("Performing playblast of shot %s" % self.shot)

    # Create version if we're playblasting to snapshot for approvals
    if create_version:
        log.info("Publishing shot %s prior to playblast" % self.shot)
        if not debug:
            if comment or batch:
                self.shot.vault()
            else:
                from internal.publishing import gui as publish_gui
                published = publish_gui.publish_shot_gui(self.shot)
                if not published:
                    log.warning("Playblast of %s was cancelled" % self.shot)
                    return False

    # Store a list of selected objects, to restore at the end.
    # Deselect all objects, so that no wireframes show.
    selected = ls(selection=True)
    select(deselect=True)

    # Set the start and end timerange appropriately
    self.set_timerange(create_version=create_version)

    # Construct the window to playblast through - this stuff is in the
    # core.gui.animation module
    from core.gui import animation as anim_gui
    playblast_window, model_editor = anim_gui.playblast_window(
        self.shot.process, self.width, self.height,
        textured=textured, dynamics_enabled=dynamics_enabled)

    # Need to set then reset the image format in the render globals - for
    # some stupid reason, Autodesk uses this setting for playblasts as of
    # 2011 - didn't for the last 15 years.
    default_render_globals = ls('defaultRenderGlobals')[0]
    prev_image_format = None
    if default_render_globals:
        log.info("Setting render globals to IFF for playblast")
        prev_image_format = default_render_globals.imageFormat.get()
        default_render_globals.imageFormat.set(7)  # 7 == IFF

    # Do the actual playblast - have to defer the evaluation of the
    # command, to give the window time to draw.
    # BUG FIX: `percent` was hard-coded to 100 here, silently ignoring
    # the caller-supplied parameter.
    playblast_finished = playblast(format="iff",
                                   filename=self.local_pblast_name,
                                   viewer=False, showOrnaments=False,
                                   fp=4, percent=percent, fo=True,
                                   quality=100)

    # Reset the render globals to what the user had it set to before.
    # BUG FIX: truthiness test skipped restoring a previous format of 0;
    # compare against None instead.
    if prev_image_format is not None:
        log.info("Resetting render globals to user defined value: %s" % prev_image_format)
        default_render_globals.imageFormat.set(prev_image_format)

    if not playblast_finished:
        log.warning("User cancelled the playblast for %s - not saving to snapshot" % self.shot)
        if selected:
            select(selected)
        return

    if create_version:
        # Publish the movie file to snapshot
        self.encode()
        self.publish()

    # Delete the playblast window now that we're done with it -
    # use deferred to ensure that the playblast is done before
    # deleting the window
    evalDeferred('from pymel.core import deleteUI; deleteUI("%s")' % playblast_window)

    # Restore selection
    if selected:
        select(selected)

    # Run RV on the resulting images - if we're in batch mode, skip this,
    # and if we're not creating a version in the DB, then it's a local
    # playblast - run RV on the local images instead of on the movie in
    # snapshot
    if not batch:
        if create_version:
            self.playback(movie=True)
        else:
            self.playback()
def parse(self, filename):
    """
    Parses a single template file. Data is structured into groups
    of attributes (ie: 'Transform', 'Attributes')

    :param str filename: file on disk to read.
    :returns: dictionary of metadata parameters.
    :rtype: dict
    """
    # NOTE(review): this calls initialize() when ALREADY initialized -
    # presumably initialize() resets parser state before a re-parse;
    # confirm it is not meant to be `if not self._initialized`.
    if self._initialized:
        self.initialize()

    log.debug('reading metadata file: "%s"' % filename)
    data = dict()
    if filename is not None:
        if os.path.exists(filename):
            # `parent` tracks the dict that new sections/attributes are
            # written into; `attr_name` is the attribute currently being
            # populated with properties.
            parent = data
            attr_name = None

            for line in open(filename, 'r'):
                #remove newlines
                line = line.rstrip('\n')
                rline = line.lstrip(' ')
                rline = rline.rstrip()
                # skip '#'/';' comment lines and blank lines
                if not rline.startswith("#") and not rline.startswith(
                        ';') and rline.strip() != "":
                    # parse sections
                    # remove leading spaces
                    # section/attribute header match
                    if re.match(regex.get("section"), rline):
                        section_obj = re.search(regex.get("section_value"),
                                                rline)
                        if section_obj:
                            section_type = section_obj.group('attr')
                            section_value = section_obj.group('value')
                            # parse groups
                            if section_type == 'group':
                                # groups are only created once; new groups
                                # always hang off the top-level `data` dict
                                if section_value not in parent:
                                    parent = data
                                    group_data = dict()

                                    # set the current parent
                                    parent[section_value] = group_data
                                    parent = parent[section_value]
                                    #print '\nGroup: "%s"' % section_value

                            if section_type == 'attr':
                                attr_data = dict()
                                # connection attributes
                                #attr_data.update(connectable=False)
                                #attr_data.update(connection_type=None)
                                parent[section_value] = attr_data
                                attr_name = section_value
                                #print '  Attribute: "%s"' % attr_name

                            # input/output sections are attributes flagged
                            # as connectable, tagged with their direction
                            if section_type in ['input', 'output']:
                                conn_data = dict()
                                conn_data.update(connectable=True)
                                conn_data.update(
                                    connection_type=section_type)
                                parent[section_value] = conn_data
                                attr_name = section_value
                                #print '  Connection: "%s"' % attr_name

                    else:
                        # not a section header: try "name TYPE value"
                        # property lines belonging to the current attribute
                        prop_obj = re.search(regex.get("properties"), rline)
                        if prop_obj:
                            pname = prop_obj.group('name')
                            ptype = prop_obj.group('type')
                            pvalu = prop_obj.group('value')
                            #print 'property: "%s" (%s)' % (pname, rline)
                            value = pvalu
                            if ptype in ['BOOL', 'INPUT', 'OUTPUT']:
                                if ptype == 'BOOL':
                                    value = True if pvalu == 'true' else False

                                # return connection types
                                if ptype in ['INPUT', 'OUTPUT']:
                                    # data type: pvalu = FILE, DIRECTORY, ETC.
                                    value = pvalu.lower()

                            # try and get the actual value
                            else:
                                # NOTE(review): eval() of file content is a
                                # code-execution risk if template files are
                                # not trusted; the bare except silently
                                # falls back to the raw string.
                                try:
                                    value = eval(pvalu)
                                except:
                                    log.warning(
                                        'cannot parse default value of "%s.%s": "%s" (%s)'
                                        % (attr_name, pname, pvalu, filename))

                            #print '   property: %s (%s)' % (prop_obj.group('name'), attr_name)
                            properties = {
                                pname: {
                                    'type': ptype,
                                    'value': value
                                }
                            }
                            # NOTE(review): plain dicts have no
                            # .initialize(); this appears to rely on the
                            # stored value being a dict-like custom type -
                            # confirm against the attribute containers
                            # created above.
                            parent[attr_name].initialize(properties)
                else:
                    if rline:
                        log.debug('skipping: "%s"' % rline)
    return data
def read_metadata(self, verbose=False):
    """
    Initialize node metadata from metadata files on disk.

    Metadata is parsed by looking at the __bases__ of each node class
    (ie: all DagNode subclasses will inherit all of the default DagNode
    attributes).

    :param bool verbose: print debug information to stdout instead of
        (only) logging it.
    :returns: merged metadata, keyed section -> attribute -> properties.
    :rtype: dict
    """
    import inspect
    parser = MetadataParser()
    node_metadata = dict()

    if verbose:
        print('\n# DEBUG: building metadata for: "%s" ' % self.Class())

    # query the base classes
    result = [
        self.__class__,
    ]
    for pc in self.ParentClasses():
        if pc.__name__ != 'Node':
            result.append(pc)

    sg_core_path = os.path.join(SCENEGRAPH_CORE, 'nodes.py')

    # iterate base-first so subclasses override inherited metadata
    for cls in reversed(result):
        cname = cls.__name__
        src_file = inspect.getfile(cls)

        node_type = None
        if hasattr(cls, 'node_type'):
            node_type = cls.node_type

        # strip the trailing 'c' of a .pyc path
        py_src = src_file.rstrip('c')
        if verbose:
            print(' - base class "%s" source file: "%s"' % (cname, py_src))

        dirname = os.path.dirname(src_file)
        basename = os.path.splitext(os.path.basename(src_file))[0]

        # return the source .py file if it exists
        if os.path.exists(py_src):
            src_file = py_src

        metadata_filename = os.path.join(dirname, '%s.mtd' % basename)

        # look for code node metadata in ../mtd
        if py_src == sg_core_path:
            if node_type:
                metadata_filename = os.path.join(SCENEGRAPH_METADATA_PATH,
                                                 '%s.mtd' % node_type)

        if verbose:
            # BUG FIX: this line applied the `%` operator to a
            # str.format-style template, raising TypeError whenever
            # verbose=True; use .format() to match the template.
            print('- metadata file for "{0}": "{1}"'.format(
                cname, metadata_filename))

        if not os.path.exists(metadata_filename):
            if not verbose:
                log.warning(
                    'plugin description file "{0}" does not exist.'.format(
                        metadata_filename))
            else:
                print('WARNING: metadata file for "{0}": "{1}" not found'.
                      format(cname, metadata_filename))
            continue

        log.debug('reading plugin metadata file: "%s".' % metadata_filename)

        # parse the metadata
        parsed = parser.parse(metadata_filename)
        for section in parsed:
            if section not in node_metadata:
                node_metadata[section] = dict()
            attributes = parsed.get(section)

            # parse out input/output here?
            for attr in attributes:
                if attr not in node_metadata[section]:
                    node_metadata.get(section)[attr] = dict()
                attr_properties = attributes.get(attr)
                # NOTE(review): plain dicts have no .initialize(); this
                # appears to rely on the parser returning a dict-like
                # custom type - confirm against MetadataParser.parse().
                node_metadata.get(section).get(attr).initialize(
                    attr_properties)
    return node_metadata