def test_12_user_agent_is_sys_version(self): from sys import version as sys_version self.assertEqual( self.ga.user_agent, sys_version.replace('\n', ''), '"{}" should be "{}"'.format(self.ga.user_agent, sys_version), )
def create_versions_message():
    """Build a multi-line report of Timeline, system, Python and wxPython versions."""
    lines = [
        "Timeline version: %s" % get_full_version(),
        "System version: %s" % ", ".join(platform.uname()),
        "Python version: %s" % python_version.replace("\n", ""),
        "wxPython version: %s" % wx.version(),
    ]
    return "\n".join(lines)
def print_picrust_config():
    """Print system information and PICRUSt dependency versions to stdout.

    Relies on module-level names imported elsewhere in the file (platform,
    python_version, executable, the *_lib_version values, get_script_version).
    """
    # NOTE: the original used Python-2-only print statements; parenthesized
    # single-argument print calls behave identically on py2 and are valid py3.
    system_info = [
        ("Platform", platform),
        ("Python/GCC version", python_version.replace('\n', ' ')),
        ("Python executable", executable)]
    # Right-align labels to the widest one for a tidy two-column layout.
    max_len = max([len(e[0]) for e in system_info])
    print("\nSystem information")
    print("==================")
    for v in system_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    version_info = [
        ("NumPy version", numpy_lib_version),
        ("biom-format version", biom_lib_version),
        ("PyCogent version", pycogent_lib_version),
        ("PICRUSt version", picrust_lib_version),
        ("PICRUSt script version", get_script_version())]
    max_len = max([len(e[0]) for e in version_info])
    print("\nDependency versions")
    print("===================")
    for v in version_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("")
def print_biom_config():
    """Print system info, biom-format dependency versions and package info.

    Relies on module-level names imported elsewhere in the file (platform,
    python_version, executable, *_lib_version values, SparseObj).
    """
    # NOTE: the original used Python-2-only print statements; parenthesized
    # single-argument print calls behave identically on py2 and are valid py3.
    system_info = [
        ("Platform", platform),
        ("Python/GCC version", python_version.replace('\n', ' ')),
        ("Python executable", executable)]
    # Right-align labels to the widest one for a tidy two-column layout.
    max_len = max([len(e[0]) for e in system_info])
    print("\nSystem information")
    print("==================")
    for v in system_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    version_info = [
        ("NumPy version", numpy_lib_version),
        ("biom-format library version", biom_lib_version),
        ("biom-format script version", get_script_version())]
    max_len = max([len(e[0]) for e in version_info])
    print("\nDependency versions")
    print("===================")
    for v in version_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    package_info = [("SparseObj type", SparseObj)]
    max_len = max([len(e[0]) for e in package_info])
    print("\nbiom-format package information")
    print("===============================")
    for v in package_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("")
def version_long():
    """Return string to be printed with --version-long"""
    from sys import version as py_version

    def package_versions():
        """Return the package versions."""
        from .version import package_version
        # NOTE(review): `self` is referenced although this is not a method;
        # preserved as-is — presumably bound in an enclosing scope. Verify.
        collected = []
        for name in sorted(self.version_long_packages()):
            package_version(name, collected)
        return [{k: v} for k, v in collected]

    return {
        "version": self._version,
        "python": {
            "version": py_version.replace("\n", ""),
            "packages": package_versions(),
        },
    }
def create_versions_message():
    """Return a newline-separated summary of component versions."""
    parts = []
    parts.append("Timeline version: %s" % get_version())
    parts.append("System version: %s" % ", ".join(platform.uname()))
    parts.append("Python version: %s" % python_version.replace("\n", ""))
    parts.append("wxPython version: %s" % wx.version())
    return "\n".join(parts)
def print_picrust_config():
    """Print system information and PICRUSt dependency versions to stdout.

    Relies on module-level names imported elsewhere in the file (platform,
    python_version, executable, the *_lib_version values, get_script_version).
    """
    # NOTE: the original used Python-2-only print statements; parenthesized
    # single-argument print calls behave identically on py2 and are valid py3.
    system_info = [("Platform", platform),
                   ("Python/GCC version", python_version.replace('\n', ' ')),
                   ("Python executable", executable)]
    # Right-align labels to the widest one for a tidy two-column layout.
    max_len = max([len(e[0]) for e in system_info])
    print("\nSystem information")
    print("==================")
    for v in system_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    version_info = [
        ("NumPy version", numpy_lib_version),
        ("biom-format version", biom_lib_version),
        ("PyCogent version", pycogent_lib_version),
        ("PICRUSt version", picrust_lib_version),
        ("PICRUSt script version", get_script_version()),
    ]
    max_len = max([len(e[0]) for e in version_info])
    print("\nDependency versions")
    print("===================")
    for v in version_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("")
def index():
    """Render the index page with Flask-Bootstrap, platform and Python versions."""
    from flask_bootstrap import __version__ as FLASK_BOOTSTRAP_VERSION
    from platform import version as platform_version
    from sys import version as sys_version

    # Strip the square brackets sys.version puts around the build info.
    info = {
        'flask_bootstrap_version': FLASK_BOOTSTRAP_VERSION,
        'platform_version': platform_version(),
        'sys_version': sys_version.replace('[', '').replace(']', ''),
    }
    return render_template('index.html', version_info=info)
def main():
    """Print QIIME system/dependency/config information; optionally run checks.

    Relies on module-level names imported elsewhere in the file (platform,
    python_version, executable, *_lib_version values, helper functions).
    """
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    qiime_config = load_qiime_config()
    rdp_jarpath = get_rdp_jarpath()
    # `is None` instead of `== None` (PEP 8 identity comparison).
    if rdp_jarpath is None:
        rdp_version = "Not installed."
    else:
        rdp_version = split(rdp_jarpath)[1]
    java_version = get_java_version()
    if java_version is None:
        java_version = "Not installed."
    # NOTE: the original used Python-2-only print statements; parenthesized
    # single-argument print calls behave identically on py2 and are valid py3.
    system_info = [
        ("Platform", platform),
        ("Python version", python_version.replace('\n', ' ')),
        ("Python executable", executable)]
    max_len = max([len(e[0]) for e in system_info])
    print("\nSystem information")
    print("==================")
    for v in system_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    version_info = [
        ("PyCogent version", pycogent_lib_version),
        ("NumPy version", numpy_lib_version),
        ("matplotlib version", matplotlib_lib_version),
        ("biom-format version", biom_lib_version),
        ("qcli version", qcli_lib_version),
        ("QIIME library version", get_qiime_library_version()),
        ("QIIME script version", __version__),
        ("PyNAST version (if installed)", pynast_lib_version),
        ("RDP Classifier version (if installed)", rdp_version),
        ("Java version (if installed)", java_version),
        ("Emperor version", emperor_lib_version)]
    max_len = max([len(e[0]) for e in version_info])
    print("\nDependency versions")
    print("===================")
    for v in version_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("\nQIIME config values")
    print("===================")
    max_len = max([len(key) for key in qiime_config])
    for key, value in qiime_config.items():
        print("%*s:\t%s" % (max_len, key, value))
    # Run the TestCase main function to do the tests; argv is replaced so
    # unittest does not try to consume this script's own CLI options.
    if opts.test:
        print("\n\nrunning checks:\n")
        test_main(argv=["", "-v"])
def get_host_specs() -> dict:
    """Collect basic host specs: hostname, CPU, core count, RAM (MB), Python version."""
    from os import cpu_count
    from platform import processor, uname
    from psutil import virtual_memory
    from sys import version

    specs = {}
    specs['host'] = uname()[1]
    specs['cpu'] = processor()
    specs['cores'] = cpu_count()
    # Total physical memory in megabytes (decimal, 1000**2 bytes per MB).
    specs['memory'] = virtual_memory().total // (1000 ** 2)
    specs['python'] = version.replace('\n', ' ')
    return specs
def get_python_version() -> str:
    """Describe the running Python: version plus the detected execution environment."""
    env = Config.detect_environment()
    if env == SHELL:
        label = "shell"
    elif env == IPYTHON:
        import IPython
        label = "IPython " + IPython.__version__ + ", shell"
    elif env == NOTEBOOK:
        import IPython
        label = "IPython " + IPython.__version__ + ", notebook"
    else:
        label = "unknown!"
    return "Python " + version.replace('\n', '') + " (" + label + ")"
def generate_export_codes(pickle_file_name, model, filename, target_name, random_state=42):
    """Generate all library import calls for use in stand alone python scripts.

    Parameters
    ----------
    pickle_file_name: string
        Pickle file name for a fitted scikit-learn estimator
    model: scikit-learn estimator
        A fitted scikit-learn model
    filename: string
        File name of input dataset
    target_name: string
        Target name in input data
    random_state: int
        Random seed in model

    Returns
    -------
    pipeline_text: String
        The Python scripts for applying the current optimized pipeline
        in stand-alone python environment
    """
    # NOTE(review): the original file's line breaks inside this template were
    # lost; the layout below was reconstructed — verify the generated script.
    # `\\` sequences become single-backslash line continuations in the output.
    pipeline_text = """# Python version: {python_version}
# Results were generated with numpy v{numpy_version}, pandas v{pandas_version} and scikit-learn v{skl_version}
# random seed = {random_state}
# Training dataset filename = {dataset}
# Pickle filename = {pickle_file_name}
# Model in the pickle file: {model}
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from sklearn.utils import check_X_y
from sklearn.metrics import make_scorer

# NOTE: Edit variables below with appropriate values
# path to your pickle file, below is the downloaded pickle file
pickle_file = '{pickle_file_name}'
# file path to the dataset
dataset = '{dataset}'
# target column name
target_column = '{target_name}'
# seed to be used for train_test_split (default in PennAI is 42)
seed = {random_state}

# Balanced accuracy below was described in [Urbanowicz2015]: the average of sensitivity and specificity is computed for each class and then averaged over total number of classes.
# It is NOT the same as sklearn.metrics.balanced_accuracy_score, which is defined as the average of recall obtained on each class.
def balanced_accuracy(y_true, y_pred):
    all_classes = list(set(np.append(y_true, y_pred)))
    all_class_accuracies = []
    for this_class in all_classes:
        this_class_sensitivity = 0.
        this_class_specificity = 0.
        if sum(y_true == this_class) != 0:
            this_class_sensitivity = \\
                float(sum((y_pred == this_class) & (y_true == this_class))) /\\
                float(sum((y_true == this_class)))
            this_class_specificity = \\
                float(sum((y_pred != this_class) & (y_true != this_class))) /\\
                float(sum((y_true != this_class)))
        this_class_accuracy = (this_class_sensitivity + this_class_specificity) / 2.
        all_class_accuracies.append(this_class_accuracy)
    return np.mean(all_class_accuracies)

# load fitted model
pickle_model = joblib.load(pickle_file)
model = pickle_model['model']

# read input data
input_data = pd.read_csv(dataset, sep=None, engine='python')

# Application 1: reproducing training score and testing score from PennAI
features = input_data.drop(target_column, axis=1).values
target = input_data[target_column].values
# Checking dataset
features, target = check_X_y(features, target, dtype=None, order="C", force_all_finite=True)
training_features, testing_features, training_classes, testing_classes = \\
    train_test_split(features, target, random_state=seed, stratify=input_data[target_column])
scorer = make_scorer(balanced_accuracy)
train_score = scorer(model, training_features, training_classes)
print("Training score: ", train_score)
test_score = scorer(model, testing_features, testing_classes)
print("Testing score: ", test_score)

# Application 2: cross validation of fitted model
testing_features = input_data.drop(target_column, axis=1).values
testing_target = input_data[target_column].values
# Get holdout score for fitted model
print("Holdout score: ", end="")
print(model.score(testing_features, testing_target))

# Application 3: predict outcome by fitted model
# In this application, the input dataset may not include target column
input_data.drop(target_column, axis=1, inplace=True) # Please comment this line if there is no target column in input dataset
predict_target = model.predict(input_data.values)
""".format(python_version=version.replace('\n', ''),
           numpy_version=np.__version__,
           pandas_version=pd.__version__,
           skl_version=skl_version,
           dataset=",".join(filename),
           target_name=target_name,
           pickle_file_name=pickle_file_name,
           random_state=random_state,
           # Newlines in the model repr are prefixed with '#' so the repr
           # stays inside the generated script's header comment block.
           model=str(model).replace('\n', '\n#'))
    return pipeline_text
def main():
    """Print QIIME system/dependency/config information; optionally run the
    base or full install test suite.

    Relies on module-level names imported elsewhere in the file (platform,
    python_version, executable, *_lib_version values, helper functions,
    TestLoader/TextTestRunner, stdout).
    """
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    # NOTE: the original used Python-2-only print statements; parenthesized
    # single-argument print calls behave identically on py2 and are valid py3.
    if opts.haiku:
        print("QIIME provides insight\nmicrobial in nature\nto ecology")
        exit(0)
    qiime_config = load_qiime_config()
    test = opts.test
    qiime_full_install = opts.qiime_full_install
    rdp_jarpath = get_rdp_jarpath()
    if rdp_jarpath is None:
        rdp_version = "Not installed."
    else:
        rdp_version = split(rdp_jarpath)[1]
    java_version = get_java_version()
    if java_version is None:
        java_version = "Not installed."
    system_info = [("Platform", platform),
                   ("Python version", python_version.replace('\n', ' ')),
                   ("Python executable", executable)]
    max_len = max([len(e[0]) for e in system_info])
    print("\nSystem information")
    print("==================")
    for v in system_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("\nQIIME default reference information")
    print("===================================")
    print("For details on what files are used as QIIME's default references, see here:")
    print(" https://github.com/biocore/qiime-default-reference/releases/tag/%s" % qdr_lib_version)
    version_info = [("QIIME library version", get_qiime_library_version()),
                    ("QIIME script version", __version__),
                    ("qiime-default-reference version", qdr_lib_version),
                    ("NumPy version", numpy_lib_version),
                    ("SciPy version", scipy_lib_version),
                    ("pandas version", pandas_lib_version),
                    ("matplotlib version", matplotlib_lib_version),
                    ("biom-format version", biom_lib_version),
                    ("h5py version", h5py_lib_version),
                    ("qcli version", qcli_lib_version),
                    ("pyqi version", pyqi_lib_version),
                    ("scikit-bio version", skbio_lib_version),
                    ("PyNAST version", pynast_lib_version),
                    ("Emperor version", emperor_lib_version),
                    ("burrito version", burrito_lib_version),
                    ("burrito-fillings version", bfillings_lib_version),
                    ("sortmerna version", sortmerna_lib_version),
                    ("sumaclust version", sumaclust_lib_version),
                    ("swarm version", swarm_lib_version),
                    ("gdata", gdata_installed)]
    # RDP/Java info is only meaningful for a full install.
    if qiime_full_install:
        version_info += [("RDP Classifier version (if installed)", rdp_version),
                         ("Java version (if installed)", java_version)]
    max_len = max([len(e[0]) for e in version_info])
    print("\nDependency versions")
    print("===================")
    for v in version_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("\nQIIME config values")
    print("===================")
    print("For definitions of these settings and to learn how to configure QIIME, see here:")
    print(" http://qiime.org/install/qiime_config.html")
    print(" http://qiime.org/tutorials/parallel_qiime.html\n")
    max_len = max([len(key) for key in qiime_config])
    for key, value in qiime_config.items():
        print("%*s:\t%s" % (max_len, key, value))
    if test:
        if qiime_full_install:
            print("\nQIIME full install test results")
            print("===============================")
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
        else:
            print("\nQIIME base install test results")
            print("===============================")
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
        if opts.verbose:
            verbosity = 2
        else:
            verbosity = 1
        TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)
def _get_python_version(self):
    """Return the interpreter version string with newlines stripped out."""
    return "".join(python_version.split("\n"))
'snmpOutPkts', 'snmpInBadVersions', 'snmpInBadCommunityNames', 'snmpInBadCommunityUses', 'snmpInASNParseErrs', 'snmpInTooBigs', 'snmpInNoSuchNames', 'snmpInBadValues', 'snmpInReadOnlys', 'snmpInGenErrs', 'snmpInTotalReqVars', 'snmpInTotalSetVars', 'snmpInGetRequests', 'snmpInGetNexts', 'snmpInSetRequests', 'snmpInGetResponses', 'snmpInTraps', 'snmpOutTooBigs', 'snmpOutNoSuchNames', 'snmpOutBadValues', 'snmpOutGenErrs', 'snmpOutSetRequests', 'snmpOutGetResponses', 'snmpOutTraps', 'snmpEnableAuthenTraps', 'snmpSilentDrops', 'snmpProxyDrops', 'snmpTrapOID', 'coldStart', 'snmpSetSerialNo') __sysDescr = MibScalarInstance( sysDescr.name, (0, ), sysDescr.syntax.clone( "PySNMP engine version %s, Python %s" % (__version__, version.replace('\n', ' ').replace('\r', ' ')))) __sysObjectID = MibScalarInstance( sysObjectID.name, (0, ), sysObjectID.syntax.clone( (1, 3, 6, 1, 4, 1, 20408))) class SysUpTime(TimeTicks): createdAt = time() def clone(self, **kwargs): if 'value' not in kwargs: kwargs['value'] = int((time() - self.createdAt) * 100) return TimeTicks.clone(self, **kwargs) __sysUpTime = MibScalarInstance(sysUpTime.name, (0, ), SysUpTime(0))
#!/usr/bin/env python
# coding=utf8
# Fixed shebang: the original "#!usr/bin/env" lacked the leading slash and
# would fail when the script is executed directly.
from __future__ import print_function

from platform import uname
from sys import version

print("Hello world! From a Python agnostic binary.")
print("Python version:", version.replace("\n", ""))
print("OS version:", " ".join(uname()))
def main():
    """Print QIIME system/dependency/config information; optionally run the
    base or full install test suite.

    Relies on module-level names imported elsewhere in the file (platform,
    python_version, executable, *_lib_version values, helper functions,
    TestLoader/TextTestRunner, stdout).
    """
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    # NOTE: the original used Python-2-only print statements; parenthesized
    # single-argument print calls behave identically on py2 and are valid py3.
    if opts.haiku:
        print("QIIME provides insight\nmicrobial in nature\nto ecology")
        exit(0)
    qiime_config = load_qiime_config()
    test = opts.test
    qiime_full_install = opts.qiime_full_install
    rdp_jarpath = get_rdp_jarpath()
    if rdp_jarpath is None:
        rdp_version = "Not installed."
    else:
        rdp_version = split(rdp_jarpath)[1]
    java_version = get_java_version()
    if java_version is None:
        java_version = "Not installed."
    system_info = [
        ("Platform", platform),
        ("Python version", python_version.replace('\n', ' ')),
        ("Python executable", executable)]
    max_len = max([len(e[0]) for e in system_info])
    print("\nSystem information")
    print("==================")
    for v in system_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("\nQIIME default reference information")
    print("===================================")
    print("For details on what files are used as QIIME's default references, see here:")
    print(" https://github.com/biocore/qiime-default-reference/releases/tag/%s" % qdr_lib_version)
    version_info = [
        ("QIIME library version", get_qiime_library_version()),
        ("QIIME script version", __version__),
        ("qiime-default-reference version", qdr_lib_version),
        ("NumPy version", numpy_lib_version),
        ("SciPy version", scipy_lib_version),
        ("pandas version", pandas_lib_version),
        ("matplotlib version", matplotlib_lib_version),
        ("biom-format version", biom_lib_version),
        ("h5py version", h5py_lib_version),
        ("qcli version", qcli_lib_version),
        ("pyqi version", pyqi_lib_version),
        ("scikit-bio version", skbio_lib_version),
        ("PyNAST version", pynast_lib_version),
        ("Emperor version", emperor_lib_version),
        ("burrito version", burrito_lib_version),
        ("burrito-fillings version", bfillings_lib_version),
        ("sortmerna version", sortmerna_lib_version),
        ("sumaclust version", sumaclust_lib_version),
        ("swarm version", swarm_lib_version),
        ("gdata", gdata_installed)
    ]
    # RDP/Java info is only meaningful for a full install.
    if qiime_full_install:
        version_info += [
            ("RDP Classifier version (if installed)", rdp_version),
            ("Java version (if installed)", java_version)]
    max_len = max([len(e[0]) for e in version_info])
    print("\nDependency versions")
    print("===================")
    for v in version_info:
        print("%*s:\t%s" % (max_len, v[0], v[1]))
    print("\nQIIME config values")
    print("===================")
    print("For definitions of these settings and to learn how to configure QIIME, see here:")
    print(" http://qiime.org/install/qiime_config.html")
    print(" http://qiime.org/tutorials/parallel_qiime.html\n")
    max_len = max([len(key) for key in qiime_config])
    for key, value in qiime_config.items():
        print("%*s:\t%s" % (max_len, key, value))
    if test:
        if qiime_full_install:
            print("\nQIIME full install test results")
            print("===============================")
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
        else:
            print("\nQIIME base install test results")
            print("===============================")
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
        if opts.verbose:
            verbosity = 2
        else:
            verbosity = 1
        TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)
class GoogleAnalytics(object): """GA tracker object for preparing and sending data to GA's endpoint.""" tracker_type = 'web' # app or web debug = False logger = None app_name = None app_id = None app_version = None app_installer_id = None client_id = None custom_dimensions = {} custom_metrics = {} data_source = 'python' document_encoding = None hostname = None ip_address = None page = None property_id = None screen_name = None user_agent = sys_version.replace('\n', '') user_id = None user_language = None version = 1 # Configuration def __init__( self, property_id, client_id=None, user_id=None, document_encoding=None, ip_address=None, user_language=None, debug=False, logger=None, ): """Create a new tracker object with base properties. Params: property_id (str): Tracking ID / web property ID. client_id (str): (optional) Anonymous ID of a user, device, or browser instance. user_id (str): Known ID of the user. document_encoding (str): (optional) Encoding character set of the page/document. ip_address (str): (optional) IPv4 address of the user. user_language (str): (optional) ISO 639-1 language of the user. debug (bool): (optional) Whether to send debugging hits. Default: False. Raises: ValueError if debug is not a boolean. """ self.property_id = property_id self.user_id = user_id self.client_id = self.__client_id(client_id) self.document_encoding = document_encoding self.ip_address = ip_address self.user_language = user_language if debug is not None and not isinstance(debug, bool): raise ValueError('debug should be a boolean.') else: self.debug = debug if debug: # create a logger for logging the debugging messages later self.logger = logger or logging.getLogger(__name__) def __client_id(self, client_id): """Set the Client ID from a preset client_id or a new one.""" if not client_id: # no preset client_id, create one. if self.user_id: # use the User ID as the basis for the Client ID. 
from hashlib import sha1 sha1_hash = sha1() sha1_hash.update(self.user_id) client_id = sha1_hash.hexdigest() else: # create a new Client ID. # the format is similar to the one created by analytics.js. unique_id = self.__random() from time import time timestamp = int(time()) client_id = '{}.{}'.format(unique_id, timestamp) return client_id # Utilities def __cache_buster(self): return self.__random() def __is_number(self, value): return isinstance(value, (float, int)) def __random(self): return int(random_random() * 10**8) # One-time setup def __set_app_parameters( self, app_name, app_id=None, app_version=None, app_installer_id=None, ): """Set the base app properties. Params: app_name (str): Name of the application. app_id (str): (optional) ID of the application. app_version (str): (optional) Version of the application. app_installer_id (str): (optional) Installer ID of the application. """ if app_name is not None: self.tracker_type = 'app' self.app_name = app_name self.app_id = app_id self.app_version = app_version self.app_installer_id = app_installer_id def __set_custom_definitions(self, def_type, dictionary): """Set the base Custom Definitions (Dimensions or Metrics). Params: def_type (str): 'dimensions' or 'metrics'. dictionary (dict): Indices and values. Refer to the specification for custom_dimensions and custom_metrics. Raises: ValueError if def_type is not 'dimensions' or 'metrics'. ValueError if dictionary is not a dict. ValueError if a metric value is not an integer or float. 
""" if def_type not in ['dimensions', 'metrics']: raise ValueError( 'Unrecognised custom definition: {}.'.format(def_type)) if dictionary is None: return if not isinstance(dictionary, dict): raise ValueError('Expected custom_{} as a dict.'.format(def_type)) if def_type == 'dimensions': custom_definitions = self.custom_dimensions elif def_type == 'metrics': custom_definitions = self.custom_metrics for index, value in dictionary.items(): if def_type == 'metrics' and not self.__is_number(value): raise ValueError( '"{}" custom_metric should be a number.'.format(value)) if def_type == 'dimensions': key_prefix = 'cd' elif def_type == 'metrics': key_prefix = 'cm' key = '{}{}'.format(key_prefix, index) if value is None: if key in custom_definitions: custom_definitions.pop(key, None) else: custom_definitions[key] = value return def __set_custom_dimensions( self, custom_dimensions, ): """Set base Custom Dimension-related properties. Params: custom_dimensions (dict): Custom Dimension indices and values. Syntax: { index: value, index: value, ... } Example: { '1': 'foo', '3': 'bar' } """ self.__set_custom_definitions('dimensions', custom_dimensions) def __set_custom_metrics( self, custom_metrics, ): """Set base Custom Metric-related properties. Params: custom_metrics (dict): Custom Metric indices and values. Syntax: { index: value, index: value, ... } Example: { '1': 10, '4': 5.6 } """ self.__set_custom_definitions('metrics', custom_metrics) def __set_user_id( self, user_id=None, ): """Set base User ID.""" self.user_id = user_id def set( self, user_id=None, custom_dimensions=None, custom_metrics=None, app_name=None, app_id=None, app_version=None, app_installer_id=None, ): """Set the base properties for all hits. All parameters are optional. Params: user_id (str): Known ID of the user. custom_dimensions (dict): Custom Dimension indices and values. Syntax: { index: value, index: value, ... 
} Example: { '1': 'foo', '3': 'bar' } custom_metrics (dict): Custom Metric indices and values. Syntax: { index: value, index: value, ... } Example: { '1': 10, '4': 5.6 } app_name (str): Name of the application. app_id (str): ID of the application. app_version (str): Version of the application. app_installer_id (str): Installer ID of the application. """ self.__set_custom_dimensions(custom_dimensions) self.__set_custom_metrics(custom_metrics) self.__set_user_id(user_id) self.__set_app_parameters(app_name, app_id, app_version, app_installer_id) # Sending hits def __get_base_payload(self): """Get the base payload for all hits.""" cache_buster = self.__cache_buster() payload = { 'cid': self.client_id, 'de': self.document_encoding, 'ds': self.data_source, 'tid': self.property_id, 'ua': self.user_agent, 'uid': self.user_id, 'uip': self.ip_address, 'ul': self.user_language, 'v': self.version, 'z': cache_buster, } if self.tracker_type == 'web': payload['dh'] = self.hostname payload['dp'] = self.page elif self.tracker_type == 'app': payload['an'] = self.app_name payload['aid'] = self.app_id payload['av'] = self.app_version payload['aiid'] = self.app_installer_id payload['cd'] = self.screen_name return payload def __get_content_groups( self, content_groups, ): """Get the payload for Content Groups. Params: content_groups (list): Content groups. Syntax: [ group, group, ... ] Example: [ 'foo', 'bar' ] Returns: (dict): Payload with Content Group properties. Raises: ValueError if content_groups is not a list. """ payload = {} if content_groups is not None: if not isinstance(content_groups, list): raise ValueError('Expected content_groups as a list.') for i, content_group in enumerate(content_groups): key = 'cg{}'.format(i + 1) payload[key] = content_group return payload def __get_custom_definitions(self, def_type, dictionary): """Get the payload for Custom Definitions (Dimensions or Metrics). 
Merges the values of dictionary with the base custom_dimensions or custom_metrics dictionaries. Params: def_type (str): 'dimensions' or 'metrics'. dictionary (dict): Indices and values. Refer to the specification for custom_dimensions and custom_metrics. Returns: (dict): Payload with Custom Definition properties. Raises: ValueError if def_type is not 'dimensions' or 'metrics'. ValueError if dictionary is not a dict. ValueError if a metric value is not an integer or float. """ if def_type not in ['dimensions', 'metrics']: raise ValueError( 'Unrecognised custom definition: {}.'.format(def_type)) payload = {} if def_type == 'dimensions': payload.update(self.custom_dimensions) elif def_type == 'metrics': payload.update(self.custom_metrics) if dictionary is not None: if not isinstance(dictionary, dict): raise ValueError( 'Expected custom_{} as a dict.'.format(def_type)) for index, value in dictionary.items(): if def_type == 'metrics' and not self.__is_number(value): raise ValueError( '"{}" custom_metric should be a number.'.format(value)) if def_type == 'dimensions': key_prefix = 'cd' elif def_type == 'metrics': key_prefix = 'cm' key = '{}{}'.format(key_prefix, index) if value is None: if key in payload: payload.pop(key, None) else: payload[key] = value return payload def __get_custom_dimensions(self, custom_dimensions): """Get the payload for Custom Dimensions. Params: custom_dimensions (dict): Custom Dimension indices and values. Syntax: { index: value, index: value, ... } Example: { '1': 'foo', '3': 'bar' } Returns: (dict): Payload with Custom Dimension properties. """ payload = self.__get_custom_definitions( 'dimensions', custom_dimensions, ) return payload def __get_custom_metrics(self, custom_metrics): """Get the payload for Custom Metrics. Params: custom_metrics (dict): Custom Metric indices and values. Syntax: { index: value, index: value, ... } Example: { '1': 10, '4': 5.6 } Returns: (dict): Payload with Custom Metric properties. 
""" payload = self.__get_custom_definitions( 'metrics', custom_metrics, ) return payload def __get_user_id(self): """Get the payload for User ID.""" payload = {'uid': self.user_id} return payload def __send_hit( self, hit_type, hit_payload, custom_dimensions=None, custom_metrics=None, content_groups=None, ): """Send a hit to the GA collection or validation server. Params: hit_type (str): Type of hit. Refer to HIT_TYPES. hit_payload (dict): Payload of properties to send with the hit. custom_dimensions (dict): (optional) Custom Dimension indices and values. Syntax: { index: value, index: value, ... } Example: { '1': 'foo', '3': 'bar' } custom_metrics (dict): (optional) Custom Metric indices and values. Syntax: { index: value, index: value, ... } Example: { '1': 10, '4': 5.6 } content_groups (list): Content groups. Syntax: [ group, group, ... ] Example: [ 'foo', 'bar' ] Raises: ValueError if hit_type is not found in HIT_TYPES. """ if hit_type not in HIT_TYPES: raise ValueError('Invalid hit_type: {}.'.format(hit_type)) payload = self.__get_base_payload() payload['t'] = hit_type payload.update(hit_payload) custom_dimensions_payload = self.__get_custom_dimensions( custom_dimensions) payload.update(custom_dimensions_payload) custom_metrics_payload = self.__get_custom_metrics(custom_metrics) payload.update(custom_metrics_payload) if hit_type in ['pageview', 'screenview']: content_groups_payload = self.__get_content_groups(content_groups) payload.update(content_groups_payload) # rebuild payload without None values data = {} for key, value in payload.items(): if value is not None: data[key] = value endpoint = GA_DEBUG_ENDPOINT if self.debug else GA_ENDPOINT req = requests.post(endpoint, data=data) if self.debug: response = req.json() self.__handle_debug_response(response['hitParsingResult'][0]) # Public methods for sending hits. # Each method corresponds to a hit type. 
    def send_event(
        self,
        event_category,
        event_action,
        event_label=None,
        event_value=None,
        non_interaction=False,
        custom_dimensions=None,
        custom_metrics=None,
    ):
        """Send an Event hit.

        Params:
            event_category (str): Category of the event.
            event_action (str): Action of the event.
            event_label (str): (optional) Label of the event.
            event_value (int): (optional) Value of the event.
            non_interaction (bool): (optional) Whether this is
                non-interactive. Default: False.
            custom_dimensions (dict): (optional) Custom Dimension indices
                and values. Syntax: { index: value, ... }
            custom_metrics (dict): (optional) Custom Metric indices and
                values. Syntax: { index: value, ... }

        Raises:
            ValueError if event_category is None.
            ValueError if event_action is None.
            ValueError if event_value is not an integer.
            ValueError if non_interaction is not a boolean.
        """
        if not event_category:
            raise ValueError('Missing event_category when sending event hit.')
        if not event_action:
            raise ValueError('Missing event_action when sending event hit.')
        if event_value and not isinstance(event_value, int):
            raise ValueError(
                'event_value should be an integer when sending event hit.')
        if non_interaction and not isinstance(non_interaction, bool):
            raise ValueError(
                'non_interaction should be a boolean when sending event hit.')
        # Measurement Protocol parameter names for event hits.
        hit_payload = {
            'ec': event_category,
            'ea': event_action,
            'el': event_label,
            'ev': event_value,
            'ni': int(non_interaction),
        }
        self.__send_hit(
            'event',
            hit_payload,
            custom_dimensions,
            custom_metrics,
        )

    def send_exception(
        self,
        ex_description,
        ex_fatal=False,
        custom_dimensions=None,
        custom_metrics=None,
    ):
        """Send an Exception hit.

        Params:
            ex_description (str): Description of the exception.
            ex_fatal (bool): (optional) Whether the exception is fatal.
                Default: False.
            custom_dimensions (dict): (optional) Custom Dimension indices
                and values. Syntax: { index: value, ... }
            custom_metrics (dict): (optional) Custom Metric indices and
                values. Syntax: { index: value, ... }

        Raises:
            ValueError if ex_description is None.
            ValueError if ex_fatal is not a boolean.
        """
        if not ex_description:
            raise ValueError(
                'Missing ex_description when sending exception hit.')
        if ex_fatal and not isinstance(ex_fatal, bool):
            raise ValueError(
                'ex_fatal should be a boolean when sending exception hit.')
        hit_payload = {
            'exd': ex_description,
            'exf': int(ex_fatal),
        }
        self.__send_hit(
            'exception',
            hit_payload,
            custom_dimensions,
            custom_metrics,
        )

    def send_pageview(
        self,
        page,
        hostname,
        title=None,
        custom_dimensions=None,
        custom_metrics=None,
        content_groups=None,
    ):
        """Send a Pageview hit.

        Also records the hostname and page on the instance for reuse by
        subsequent hits.

        Params:
            page (str): Path portion of the page URL.
            hostname (str): Hostname from which content was hosted.
            title (str): (optional) Title of the page / document.
            custom_dimensions (dict): (optional) Custom Dimension indices
                and values. Syntax: { index: value, ... }
            custom_metrics (dict): (optional) Custom Metric indices and
                values. Syntax: { index: value, ... }
            content_groups (list): (optional) Content groups, e.g.
                [ 'foo', 'bar' ].

        Raises:
            ValueError if page is None.
            ValueError if hostname is None.
        """
        if not page:
            raise ValueError('Missing page when sending pageview hit.')
        if not hostname:
            raise ValueError('Missing hostname when sending pageview hit.')
        self.hostname = hostname
        self.page = page
        hit_payload = {
            'dh': hostname,
            'dp': page,
            'dt': title,
        }
        self.__send_hit(
            'pageview',
            hit_payload,
            custom_dimensions,
            custom_metrics,
            content_groups,
        )

    def send_screenview(
        self,
        screen_name,
        custom_dimensions=None,
        custom_metrics=None,
        content_groups=None,
    ):
        """Send a Screenview hit.

        Also records the screen name on the instance for reuse by
        subsequent hits.

        Params:
            screen_name (str): Name of the screen.
            custom_dimensions (dict): (optional) Custom Dimension indices
                and values. Syntax: { index: value, ... }
            custom_metrics (dict): (optional) Custom Metric indices and
                values. Syntax: { index: value, ... }
            content_groups (list): (optional) Content groups, e.g.
                [ 'foo', 'bar' ].

        Raises:
            ValueError if screen_name is None.
        """
        if not screen_name:
            raise ValueError(
                'Missing screen_name when sending screenview hit.')
        self.screen_name = screen_name
        hit_payload = {
            'cd': screen_name,
        }
        self.__send_hit(
            'screenview',
            hit_payload,
            custom_dimensions,
            custom_metrics,
            content_groups,
        )

    def send_social(
        self,
        social_network,
        social_action,
        social_target,
        custom_dimensions=None,
        custom_metrics=None,
    ):
        """Send a Social hit.

        Params:
            social_network (str): Social network of the social interaction.
            social_action (str): Action of the social interaction.
            social_target (str): Target of the social interaction.
            custom_dimensions (dict): (optional) Custom Dimension indices
                and values. Syntax: { index: value, ... }
            custom_metrics (dict): (optional) Custom Metric indices and
                values. Syntax: { index: value, ... }

        Raises:
            ValueError if social_network is None.
            ValueError if social_action is None.
            ValueError if social_target is None.
        """
        if not social_network:
            raise ValueError('Missing social_network when sending social hit.')
        if not social_action:
            raise ValueError('Missing social_action when sending social hit.')
        if not social_target:
            raise ValueError('Missing social_target when sending social hit.')
        hit_payload = {
            'sn': social_network,
            'sa': social_action,
            'st': social_target,
        }
        self.__send_hit(
            'social',
            hit_payload,
            custom_dimensions,
            custom_metrics,
        )

    def send_timing(
        self,
        timing_category,
        timing_var,
        timing_value,
        timing_label=None,
        custom_dimensions=None,
        custom_metrics=None,
    ):
        """Send a Timing hit.

        Params:
            timing_category (str): Category of the user timing.
            timing_var (str): Variable of the user timing.
            timing_value (int): Value of the user timing in milliseconds.
            timing_label (str): (optional) Label of the user timing.
            custom_dimensions (dict): (optional) Custom Dimension indices
                and values. Syntax: { index: value, ... }
            custom_metrics (dict): (optional) Custom Metric indices and
                values. Syntax: { index: value, ... }

        Raises:
            ValueError if timing_category is None.
            ValueError if timing_var is None.
            ValueError if timing_value is None.
            ValueError if timing_value is not an integer.
        """
        if not timing_category:
            raise ValueError(
                'Missing timing_category when sending timing hit.')
        if not timing_var:
            raise ValueError('Missing timing_var when sending timing hit.')
        if not timing_value:
            raise ValueError('Missing timing_value when sending timing hit.')
        if timing_value and not isinstance(timing_value, int):
            raise ValueError(
                'timing_value should be an integer when sending timing hit.')
        hit_payload = {
            'utc': timing_category,
            'utv': timing_var,
            'utt': timing_value,
            'utl': timing_label,
        }
        self.__send_hit(
            'timing',
            hit_payload,
            custom_dimensions,
            custom_metrics,
        )

    # Debug

    def __handle_debug_response(self, hit_parsing_result):
        """Show the message from the validation server."""
        valid = hit_parsing_result['valid']
        hit = hit_parsing_result['hit']
        valid_message = "Valid" if valid else "Invalid"
        log_message = ["{} hit: {}".format(valid_message, hit)]
        if not valid:
            # Invalid hits carry per-problem parser messages; log each one.
            parser_messages = hit_parsing_result['parserMessage']
            for parser_message in parser_messages:
                message_type = parser_message['messageType']
                description = parser_message['description']
                log_message.append("- {}: {}".format(message_type,
                                                     description))
        self.logger.debug('\n'.join(log_message))
"%s is not %s: %s" % (variable, modes[access_var], fp)) if __name__ == "__main__": option_parser, opts, args = parse_command_line_parameters(**script_info) qiime_config = load_qiime_config() rdp_jarpath = get_rdp_jarpath() if rdp_jarpath == None: rdp_version = "Not installed." else: rdp_version = split(rdp_jarpath)[1] system_info = [ ("Platform", platform), ("Python version",python_version.replace('\n', ' ')), ("Python executable",executable)] max_len = max([len(e[0]) for e in system_info]) print "\nSystem information" print "==================" for v in system_info: print "%*s:\t%s" % (max_len,v[0],v[1]) version_info = [ ("PyCogent version", pycogent_lib_version), ("NumPy version", numpy_lib_version), ("matplotlib version", matplotlib_lib_version), ("QIIME library version", get_qiime_library_version()), ("QIIME script version", __version__), ("PyNAST version (if installed)", pynast_lib_version), ("RDP Classifier version (if installed)", rdp_version)]
def main():
    # NOTE(review): legacy Python 2 script (print statements). Prints the
    # QIIME system / dependency / config report and optionally runs the
    # dependency test suite.
    option_parser, opts, args = parse_command_line_parameters(**script_info)

    if opts.haiku:
        print "QIIME provides insight\nmicrobial in nature\nto ecology"
        exit(0)

    qiime_config = load_qiime_config()
    test = opts.test
    qiime_base_install = opts.qiime_base_install

    rdp_jarpath = get_rdp_jarpath()
    if rdp_jarpath is None:
        rdp_version = "Not installed."
    else:
        # Report just the jar file name, not its full path.
        rdp_version = split(rdp_jarpath)[1]

    java_version = get_java_version()
    if java_version is None:
        java_version = "Not installed."

    system_info = [("Platform", platform),
                   ("Python version", python_version.replace('\n', ' ')),
                   ("Python executable", executable)]
    # Right-align labels to the longest one for a tidy table.
    max_len = max([len(e[0]) for e in system_info])
    print "\nSystem information"
    print "=================="
    for v in system_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])

    version_info = [("NumPy version", numpy_lib_version),
                    ("SciPy version", scipy_lib_version),
                    ("matplotlib version", matplotlib_lib_version),
                    ("biom-format version", biom_lib_version),
                    ("qcli version", qcli_lib_version),
                    ("pyqi version", pyqi_lib_version),
                    ("scikit-bio version", skbio_lib_version),
                    ("QIIME library version", get_qiime_library_version()),
                    ("QIIME script version", __version__),
                    ("PyNAST version (if installed)", pynast_lib_version),
                    ("Emperor version", emperor_lib_version)]

    # RDP / Java are only reported for the full (non-base) install.
    if not qiime_base_install:
        version_info += [("RDP Classifier version (if installed)",
                          rdp_version),
                         ("Java version (if installed)", java_version)]

    max_len = max([len(e[0]) for e in version_info])
    print "\nDependency versions"
    print "==================="
    for v in version_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])

    print "\nQIIME config values"
    print "==================="
    max_len = max([len(key) for key in qiime_config])
    for key, value in qiime_config.items():
        print "%*s:\t%s" % (max_len, key, value)

    if test:
        # Pick the dependency test suite matching the install type.
        if qiime_base_install:
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
        else:
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
        if opts.verbose:
            verbosity = 2
        else:
            verbosity = 1
        TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)
def generate_export_codes(pickle_file_name, model, filename, target_name,
                          mode="classification", random_state=42):
    """Generate all library import calls for use in stand alone python scripts.

    Parameters
    ----------
    pickle_file_name: string
        Pickle file name for a fitted scikit-learn estimator
    model: scikit-learn estimator
        A fitted scikit-learn model
    filename: string
        File name of input dataset
    target_name: string
        Target name in input data
    mode: string
        'classification': Run classification analysis
        'regression': Run regression analysis
    random_state: int
        Random seed in model

    Returns
    -------
    pipeline_text: String
        The Python scripts for applying the current optimized
        pipeline in stand-alone python environment

    Raises
    ------
    ValueError
        If mode is not 'classification' or 'regression'.
    """
    if mode == 'classification':
        fold = "StratifiedKFold"
    elif mode == 'regression':
        fold = "KFold"
    else:
        # Fail fast with a clear message; previously an unknown mode left
        # `fold` unbound and crashed later inside .format() with an
        # UnboundLocalError.
        raise ValueError(
            "Unsupported mode: {!r}; expected 'classification' "
            "or 'regression'.".format(mode))

    # Base script: header comments, imports, and loading of the pickled
    # model plus the input dataset.
    exported_codes_1 = """# Python version: {python_version}
# Results were generated with numpy v{numpy_version},
# pandas v{pandas_version} and scikit-learn v{skl_version}.
# random seed = {random_state}
# Training dataset filename = {dataset}
# Pickle filename = {pickle_file_name}
# Model in the pickle file: {model}
import numpy as np
import pandas as pd
import joblib
from sklearn.utils import check_X_y
from sklearn.metrics import make_scorer, confusion_matrix
from sklearn.model_selection import cross_validate, {fold}

# NOTE: Edit variables below with appropriate values
# path to your pickle file, below is the downloaded pickle file
pickle_file = '{pickle_file_name}'
# file path to the dataset
dataset = '{dataset}'
# target column name
target_column = '{target_name}'
seed = {random_state}

# load fitted model
pickle_model = joblib.load(pickle_file)
model = pickle_model['model']

# read input data
input_data = pd.read_csv(dataset, sep=None, engine='python')
""".format(python_version=version.replace('\n', ''),
           numpy_version=np.__version__,
           pandas_version=pd.__version__,
           skl_version=skl_version,
           dataset=",".join(filename),
           target_name=target_name,
           pickle_file_name=pickle_file_name,
           random_state=random_state,
           model=str(model).replace('\n', '\n#'),
           fold=fold)
    # Script 2 shares the base; the mode-specific CV code is only appended
    # to script 1.
    exported_codes_2 = exported_codes_1
    if mode == "classification":
        exported_codes_1 += """
# Balanced accuracy below was described in [Urbanowicz2015]:
# the average of sensitivity and specificity is computed for each class
# and then averaged over total number of classes.
# It is NOT the same as sklearn.metrics.balanced_accuracy_score,
# which is defined as the average of recall obtained on each class.
def balanced_accuracy(y_true, y_pred):
    all_classes = list(set(np.append(y_true, y_pred)))
    all_class_accuracies = []
    for this_class in all_classes:
        this_class_sensitivity = 0.
        this_class_specificity = 0.
        if sum(y_true == this_class) != 0:
            this_class_sensitivity = \\
                float(sum((y_pred == this_class) & (y_true == this_class))) /\\
                float(sum((y_true == this_class)))
            this_class_specificity = \\
                float(sum((y_pred != this_class) & (y_true != this_class))) /\\
                float(sum((y_true != this_class)))
        this_class_accuracy = (this_class_sensitivity +
                               this_class_specificity) / 2.
        all_class_accuracies.append(this_class_accuracy)
    return np.mean(all_class_accuracies)

# reproducing training score and testing score from PennAI
features = input_data.drop(target_column, axis=1).values
target = input_data[target_column].values
# Checking dataset
features, target = check_X_y(features, target, dtype=None, order="C", force_all_finite=True)
scorer = make_scorer(balanced_accuracy)
# reproducing balanced accuracy scores
# computing cross-validated metrics
cv = StratifiedKFold(n_splits=10)
cv_scores = cross_validate(
                        estimator=model,
                        X=features,
                        y=target,
                        scoring=scorer,
                        cv=cv,
                        return_train_score=True,
                        return_estimator=True
                        )
train_score = cv_scores['train_score'].mean()
test_score = cv_scores['test_score'].mean()
print("Training score: ", train_score)
print("Testing score: ", test_score)

# reproducing confusion matrix
pred_cv_target = np.empty(target.shape)
for cv_split, est in zip(cv.split(features, target), cv_scores['estimator']):
    train, test = cv_split
    pred_cv_target[test] = est.predict(features[test])
cnf_matrix = confusion_matrix(
    target, pred_cv_target, labels=model.classes_)
print("Confusion Matrix:", cnf_matrix)
"""
    elif mode == "regression":
        exported_codes_1 += """
# reproducing training score and testing score from PennAI
features = input_data.drop(target_column, axis=1).values
target = input_data[target_column].values
# Checking dataset
features, target = check_X_y(features, target, dtype=None, order="C", force_all_finite=True)
# reproducing r2 scores
# computing cross-validated metrics
cv_scores = cross_validate(
                        estimator=model,
                        X=features,
                        y=target,
                        scoring='r2',
                        cv=10,
                        return_train_score=True,
                        return_estimator=True
                        )
train_score = cv_scores['train_score'].mean()
test_score = cv_scores['test_score'].mean()
print("Training score: ", train_score)
print("Testing score: ", test_score)
"""
    exported_codes_2 += """
# Application 1: cross validation of fitted model on a new dataset
testing_features = input_data.drop(target_column, axis=1).values
testing_target = input_data[target_column].values
# Get holdout score for fitted model
print("Holdout score: ", end="")
print(model.score(testing_features, testing_target))


# Application 2: predict outcome by fitted model
# In this application, the input dataset may not include target column
# Please comment this line below if there is no target column in input dataset
input_data.drop(target_column, axis=1, inplace=True)
predict_target = model.predict(input_data.values)
"""
    return exported_codes_1, exported_codes_2
'snmpOutTooBigs', 'snmpOutNoSuchNames', 'snmpOutBadValues', 'snmpOutGenErrs', 'snmpOutSetRequests', 'snmpOutGetResponses', 'snmpOutTraps', 'snmpEnableAuthenTraps', 'snmpSilentDrops', 'snmpProxyDrops', 'snmpTrapOID', 'coldStart', 'snmpSetSerialNo' ) __sysDescr = MibScalarInstance(sysDescr.name, (0,), sysDescr.syntax.clone("PySNMP engine version %s, Python %s" % (__version__, version.replace('\n', ' ').replace('\r', ' ')))) __sysObjectID = MibScalarInstance(sysObjectID.name, (0,), sysObjectID.syntax.clone((1,3,6,1,4,1,20408))) class SysUpTime(TimeTicks): createdAt = time() def clone(self, **kwargs): if 'value' not in kwargs: kwargs['value'] = int((time()-self.createdAt)*100) return TimeTicks.clone(self, **kwargs) __sysUpTime = MibScalarInstance(sysUpTime.name, (0,), SysUpTime(0)) __sysContact = MibScalarInstance(sysContact.name, (0,), sysContact.syntax.clone('')) __sysName = MibScalarInstance(sysName.name, (0,), sysName.syntax.clone('')) __sysLocation = MibScalarInstance(sysLocation.name, (0,), sysLocation.syntax.clone('')) __sysServices = MibScalarInstance(sysServices.name, (0,), sysServices.syntax.clone(0)) __sysORLastChange = MibScalarInstance(sysORLastChange.name, (0,), sysORLastChange.syntax.clone(0))
'snmpOutNoSuchNames', 'snmpOutBadValues', 'snmpOutGenErrs', 'snmpOutSetRequests', 'snmpOutGetResponses', 'snmpOutTraps', 'snmpEnableAuthenTraps', 'snmpSilentDrops', 'snmpProxyDrops', 'snmpTrapOID', 'coldStart', 'snmpSetSerialNo' ) __sysDescr = MibScalarInstance(sysDescr.name, (0,), sysDescr.syntax.clone( "PySNMP engine version %s, Python %s" % (__version__, version.replace('\n', ' ').replace('\r', ' ')))) __sysObjectID = MibScalarInstance(sysObjectID.name, (0,), sysObjectID.syntax.clone((1, 3, 6, 1, 4, 1, 20408))) class SysUpTime(TimeTicks): createdAt = time() def clone(self, **kwargs): if 'value' not in kwargs: kwargs['value'] = int((time() - self.createdAt) * 100) return TimeTicks.clone(self, **kwargs) __sysUpTime = MibScalarInstance(sysUpTime.name, (0,), SysUpTime(0)) __sysContact = MibScalarInstance(sysContact.name, (0,), sysContact.syntax.clone('')) __sysName = MibScalarInstance(sysName.name, (0,), sysName.syntax.clone(''))
def get_system_info(self):
    """Return a tuple of (label, value) pairs describing the host system.

    Values come from module-level ``platform``, ``python_version`` and
    ``executable``; newlines in the Python version string are flattened
    to spaces so the value prints on one line.
    """
    one_line_py_version = python_version.replace('\n', ' ')
    info = (
        ("Platform", platform),
        ("Python/GCC version", one_line_py_version),
        ("Python executable", executable),
    )
    return info
## This file is distributed under the MIT License (MIT). ## See LICENSE.txt for details. ## ##===------------------------------------------------------------------------------------------===## if __name__ != '__main__': try: __import__('pkg_resources').declare_namespace(__name__) except ImportError: __path__ = __import__('pkgutil').extend_path(__path__, __name__) from .config import __dawn_versioninfo__ __versioninfo__ = __dawn_versioninfo__ __version__ = '.'.join(str(v) for v in __versioninfo__) # # Check python version # from sys import version_info as __dawn_python_version_info if __dawn_python_version_info < (3, 4): from sys import version as __dawn_python_version raise Exception("Dawn (%s) requires at least Python 3.4 (running on %s)" % (__version__, __dawn_python_version.replace('\n', ' '))) # # Import submodules # from .sir import *
def main():
    # NOTE(review): legacy Python 2 script (print statements). Prints the
    # QIIME system / dependency / config report and optionally runs the
    # dependency test suite.
    option_parser, opts, args = parse_command_line_parameters(**script_info)

    if opts.haiku:
        print "QIIME provides insight\nmicrobial in nature\nto ecology"
        exit(0)

    qiime_config = load_qiime_config()
    test = opts.test
    qiime_base_install = opts.qiime_base_install

    rdp_jarpath = get_rdp_jarpath()
    if rdp_jarpath is None:
        rdp_version = "Not installed."
    else:
        # Report just the jar file name, not its full path.
        rdp_version = split(rdp_jarpath)[1]

    java_version = get_java_version()
    if java_version is None:
        java_version = "Not installed."

    system_info = [
        ("Platform", platform),
        ("Python version", python_version.replace('\n', ' ')),
        ("Python executable", executable)]
    # Right-align labels to the longest one for a tidy table.
    max_len = max([len(e[0]) for e in system_info])
    print "\nSystem information"
    print "=================="
    for v in system_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])

    version_info = [
        ("PyCogent version", pycogent_lib_version),
        ("NumPy version", numpy_lib_version),
        ("SciPy version", scipy_lib_version),
        ("matplotlib version", matplotlib_lib_version),
        ("biom-format version", biom_lib_version),
        ("qcli version", qcli_lib_version),
        ("pyqi version", pyqi_lib_version),
        ("scikit-bio version", skbio_lib_version),
        ("QIIME library version", get_qiime_library_version()),
        ("QIIME script version", __version__),
        ("PyNAST version (if installed)", pynast_lib_version),
        ("Emperor version", emperor_lib_version)]

    # RDP / Java are only reported for the full (non-base) install.
    if not qiime_base_install:
        version_info += [
            ("RDP Classifier version (if installed)", rdp_version),
            ("Java version (if installed)", java_version)]

    max_len = max([len(e[0]) for e in version_info])
    print "\nDependency versions"
    print "==================="
    for v in version_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])

    print "\nQIIME config values"
    print "==================="
    max_len = max([len(key) for key in qiime_config])
    for key, value in qiime_config.items():
        print "%*s:\t%s" % (max_len, key, value)

    if test:
        # Pick the dependency test suite matching the install type.
        if qiime_base_install:
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
        else:
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
        if opts.verbose:
            verbosity = 2
        else:
            verbosity = 1
        TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)