def build(self):
        ds = self.graph
        self.context = {"ce":
                        "https://raw.githubusercontent.com/Vocamp/ComputationalActivity/master/pattern/ComputationalEnvironment.jsonld"}

        CE = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalEnvironment#")
        CA = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalActivity#")
        DOCKER = Namespace("http://w3id.org/daspos/docker#")
        info = cpuinfo.get_cpu_info()

# ISSUES: We want the architecture URIs to be created only once, on
# build or initial commit. Otherwise, we want to re-read the URIs
# from the original graph.

        ds.bind("ce", CE)
        ceuri = URIRef(str(uuid.uuid4()))
        ds.add((ceuri, RDF.type, CE.ComputationalEnvironment))

        osUri = URIRef(str(uuid.uuid4()))
        ds.add((ceuri, CE.hasOperatingSystem, osUri))
        ds.add((osUri, RDFS.label, Literal("linux")))

        processorUri = URIRef(str(uuid.uuid4()))
        ds.add((ceuri, CE.hasHardware, processorUri))

        archUri = URIRef(str(uuid.uuid4()))
        ds.add((processorUri, CE.hasArchitecture, archUri))
        ds.add((archUri, RDFS.label, Literal("amd64")))
        ds.add((processorUri, CE.hasNumberOfCores,
                Literal("4", datatype=XSD.nonNegativeInteger)))
Example #2
def collect_system_information(trainer):
    import psutil

    mem = psutil.virtual_memory()
    trainer.set_job_info('memory_total', mem.total)

    # at this point, theano is already initialised through KerasLogger
    from theano.sandbox import cuda
    trainer.set_job_info('cuda_available', cuda.cuda_available)
    if cuda.cuda_available:
        trainer.on_gpu = cuda.use.device_number is not None
        trainer.set_job_info('cuda_device_number', cuda.active_device_number())
        trainer.set_job_info('cuda_device_name', cuda.active_device_name())
        if cuda.cuda_ndarray.cuda_ndarray.mem_info:
            gpu = cuda.cuda_ndarray.cuda_ndarray.mem_info()
            trainer.set_job_info('cuda_device_max_memory', gpu[1])
            free = gpu[0]/1024/1024/1024
            total = gpu[1]/1024/1024/1024
            used = total-free
            print("%.2fGB GPU memory used of %.2fGB" %(used, total))

    trainer.set_job_info('on_gpu', trainer.on_gpu)

    import cpuinfo
    cpu = cpuinfo.get_cpu_info()
    trainer.set_job_info('cpu_name', cpu['brand'])
    trainer.set_job_info('cpu', [cpu['hz_actual_raw'][0], cpu['count']])
Example #3
def get_cpu_info():
    import cpuinfo
    all_info = cpuinfo.get_cpu_info()
    all_info = all_info or {}
    info = {}
    for key in ('vendor_id', 'hardware', 'brand'):
        info[key] = all_info.get(key, 'unknown')
    return info
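
Usage is straightforward; any key missing from py-cpuinfo's result falls back to 'unknown'. Illustrative output only, values vary by machine:

>>> get_cpu_info()  # doctest: +SKIP
{'vendor_id': 'GenuineIntel', 'hardware': 'unknown', 'brand': 'Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz'}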
Example #4
def _collect_cpu_info(run_info):
  """Collect the CPU information for the local environment."""
  cpu_info = {}

  cpu_info["num_cores"] = multiprocessing.cpu_count()

  info = cpuinfo.get_cpu_info()
  cpu_info["cpu_info"] = info["brand"]
  cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6

  run_info["cpu_info"] = cpu_info
Example #5
 def SysInfoString(self):
     SysInfo = '<b>System Information</b>'
     SysInfo += '<br>OS : {0}'.format(platform.platform())
     SysInfo += '<br>CPU : {0}'.format(cpuinfo.get_cpu_info()['brand'])
     SysInfo += '<br>CPU Utilization : '
     CPUPercent = psutil.cpu_percent(interval = 0, percpu = True)
     for idx, a in enumerate(CPUPercent):
         SysInfo += 'CPU#{1} : {0}% '.format(a, idx)
     MemInfo = psutil.virtual_memory()
     SysInfo += '<br>RAM Total : {0:.2f} GB, Used : {1} %'.format(MemInfo.total / math.pow(1024,3),
                                                                 MemInfo.percent)
     return SysInfo
Example #6
def _collect_cpu_info(run_info):
  """Collect the CPU information for the local environment."""
  cpu_info = {}

  cpu_info["num_cores"] = multiprocessing.cpu_count()

  # Note: cpuinfo is not installed in the TensorFlow OSS tree.
  # It is installable via pip.
  import cpuinfo    # pylint: disable=g-import-not-at-top

  info = cpuinfo.get_cpu_info()
  cpu_info["cpu_info"] = info["brand"]
  cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6

  run_info["machine_config"]["cpu_info"] = cpu_info
Example #7
def gather_cpu_info():
  """Gather CPU Information.  Assumes all CPUs are the same."""
  cpu_info = test_log_pb2.CPUInfo()
  cpu_info.num_cores = multiprocessing.cpu_count()

  # Gather num_cores_allowed
  try:
    with gfile.GFile('/proc/self/status', 'rb') as fh:
      nc = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', fh.read())
    if nc:  # e.g. 'ff' => 8, 'fff' => 12
      cpu_info.num_cores_allowed = (
          bin(int(nc.group(1).replace(',', ''), 16)).count('1'))
  except errors.OpError:
    pass
  finally:
    if cpu_info.num_cores_allowed == 0:
      cpu_info.num_cores_allowed = cpu_info.num_cores

  # Gather the rest
  info = cpuinfo.get_cpu_info()
  cpu_info.cpu_info = info['brand']
  cpu_info.num_cores = info['count']
  cpu_info.mhz_per_cpu = info['hz_advertised_raw'][0] / 1.0e6
  l2_cache_size = re.match(r'(\d+)', str(info.get('l2_cache_size', '')))
  if l2_cache_size:
    # If a value is returned, it's in KB
    cpu_info.cache_size['L2'] = int(l2_cache_size.group(0)) * 1024

  # Try to get the CPU governor
  try:
    cpu_governors = set([
        gfile.GFile(f, 'r').readline().rstrip()
        for f in glob.glob(
            '/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')
    ])
    if cpu_governors:
      if len(cpu_governors) > 1:
        cpu_info.cpu_governor = 'mixed'
      else:
        cpu_info.cpu_governor = list(cpu_governors)[0]
  except errors.OpError:
    pass

  return cpu_info
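
A quick worked check of the Cpus_allowed parsing above: the hex value is a CPU-affinity bitmask, so counting set bits yields the number of usable cores, matching the inline comment ('ff' => 8, 'fff' => 12):

>>> bin(int('ff', 16)).count('1')
8
>>> bin(int('fff', 16)).count('1')
12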
Example #8
 def reset(self):
     self.__data = {}
     now = datetime.datetime.now()
     self.__data['seq'] = int(round(time.time() * 1000))
     self.__data['timestamp-client'] = now.isoformat()
     cinfo = cpuinfo.get_cpu_info()
     self.__data['cpu'] = format(cinfo['brand'])
     osinfo = OsInfo()
     oinfo = osinfo.getOsInfo(cinfo)
     self.__data['os'] = oinfo['os']
     self.__data['os-dist'] = oinfo['dist']
     self.__data['os-version'] = oinfo['version']
     self.__data['os-arch'] = oinfo['arch']
     self.__data['os-kernel'] = oinfo['kernel']
     self.__data['cpu-temp'] = cputemp.get_cpu_temp()
     l = Load()
     self.__data['cpu-load'] = l.getCpuLoad()
     self.__data['storage'] = l.getStorageStatus()
     self.__data['network'] = netinfo.get_network_interfaces()
Example #9
def display_cpu_information():
    """
    Display CPU information.
    Assumes all CPUs are the same.
    """
    import cpuinfo
    output_string = '\n-- INFORMATION: CPU -------------------\n'
    cpu_info = cpuinfo.get_cpu_info()
    try:
        output_string = add_param_to_output(output_string,
                                            'brand',
                                            cpu_info['brand'])
        output_string = add_param_to_output(output_string,
                                            'vendor id',
                                            cpu_info['vendor_id'])
        output_string = add_param_to_output(output_string,
                                            'model',
                                            cpu_info['model'])
        output_string = add_param_to_output(output_string,
                                            'family',
                                            cpu_info['family'])
        output_string = add_param_to_output(output_string,
                                            'bits',
                                            cpu_info['bits'])
        output_string = add_param_to_output(output_string,
                                            'architecture',
                                            cpu_info['arch'])
        output_string = add_param_to_output(output_string,
                                            'cores',
                                            cpu_info['count'])
        output_string = add_param_to_output(output_string,
                                            'advertised Hz',
                                            cpu_info['hz_advertised'])
        output_string = add_param_to_output(output_string,
                                            'actual Hz',
                                            cpu_info['hz_actual'])
        output_string = add_param_to_output(output_string,
                                            'l2 cache size',
                                            cpu_info['l2_cache_size'])
    except Exception:
        output_string += 'Some CPU information cannot be displayed\n'
    output_string += '----------------------------------------'
    neon_logger.display(output_string)
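
display_cpu_information depends on an add_param_to_output helper that is not shown in this snippet. A plausible minimal sketch, assuming it simply appends one 'name: value' line per parameter (the real helper in neon may differ):

def add_param_to_output(output_string, param_name, param_value):
    # Append one "name: value" line to the accumulated report string.
    return output_string + '{0}: {1}\n'.format(param_name, param_value)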
Example #10
def _collect_cpu_info(run_info):
  """Collect the CPU information for the local environment."""
  cpu_info = {}

  cpu_info["num_cores"] = multiprocessing.cpu_count()

  try:
    # Note: cpuinfo is not installed in the TensorFlow OSS tree.
    # It is installable via pip.
    import cpuinfo    # pylint: disable=g-import-not-at-top

    info = cpuinfo.get_cpu_info()
    cpu_info["cpu_info"] = info["brand"]
    cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6

    run_info["machine_config"]["cpu_info"] = cpu_info
  except ImportError:
    tf.compat.v1.logging.warn(
        "'cpuinfo' not imported. CPU info will not be logged.")
Example #11
    def get_job(self):
        """
        Gets job description from a file or from server.

        :return: job description.
        """
        log.info("Trying to get job description.")
        job_desc = self.try_get_json_file(self.args.job_description)
        if job_desc is None:
            log.info("Job description is not saved locally. Asking server.")
            cpu_info = cpuinfo.get_cpu_info()
            mem_info = psutil.virtual_memory()
            disk_space = float(psutil.disk_usage(".").total) / 1024. / 1024.
            # diskSpace = min(diskSpace, 14336)  # I doubt this is necessary, so RM

            data = {
                'cpu': float(cpu_info['hz_actual_raw'][0]) / 1000000.,
                'mem': float(mem_info.total) / 1024. / 1024.,
                'node': self.node_name,
                'diskSpace': disk_space,
                'getProxyKey': False,  # do we need it?
                'computingElement': self.args.queue,
                'siteName': self.args.queue,
                'workingGroup': '',  # do we need it?
                'prodSourceLabel': self.args.job_tag
            }

            _str = self.curl_query("https://%s:%d/server/panda/updateJob" % (self.args.jobserver,
                                                                             self.args.jobserver_port),
                                   ssl=True, body=urllib.urlencode(data))
            # log.debug("Got from server: "+_str)
            try:
                job_desc = json.loads(_str)
            except ValueError:
                log.error("JSON parser failed.")
                log.error("Got from server: "+_str)
                raise

        log.info("Got job description.")
        from job import Job
        job = Job(self, description_fixer(job_desc))
        return job
Example #12
 def reset(self):
     self.__data = {}
     from os.path import expanduser
     idFileName = expanduser("~") + '/.hidevid'
     fileExists = os.path.exists(idFileName)
     if ( fileExists ):
         idFileHandle = open(idFileName,'r')
         deviceId = idFileHandle.readline()
         deviceId = deviceId.strip()
         self.__data['id'] = deviceId
         idFileHandle.close()
     
     now = datetime.datetime.now()
     self.__data['seq'] = int(round(time.time() * 1000))
     self.__data['tsClient'] = now.isoformat()
     cinfo = cpuinfo.get_cpu_info()
     self.__data['cpu'] = format(cinfo['brand'])
     self.__data['cpuCount'] = cinfo['count']
     osinfo = OsInfo()
     oinfo = osinfo.getOsInfo(cinfo)
     self.__data['os'] = oinfo['os']
     self.__data['osDist'] = oinfo['dist']
     self.__data['osVersion'] = oinfo['version']
     self.__data['osArch'] = oinfo['arch']
     self.__data['osKernel'] = oinfo['kernel']
     self.__data['cpuTemp'] = cputemp.get_cpu_temp()
     l = Load()
     self.__data['cpuLoad'] = l.getCpuLoad()
     MemInfo = meminfo.getMemoryStatus()
     self.__data['memAvail'] = MemInfo['memAvail'] / 1024
     self.__data['memUsed'] = MemInfo['memUsed'] / 1024
     self.__data['swapAvail'] = MemInfo['swpAvail'] / 1024
     self.__data['swapUsed'] = MemInfo['swpUsed'] / 1024
     self.__data['storage'] = l.getStorageStatus()
     self.__data['network'] = netinfo.get_network_interfaces()
     CpuTimes = cputimes.get_cpu_times()
     self.__data['cpuUser'] = CpuTimes['user']
     self.__data['cpuSystem'] = CpuTimes['system']
     self.__data['cpuIdle'] = CpuTimes['idle']
     self.__data['ioWait'] = CpuTimes['iowt']
     self.__data['UpTime'] = CpuTimes['uptime']
Example #13
def _rsmanifest():
    from pykern import pkcollections
    from pykern import pkjson
    from pykern.pkcli import rsmanifest
    import cpuinfo
    import datetime
    import os
    import subprocess

    m = rsmanifest.read_all()
    m['sim'] = {
        'run': {
            'datetime': datetime.datetime.utcnow().isoformat(),
            'cpu_info': cpuinfo.get_cpu_info(),
            'pyenv': _pyenv_version(),
            #TODO(robnagler) can't include because of auth/credential
            # values in environment variables
            #'environ': pkcollections.Dict(os.environ),
        },
    }
    pkjson.dump_pretty(m, filename=rsmanifest.BASENAME)
Example #14
    def collect_system_information(self):
        values = {}
        mem = psutil.virtual_memory()
        values['memory_total'] = mem.total

        import cpuinfo
        cpu = cpuinfo.get_cpu_info()
        values['resources_limit'] = self.resources_limit
        values['cpu_name'] = cpu['brand']
        values['cpu'] = [cpu['hz_advertised_raw'][0], cpu['count']]
        values['nets'] = {}
        values['disks'] = {}
        values['gpus'] = {}
        values['boot_time'] = psutil.boot_time()

        try:
            for gpu_id, gpu in enumerate(aetros.cuda_gpu.get_ordered_devices()):
                gpu['available'] = gpu_id in self.enabled_gpus

                values['gpus'][gpu_id] = gpu
        except aetros.cuda_gpu.CudaNotImplementedException: pass

        for disk in psutil.disk_partitions():
            try:
                name = self.get_disk_name(disk[1])
                values['disks'][name] = psutil.disk_usage(disk[1]).total
            except Exception:
                # suppress Operation not permitted
                pass

        try:
            for id, net in psutil.net_if_stats().items():
                if 0 != id.find('lo') and net.isup:
                    self.nets.append(id)
                    values['nets'][id] = net.speed or 1000
        except Exception:
            # suppress Operation not permitted
            pass

        return values
Example #15
File: views.py Project: vpino/kds
def hdInfo():
	"""
    Informacion del hardware que posee la pc
    """
    
	hardware = {}
	hardware['uname'] = {}
	hardware['cpu'] = {}
	hardware['disk'] = {}
	hardware['memory'] = {}

	# System information
	uname = platform.uname()

	hardware['uname']['system'] = uname[0]
	hardware['uname']['name'] = uname[1]
	hardware['uname']['kernel'] = uname[2]
	hardware['uname']['arquitecture'] = uname[4]

	# Processor information
	cpu = cpuinfo.get_cpu_info()

	hardware['cpu']['brand'] = cpu.get('brand')
	hardware['cpu']['count'] = cpu.get('count')

	# Hard disk information
	disk = psutil.disk_usage('/')

	hardware['disk']['total'] = sizeof_fmt(disk.total)
	hardware['disk']['free'] = sizeof_fmt(disk.free)
	hardware['disk']['used'] = sizeof_fmt(disk.used)

	# Memory information
	memory = psutil.virtual_memory()

	hardware['memory']['total'] = sizeof_fmt(memory.total)
	hardware['memory']['used'] = sizeof_fmt(memory.active)
	hardware['memory']['free'] = sizeof_fmt(memory.inactive)
	
	return hardware
Example #16
def get_hardware_id():
	"""
	Get a unique hardware ID based on CPU info. Every time it is called, the value will be the same.

	>>> get_hardware_id()
	'f8ef57d64aae6f3c45200b39di422bd6ca625d9a79655cb3aa6e171ef6f93013aa16c2df2f2b0359dfaf1782ba6fda94300506cdd9b21fdaf1264fbd0e47abb89'

	:return: string with the hardware ID
	:rtype: str
	"""
	import cpuinfo

	_ci = cpuinfo.get_cpu_info()

	cpu_inf = ("%s%s%s%s" % (_ci['hz_actual'],
	                         _ci['brand'],
	                         "".join(_ci['flags']),
	                         _ci['arch'])).replace(" ", "")

	d = hashlib.sha512()
	d.update(cpu_inf.encode(errors="ignore"))

	return d.hexdigest()
Example #17
def version_table(print2screen=True):
    """
    This function returns the version numbers of the various pieces of software
    with which this module was tested.

    Notes
    -----
    In order for Hyperopt 0.1 to work, ``networkx`` had to be downgraded by
    running ``pip install networkx==1.11``. This is due to a bug that arises
    with Hyperopt when version 2.0 of ``networkx`` is installed.

    Also include:
        - conda install plotly

    Parameters
    ----------

    print2screen : bool
        Print the version table to screen (``True``) or return it as a
        dictionary (``False``)?

    Returns
    -------

    version_table : dict
        Dictionary containing the version table
    """

    # TODO: Add Keras

    version_table = {
        'Python': ('3.6.6', '.'.join(str(v) for v in version_info[0:3])),
        'Keras': ('2.1.5', ke_version),
        'TensorFlow': ('1.6.0', tf_version),
        'NumPy': ('1.14.5', np_version),
        'matplotlib': ('2.2.2', plt_version),
        'sklearn': ('0.20.1', sk_version),
        'PyQt5': ('5.6.2', None),
        'pandas': ('0.23.3', pd_version),
        'Hyperopt': ('0.1', hp_version),
        'OS': ('Linux-4.13.0-17-generic-x86_64-with-debian-stretch-sid',
               platform.platform()),
        'CPU': ('Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz',
                cpuinfo.get_cpu_info()['brand']),
        'CUDA': ('8.0.44', None),
        'GPU': ('NVIDIA GeForce GTX', None)
    }

    if print2screen:

        # Maximum length of the software names
        pad = max(map(lambda x: len(x), version_table))

        # Print the table.
        print('software'.rjust(pad), ': baseline', sep='')
        print(''.rjust(pad), '  current', sep='')
        for k in sorted(version_table.keys()):
            print(k.rjust(pad), ': ', version_table[k][0], sep='')
            print(''.rjust(pad), '  ', version_table[k][1], sep='')

    return version_table
Example #18
import os
import sys
import logging
import ConfigParser

import cpuinfo
import cv2
from flask import Flask
import pyttsx
import vlc

if "X86_64" == cpuinfo.get_cpu_info()["arch"]:
    import getchar as interface

config = ConfigParser.ConfigParser()
tts_engine = None
face_cascade = None
video = None
vlc_instance = None
player = None
stay_alive = True  # Keep the program running?
speaking = False  # Am I talking when I detect someone else's presence?


def initialize():
    """Initialize program"""
    global video
    global vlc_instance
    global tts_engine
    global player
    global face_cascade
Example #19
def get_cpu_info():
    # Obtain textual cpu info
    try:
        with open('/proc/cpuinfo', 'r') as f:
            lines = f.readlines()
    except FileNotFoundError:
        lines = []

    cpu_info = {}

    # Extract CPU flags and branch
    if lines:
        try:
            get = lambda k: [i for i in lines
                             if i.startswith(k)][0].split(':')[1].strip()
            cpu_info['flags'] = get('flags').split()
            cpu_info['brand'] = get('model name')
        except IndexError:
            # The /proc/cpuinfo format doesn't follow a standard, and on some
            # more or less exotic combinations of OS and platform it might not
            # be what we expect, hence ending up here
            pass
    if not all(i in cpu_info for i in ('flags', 'brand')):
        # Fallback
        ci = cpuinfo.get_cpu_info()
        cpu_info['flags'] = ci.get('flags')
        cpu_info['brand'] = ci.get('brand')

    # Detect number of logical cores
    logical = psutil.cpu_count(logical=True)
    if not logical:
        # Never bumped into a platform that make us end up here, yet
        # But we try to cover this case anyway, with `lscpu`
        try:
            logical = lscpu()['CPU(s)']
        except KeyError:
            warning("Logical core count autodetection failed")
            logical = 1
    cpu_info['logical'] = logical

    # Detect number of physical cores
    # TODO: on multi-socket systems + unix, can't use psutil due to
    # `https://github.com/giampaolo/psutil/issues/1558`
    mapper = {}
    if lines:
        # Copied and readapted from psutil
        current_info = {}
        for i in lines:
            line = i.strip().lower()
            if not line:
                # New section
                if ('physical id' in current_info
                        and 'cpu cores' in current_info):
                    mapper[current_info['physical id']] = current_info[
                        'cpu cores']
                current_info = {}
            else:
                # Ongoing section
                if (line.startswith('physical id')
                        or line.startswith('cpu cores')):
                    key, value = line.split('\t:', 1)
                    current_info[key] = int(value)
    physical = sum(mapper.values())
    if not physical:
        # Fallback 1: it should now be fine to use psutil
        physical = psutil.cpu_count(logical=False)
        if not physical:
            # Fallback 2: we might end up here on more exotic platforms such as Power8.
            # Hopefully we can rely on `lscpu`
            try:
                physical = lscpu()['Core(s) per socket'] * lscpu()['Socket(s)']
            except KeyError:
                warning("Physical core count autodetection failed")
                physical = 1
    cpu_info['physical'] = physical

    return cpu_info
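
The fallbacks above call an lscpu() helper that is not defined in this snippet. A minimal sketch, assuming it parses the 'Key: value' lines printed by the lscpu command into a dict, coercing numeric values to int (the original helper may differ):

import subprocess

def lscpu():
    # Parse `lscpu` output into a dict, e.g. {'CPU(s)': 8, 'Socket(s)': 1}.
    info = {}
    for line in subprocess.check_output(['lscpu']).decode().splitlines():
        if ':' not in line:
            continue
        key, value = line.split(':', 1)
        value = value.strip()
        try:
            value = int(value)
        except ValueError:
            pass
        info[key.strip()] = value
    return info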
Example #20
cpu = cpuinfo()
platform = platforminfo()

if __name__ == "__main__":

    _VALUE_MAPPING = {"CPUInfoBase__get_nbits": "nbits", "getNCPUs": "ncpus"}

    cpu_type = {}
    for name in dir(cpuinfo):
        if name[0] == "_" and name[1] != "_":
            name = name[1:]  # Avoid leading _
            r = getattr(cpu, name)()
            cpu_type[_VALUE_MAPPING.get(name, name)] = r

    cpu_info = other_cpu_info.get_cpu_info()
    cpu_info.pop("python_version")

    cpu_features = {}
    keys = frozenset(
        ("hz_advertised", "hz_actual", "hz_advertised_raw", "hz_actual_raw",
         "l3_cache_size", "l2_cache_size", "l1_data_cache_size",
         "l1_instruction_cache_size", "flags"))
    for key in keys:
        cpu_features[key] = cpu_info.pop(key, None)

    dir_name = os.path.dirname(_OUTPUT_JSON_PATH)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)

    with open(_OUTPUT_JSON_PATH, 'w') as hw_info_file:
Example #21
# Accept an incoming connection
(conexao, address) = server.accept()
print('Connected to:', str(address))

while True:
    # Receive the client's request
    data = conexao.recv(2048)

    if data.decode('utf-8') == '1':
        request = data.decode('utf-8')
        print('Request for option', request, 'received successfully!')
        print('Processing...')
        list_percent_cpu = psutil.cpu_percent(interval=1, percpu=True)
        dict_info_cpu = cpuinfo.get_cpu_info()
        num_cores = psutil.cpu_count(logical=True)
        # Build the response list
        response = []
        response.append(list_percent_cpu)
        response.append(dict_info_cpu)
        response.append(num_cores)
        # Prepare the list for sending
        bytes_resp = pickle.dumps(response)
        # Send the data
        conexao.send(bytes_resp)
        print('Response for option', request, 'sent successfully!\n')

    if data.decode('utf-8') == '2':
        request = data.decode('utf-8')
        print('Request for option', request, 'received successfully!')
Example #22
def cpu_model():
    """
    :return: The CPU model name.
    """
    return cpuinfo.get_cpu_info()["brand"] if cpuinfo_available else "Unknown"
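
cpu_model guards on a cpuinfo_available flag that is defined elsewhere in the module. A sketch of the usual import-guard pattern assumed here:

try:
    import cpuinfo
    cpuinfo_available = True
except ImportError:
    cpuinfo_available = False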
Example #23
	def test_all(self):
		os_type = helpers.get_os_type()

		if os_type == 'BeOS':
			self.assertEqual(None, cpuinfo.get_cpu_info_from_registry())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_proc_cpuinfo())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysctl())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_kstat())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_dmesg())
			self.assertHasResult(cpuinfo.get_cpu_info_from_sysinfo())
			self.assertHasResult(cpuinfo.get_cpu_info_from_cpuid())
			self.assertHasResult(cpuinfo.get_cpu_info())
		elif os_type == 'BSD':
			self.assertEqual(None, cpuinfo.get_cpu_info_from_registry())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_proc_cpuinfo())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysctl())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_kstat())
			self.assertHasResult(cpuinfo.get_cpu_info_from_dmesg())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysinfo())
			# FIXME: This fails by segfaulting for some reason
			self.assertEqual(None, cpuinfo.get_cpu_info_from_cpuid())
			self.assertHasResult(cpuinfo.get_cpu_info())
		elif os_type == 'Cygwin':
			self.assertEqual(None, cpuinfo.get_cpu_info_from_registry())
			self.assertHasResult(cpuinfo.get_cpu_info_from_proc_cpuinfo())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysctl())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_kstat())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_dmesg())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysinfo())
			# FIXME: This fails by segfaulting for some reason
			self.assertEqual(None, cpuinfo.get_cpu_info_from_cpuid())
			self.assertHasResult(cpuinfo.get_cpu_info())
		elif os_type == 'MacOS':
			self.assertEqual(None, cpuinfo.get_cpu_info_from_registry())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_proc_cpuinfo())
			self.assertHasResult(cpuinfo.get_cpu_info_from_sysctl())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_kstat())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_dmesg())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysinfo())
			# FIXME: This fails by segfaulting for some reason
			self.assertEqual(None, cpuinfo.get_cpu_info_from_cpuid())
			self.assertHasResult(cpuinfo.get_cpu_info())
		elif os_type == 'Linux':
			self.assertEqual(None, cpuinfo.get_cpu_info_from_registry())
			self.assertHasResult(cpuinfo.get_cpu_info_from_proc_cpuinfo())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysctl())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_kstat())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_dmesg())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysinfo())
			self.assertHasResult(cpuinfo.get_cpu_info_from_cpuid())
			self.assertHasResult(cpuinfo.get_cpu_info())
		elif os_type == 'Solaris':
			self.assertEqual(None, cpuinfo.get_cpu_info_from_registry())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_proc_cpuinfo())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysctl())
			self.assertHasResult(cpuinfo.get_cpu_info_from_kstat())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_dmesg())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysinfo())
			# FIXME: This fails by segfaulting for some reason
			self.assertEqual(None, cpuinfo.get_cpu_info_from_cpuid())
			self.assertHasResult(cpuinfo.get_cpu_info())
		elif os_type == 'Windows':
			self.assertHasResult(cpuinfo.get_cpu_info_from_registry())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_proc_cpuinfo())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysctl())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_kstat())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_dmesg())
			self.assertEqual(None, cpuinfo.get_cpu_info_from_sysinfo())
			self.assertHasResult(cpuinfo.get_cpu_info_from_cpuid())
			self.assertHasResult(cpuinfo.get_cpu_info())
		else:
			raise AssertionError('Unexpected OS type "{0}".'.format(os_type))

		self.assertEqual(True, True)
Example #24
def cpu_model():
    """
    :return: The CPU model name.
    """
    return cpuinfo.get_cpu_info()["brand"]
Example #25
def start_command(logger, job_backend, env_overwrite=None, volumes=None, cpus=1, memory=1,
                  gpu_devices=None, offline=False):

    home_config = read_home_config()

    env = {}
    if env_overwrite:
        env.update(env_overwrite)

    start_time = time.time()
    env['AETROS_MODEL_NAME'] = job_backend.model_name
    env['AETROS_JOB_ID'] = str(job_backend.job_id)
    env['AETROS_OFFLINE'] = '1' if offline else ''
    env['AETROS_GIT_INDEX_FILE'] = job_backend.git.index_path
    env['DEBUG'] = os.getenv('DEBUG', '')
    env['PYTHONUNBUFFERED'] = os.getenv('PYTHONUNBUFFERED', '1')
    env['PYTHONIOENCODING'] = os.getenv('PYTHONIOENCODING', 'UTF-8')
    env['AETROS_ATTY'] = '1'
    env['AETROS_GIT'] = job_backend.git.get_base_command()

    env['PATH'] = os.getenv('PATH', '')
    if 'PYTHONPATH' not in env:
        env['PYTHONPATH'] = os.getenv('PYTHONPATH', '')

    if os.getenv('AETROS_SSH_KEY_BASE64'):
        env['AETROS_SSH_KEY_BASE64'] = os.getenv('AETROS_SSH_KEY_BASE64')
    elif get_ssh_key_for_host(home_config['host']):
        # we need to read the key into env so the docker container can connect to AETROS
        env['AETROS_SSH_KEY_BASE64'] = open(get_ssh_key_for_host(home_config['host']), 'r').read()

    job_config = job_backend.job['config']
    job = job_backend.get_job_model()

    if 'command' not in job_config:
        job_backend.fail('No "command" given. See Configuration section in the documentation.')

    job_commands = job_config['command']
    docker_image = job_config['image']

    if job_backend.is_simple_model():
        if docker_image:
            simple_command = ['python']
        else:
            simple_command = [sys.executable]

        simple_command += ['-m', 'aetros', 'start-simple', job_backend.model_name + '/' + job_backend.job_id]
        job_commands = {'run': ' '.join(simple_command)}

    if job_commands is None:
        raise Exception('No command specified.')

    if not isinstance(job_commands, list) and not isinstance(job_commands, dict):
        job_commands = [job_commands]

    # replace {{batch_size}} parameters
    if isinstance(job_config['parameters'], dict):
        for key, value in six.iteritems(flatten_parameters(job_config['parameters'])):
            if isinstance(job_commands, list):
                for k, v in enumerate(job_commands):
                    if isinstance(job_commands[k], six.string_types):
                        job_commands[k] = job_commands[k].replace('{{' + key + '}}', simplejson.dumps(value))

            elif isinstance(job_commands, dict):
                for k, v in six.iteritems(job_commands):
                    if isinstance(job_commands[k], six.string_types):
                        job_commands[k] = job_commands[k].replace('{{' + key + '}}', simplejson.dumps(value))

    job_backend.set_system_info('commands', job_commands)
    os.chdir(job_backend.git.work_tree)

    docker_image_built = False

    if docker_image and (job_config['dockerfile'] or job_config['install']):
        rebuild_image = job_config['rebuild_image'] if 'rebuild_image' in job_config else False
        docker_image = docker_build_image(logger, home_config, job_backend, rebuild_image)
        docker_image_built = True

    job_backend.collect_device_information(gpu_devices)

    state = {'last_process': None}
    job_backend.set_system_info('processRunning', False, True)

    def pause():
        if not state['last_process'] or state['last_process'].poll() is not None:
            # no running process
            return

        if docker_image:
            if docker_pause(logger, home_config, job_backend):
                job_backend.set_paused(True)
        else:
            os.killpg(os.getpgid(state['last_process'].pid), signal.SIGSTOP)
            job_backend.set_paused(True)

    def cont():
        if not state['last_process'] or state['last_process'].poll() is not None:
            # no running process
            return

        job_backend.set_paused(False)
        if docker_image:
            docker_continue(logger, home_config, job_backend)
        else:
            os.killpg(os.getpgid(state['last_process'].pid), signal.SIGCONT)

    job_backend.on_pause = pause
    job_backend.on_continue = cont

    if docker_image:
        env['AETROS_GIT_INDEX_FILE'] = '/aetros/' + job_backend.model_name + '.git/' + os.path.basename(env['AETROS_GIT_INDEX_FILE'])

        with job_backend.git.batch_commit('JOB_SYSTEM_INFORMATION'):
            aetros_environment = {'aetros_version': __version__, 'variables': env.copy()}
            if 'AETROS_SSH_KEY' in aetros_environment['variables']: del aetros_environment['variables']['AETROS_SSH_KEY']
            if 'AETROS_SSH_KEY_BASE64' in aetros_environment['variables']: del aetros_environment['variables']['AETROS_SSH_KEY_BASE64']
            job_backend.set_system_info('environment', aetros_environment)

            job_backend.set_system_info('memory_total', memory * 1024 * 1024 * 1024)

            import cpuinfo
            cpu = cpuinfo.get_cpu_info()
            job_backend.set_system_info('cpu_name', cpu['brand'])
            job_backend.set_system_info('cpu', [cpu['hz_actual_raw'][0], cpus])

        job_backend.start_monitoring(cpu_cores=cpus, gpu_devices=gpu_devices, docker_container=job_backend.job_id)

        if not docker_image_built:
            docker_pull_image(logger, home_config, job_backend)

        docker_image_information(logger, home_config, job_backend)

        # make sure old container is removed
        subprocess.Popen([home_config['docker'], 'rm', job_backend.job_id], stderr=subprocess.PIPE).wait()

        command = docker_command_wrapper(logger, home_config, job_backend, volumes, cpus, memory, gpu_devices, env)

        # since Linux doesn't deliver SIGINT when the pid=1 process has no signal handler installed,
        # we need to make sure we attach one to the pid=1 process
        trap = 'trapIt () { "$@"& pid="$!"; trap "kill -INT $pid" INT TERM; ' \
               'while kill -0 $pid > /dev/null 2>&1; do wait $pid; ec="$?"; done; exit $ec;};'

        command.append(docker_image)
        command += ['/bin/sh', '-c', trap + 'trapIt /bin/sh /job/aetros/command.sh']
    else:
        # non-docker
        # env['PYTHONPATH'] += ':' + os.getcwd()
        job_backend.collect_system_information()
        job_backend.collect_environment(env)
        job_backend.start_monitoring(gpu_devices=gpu_devices)

        command = ['/bin/sh', job_backend.git.work_tree + '/aetros/command.sh']

    logger.debug("$ %s " % (' '.join([simplejson.dumps(a) for a in command])))
    job_backend.set_system_info('image/name', str(docker_image))

    p = None
    exited = False
    last_return_code = None
    state['last_process'] = None
    all_done = False
    command_stats = None
    files = job_backend.file_list()

    def clean():
        # clear working tree
        shutil.rmtree(job_backend.git.work_tree)

    def on_force_exit():
        # make sure the process dies
        clean()

        with open(os.devnull, 'r+b', 0) as DEVNULL:
            if docker_image:
                # docker run does not proxy INT signals to the docker-engine,
                # so we need to do it on our own directly.
                subprocess.Popen(args=[home_config['docker'], 'kill', job_backend.job_id], stdout=DEVNULL, stderr=DEVNULL).wait()
            elif not exited and state['last_process'] and state['last_process'].poll() is None:
                # wait for last command
                os.killpg(os.getpgid(state['last_process'].pid), signal.SIGKILL)

    job_backend.on_force_exit = on_force_exit

    try:
        job_backend.set_status('STARTED', add_section=False)
        # logger.warning("$ %s " % (str(command),))

        # make sure maxTime limitation is correctly calculated
        job_backend.monitoring_thread.handle_max_time = True
        job_backend.monitoring_thread.handle_max_time_time = time.time()

        # Since JobBackend sends SIGINT to its current process group, it also sends it to its parents
        # when they share the same process group. We need to change the process group of the child
        # process so this won't happen. Otherwise, the master process (e.g. the server command)
        # receives the SIGINT as well.
        kwargs = {}
        if os.name == 'nt':
            kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
        else:
            kwargs['preexec_fn'] = os.setsid

        # only use full env when no image used

        command_env = env
        if not docker_image:
            command_env = os.environ.copy()
            command_env.update(env)
            if os.environ.get('LD_LIBRARY_PATH', None):
                command_env['LD_LIBRARY_PATH_ORI'] = command_env['LD_LIBRARY_PATH']

        def write_command_sh(job_command):
            f = open(job_backend.git.work_tree + '/aetros/command.sh', 'w+')

            if not docker_image:
                # new shells unset LD_LIBRARY_PATH automatically, so we make sure it will be there again
                f.write('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH_ORI;\n')

            if job.get_working_dir():
                f.write('cd %s;\n' % (job.get_working_dir(),))

            f.write(job_command)
            f.close()

        def read_line(line):
            handled, filtered_line, failed = extract_api_calls(line, job_backend.handle_stdout_api, logger=logger)

            if is_debug():
                for call in handled:
                    logger.debug('STDOUT API CALL: ' + str(call))

            for fail in failed:
                logger.warning("API call failed '%s': %s %s"
                               % (str(fail['line']), str(type(fail['exception']).__name__), str(fail['exception'])))

            return filtered_line

        def exec_command(index, command, job_command):
            write_command_sh(job_command)
            print('%s $ %s' % ('/' + job.get_working_dir(), job_command.strip()))
            args = command
            logger.debug('$ ' + ' '.join([simplejson.dumps(a) for a in args]))

            command_stats[index]['started'] = time.time() - start_time
            job_backend.set_system_info('command_stats', command_stats, True)

            # important to prefix it, otherwise name='master' would reset all stats in controller backend
            command_env['AETROS_JOB_NAME'] = 'command_' + str(index)

            state['last_process'] = subprocess.Popen(
                args=args, bufsize=0, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=command_env, **kwargs
            )
            job_backend.set_system_info('processRunning', True, True)
            wait_stdout = sys.stdout.attach(state['last_process'].stdout, read_line=read_line)
            wait_stderr = sys.stderr.attach(state['last_process'].stderr)
            state['last_process'].wait()
            command_stats[index]['rc'] = last_return_code
            command_stats[index]['ended'] = time.time() - start_time
            job_backend.set_system_info('command_stats', command_stats, True)
            job_backend.set_system_info('processRunning', True, False)
            wait_stdout()
            wait_stderr()
            # make sure a new line is printed after a command
            print("")

            return state['last_process']

        done = 0
        total = len(job_commands)
        job_backend.set_system_info('command_stats', command_stats, True)
        if isinstance(job_commands, list):
            command_stats = [{'rc': None, 'started': None, 'ended': None} for x in job_commands]
            for k, job_command in enumerate(job_commands):
                job_backend.set_status('Command ' + str(k+1))

                p = exec_command(k, command, job_command)
                last_return_code = p.poll()

                if last_return_code == 0:
                    done += 1
                else:
                    # one failed, so exit and don't execute next
                    break

        if isinstance(job_commands, dict):
            command_stats = {}
            for name, job_command in six.iteritems(job_commands):
                command_stats[name] = {'rc': None, 'started': None, 'ended': None}

            for name, job_command in six.iteritems(job_commands):
                job_backend.set_status('Command ' + name)

                p = exec_command(name, command, job_command)
                last_return_code = p.poll()

                if last_return_code == 0:
                    done += 1
                else:
                    # one failed, so exit and don't execute next
                    break

        all_done = done == total
        exited = True

        if state['last_process']:
            sys.exit(state['last_process'].poll())
        else:
            sys.exit(1)

    except SystemExit:
        # since we started the command in a new process group, a SIGINT or CTRL+C on this process won't affect
        # our actual command process. So we need to take care that we stop everything.
        logger.debug("SystemExit, exited=%s, all-done=%s, has-last-process=%s, pid=%s" %(
            str(exited),
            str(all_done),
            state['last_process'] is not None,
            state['last_process'].poll() if state['last_process'] else None
        ))

        # make sure the process dies
        if docker_image:
            # docker run does not proxy INT signals to the docker-engine,
            # so we need to do it on our own directly.
            p = subprocess.Popen(args=[home_config['docker'], 'inspect', job_backend.job_id],
                stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            p.wait()
            if p.poll() == 0:
                subprocess.Popen(args=[home_config['docker'], 'kill', job_backend.job_id]).wait()
        elif not exited and state['last_process'] and state['last_process'].poll() is None:
            # wait for last command
            os.killpg(os.getpgid(state['last_process'].pid), signal.SIGINT)
            state['last_process'].wait()

        if 'output' in job_config and job_config['output']:
            upload_output_files(job_backend, job_config['output'])

        if exited:
            if all_done:
                job_backend.stop(progress=JOB_STATUS.PROGRESS_STATUS_DONE)
            else:
                job_backend.stop(progress=JOB_STATUS.PROGRESS_STATUS_FAILED)
        else:
            # master received SIGINT before all job commands exited.
            if not job_backend.in_early_stop:
                # in_early_stop indicates whether we want a planned stop (a maxTime limitation,
                # for example), which should mark the job as done rather than aborted.
                # If it is not set, the master received a SIGINT without early_stop, so mark the job as aborted.
                job_backend.abort()
            else:
                # let the on_shutdown listener handle the rest
                pass

        clean()
Example #26
def make_report_backend(folder, as_df=False):
    """
    Looks into a folder for dumped files after
    the unit tests.

    :param folder: dump folder, it should contain files *.bench*
    :param as_df: returns a dataframe instead of a list of dictionaries
    :return: time execution
    """
    import onnx
    import onnxruntime
    import cpuinfo

    res = {}
    benched = 0
    files = os.listdir(folder)
    for name in files:
        if name.endswith(".expected.pkl"):
            model = name.split(".")[0]
            if model not in res:
                res[model] = {}
            res[model]["_tested"] = True
        elif ".backend." in name:
            bk = name.split(".backend.")[-1].split(".")[0]
            model = name.split(".")[0]
            if model not in res:
                res[model] = {}
            res[model][bk] = True
        elif name.endswith(".err"):
            model = name.split(".")[0]
            fullname = os.path.join(folder, name)
            with open(fullname, "r", encoding="utf-8") as f:
                content = f.read()
            error = content.split("\n")[0].strip("\n\r ")
            if model not in res:
                res[model] = {}
            res[model]["stderr"] = error
        elif name.endswith(".model.pkl"):
            model = name.split(".")[0]
            if model not in res:
                res[model] = {}
            res[model].update(stat_model_skl(os.path.join(folder, name)))
        elif name.endswith(".model.onnx"):
            model = name.split(".")[0]
            if model not in res:
                res[model] = {}
            res[model].update(stat_model_onnx(os.path.join(folder, name)))
        elif name.endswith(".bench"):
            model = name.split(".")[0]
            fullname = os.path.join(folder, name)
            df = pandas.read_csv(fullname, sep=",")
            if model not in res:
                res[model] = {}
            for index, row in df.iterrows():
                name = row["name"]
                ave = row["average"]
                std = row["deviation"]
                size = row["input_size"]
                res[model]["{0}_time".format(name)] = ave
                res[model]["{0}_std".format(name)] = std
                res[model]["input_size"] = size
                benched += 1

    if benched == 0:
        raise RuntimeError("No benchmark files in '{0}', found:\n{1}".format(
            folder, "\n".join(files)))

    def dict_update(d, u):
        d.update(u)
        return d

    aslist = [dict_update(dict(_model=k), v) for k, v in res.items()]

    if as_df:
        from pandas import DataFrame

        df = DataFrame(aslist).sort_values(["_model"])
        df["numpy-version"] = numpy.__version__
        df["onnx-version"] = onnx.__version__
        df["onnxruntime-version"] = onnxruntime.__version__
        cols = list(df.columns)
        if "stderr" in cols:
            ind = cols.index("stderr")
            del cols[ind]
            cols += ["stderr"]
            df = df[cols]
        for col in ["onnxrt_time", "original_time"]:
            if col not in df.columns:
                raise RuntimeError("Column '{0}' is missing from {1}".format(
                    col, ", ".join(df.columns)))
        df["ratio"] = df["onnxrt_time"] / df["original_time"]
        df["ratio_nodes"] = df["nb_onnx_nodes"] / df["nb_estimators"]
        df["CPU"] = platform.processor()
        df["CPUI"] = cpuinfo.get_cpu_info()["brand"]
        return df
    else:
        cpu = cpuinfo.get_cpu_info()["brand"]
        proc = platform.processor()
        for row in aslist:
            try:
                row["ratio"] = row["onnxrt_time"] / row["original_time"]
            except KeyError:
                # execution failed
                pass
            try:
                row["ratio_nodes"] = (row["nb_onnx_nodes"] /
                                      row["nb_estimators"])
            except KeyError:
                # execution failed
                pass
            row["CPU"] = proc
            row["CPUI"] = cpu
            row["numpy-version"] = numpy.__version__
            row["onnx-version"] = onnx.__version__
            row["onnxruntime-version"] = onnxruntime.__version__
        return aslist
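
For reference, make_report_backend reads each *.bench file with pandas.read_csv and uses the columns name, average, deviation and input_size; the ratio computation further expects name values 'original' and 'onnxrt'. An illustrative file (values made up) would therefore look like:

name,average,deviation,input_size
original,0.0012,0.0001,1000
onnxrt,0.0004,0.00005,1000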
Example #27
timeout = 5 # Socket timeout
resourcesFolder = "PCMiner_resources"
shares = [0, 0]
diff = 0
last_hash_count = 0
khash_count = 0
hash_count = 0
hash_mean = []
donatorrunning = False
debug = True
serveripfile = "https://mhcoin.s3.filebase.com/Pool.txt" # Serverip file
config = configparser.ConfigParser()
autorestart = 0
donationlevel = 0
freeze_support() # If not used, pyinstaller hangs when checking cpuinfo
cpu = cpuinfo.get_cpu_info() # Processor info

if not os.path.exists(resourcesFolder):
    os.mkdir(resourcesFolder) # Create resources folder if it doesn't exist

def debugOutput(text):
  if debug:
    print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)

def title(title):
  if os.name == 'nt':
    os.system("title "+title)
  else:
    print('\33]0;'+title+'\a', end='')
    sys.stdout.flush()
Example #28
 def handleInfoCommand(self):
     info = cpuinfo.get_cpu_info()
     response = self.build_message(info)
     self.write_message(response)
Example #29
    def update(self):
        """ Gets the latest data and updates the state. """
        from cpuinfo import cpuinfo

        self.info = cpuinfo.get_cpu_info()
        self._state = round(float(self.info['hz_actual_raw'][0])/10**9, 2)
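
The expression above converts py-cpuinfo's hz_actual_raw (whose first element is the frequency in Hz) to GHz, rounded to two decimals. A quick worked example, assuming hz_actual_raw[0] == 2700000000:

>>> round(float(2700000000) / 10**9, 2)
2.7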
Example #30
with open(sys.argv[1], "rb") as fd:
    # tediously derived from the monster struct defined in <hdreg.h>
    # see comment at end of file to verify
    hd_driveid_format_str = "@ 10H 20s 3H 8s 40s 2B H 2B H 4B 6H 2B I 36H I Q 152H"
    # Also from <hdreg.h>
    HDIO_GET_IDENTITY = 0x030d
    # How big a buffer do we need?
    sizeof_hd_driveid = struct.calcsize(hd_driveid_format_str)

    # ensure our format string is the correct size
    # 512 is extracted using sizeof(struct hd_id) in the c code
    assert sizeof_hd_driveid == 512

    # Call native function
    buf = fcntl.ioctl(fd, HDIO_GET_IDENTITY, " " * sizeof_hd_driveid)
    fields = struct.unpack(hd_driveid_format_str, buf)
    hdd = fields[15].strip()

cpu = cpuinfo.get_cpu_info()['brand']

print(cpu)
print("Hard Drive Model: {0}".format(hdd))

info = {
    'CPU': cpu,
    'HDD': hdd,
}

with open('hardware.pkl', 'wb') as pkl:
    pickle.dump(info, pkl)
Example #31
def benchmark_against_others(repo_root: pathlib.Path, overwrite: bool) -> None:
    """Run benchmars against other libraries and include them in the Readme."""
    script_rel_paths = [
        "benchmarks/compare_with_others.py",
    ]

    if not overwrite:
        for i, script_rel_path in enumerate(script_rel_paths):
            if i > 0:
                print()
            subprocess.check_call([sys.executable, str(repo_root / script_rel_path)])
    else:
        out = ["The following scripts were run:\n\n"]
        for script_rel_path in script_rel_paths:
            out.append(
                "* `{0} <https://github.com/Parquery/icontract/tree/master/{0}>`_\n".format(
                    script_rel_path
                )
            )
        out.append("\n")

        out.append(
            (
                "The benchmarks were executed on {}.\nWe used Python {}, "
                "icontract {}, deal 4.4.0 and dpcontracts 0.6.0.\n\n"
            ).format(
                cpuinfo.get_cpu_info()["brand"],
                platform.python_version(),
                icontract.__version__,
            )
        )

        out.append("The following tables summarize the results.\n\n")
        stdouts = []  # type: List[str]

        for script_rel_path in script_rel_paths:
            stdout = subprocess.check_output(
                [sys.executable, str(repo_root / script_rel_path)]
            ).decode()
            stdouts.append(stdout)

            out.append(stdout)
            out.append("\n")

        readme_path = repo_root / "README.rst"
        readme = readme_path.read_text(encoding="utf-8")
        marker_start = ".. Benchmark report starts."
        marker_end = ".. Benchmark report ends."
        lines = readme.splitlines()

        try:
            index_start = lines.index(marker_start)
        except ValueError as exc:
            raise ValueError(
                "Could not find the marker for the benchmarks in the {}: {}".format(
                    readme_path, marker_start
                )
            ) from exc

        try:
            index_end = lines.index(marker_end)
        except ValueError as exc:
            raise ValueError(
                "Could not find the start marker for the benchmarks in the {}: {}".format(
                    readme_path, marker_end
                )
            ) from exc

        assert (
            index_start < index_end
        ), "Unexpected end marker before start marker for the benchmarks."

        lines = (
            lines[: index_start + 1]
            + ["\n"]
            + ("".join(out)).splitlines()
            + ["\n"]
            + lines[index_end:]
        )
        readme_path.write_text("\n".join(lines) + "\n", encoding="utf-8")

        # This is necessary so that the benchmarks do not complain on a Windows machine if the console encoding has not
        # been properly set.
        sys.stdout.buffer.write(("\n\n".join(stdouts) + "\n").encode("utf-8"))
Example #32
import sys

# Import cpuinfo.py from up one directory
sys.path.append('../cpuinfo')

if __name__ == '__main__':
	from cpuinfo import get_cpu_info

	print(get_cpu_info())
Example #33
def run_unit_tests(path=PISA_PATH,
                   allow_missing=OPTIONAL_MODULES,
                   verbosity=Levels.WARN):
    """Run all tests found at `path` (or recursively below if `path` is a
    directory).

    Each module is imported and each test function is run initially with
    `set_verbosity(verbosity)`, but if an exception is caught, the module is
    re-imported or the test function is re-run with
    `set_verbosity(Levels.TRACE)`, then the traceback from the (original)
    exception emitted is displayed.

    Parameters
    ----------
    path : str
        Path to file or directory

    allow_missing : None or sequence of str

    verbosity : int in pisa.utils.log.Levels

    Raises
    ------
    Exception
        If any import or test fails not in `allow_missing`

    """
    set_verbosity(verbosity)
    logging.info("%sPlatform information:", PFX)
    logging.info("%s  HOSTNAME = %s", PFX, socket.gethostname())
    logging.info("%s  FQDN = %s", PFX, socket.getfqdn())
    logging.info("%s  OS = %s %s", PFX, platform.system(), platform.release())
    for key, val in cpuinfo.get_cpu_info().items():
        logging.info("%s  %s = %s", PFX, key, val)
    logging.info(PFX)
    logging.info("%sModule versions:", PFX)
    for module_name in REQUIRED_MODULES + OPTIONAL_MODULES:
        try:
            module = import_module(module_name)
        except ImportError:
            if module_name in REQUIRED_MODULES:
                raise
            ver = "optional module not installed or not import-able"
        else:
            if hasattr(module, "__version__"):
                ver = module.__version__
            else:
                ver = "?"
        logging.info("%s  %s : %s", PFX, module_name, ver)
    logging.info(PFX)

    path = expand(path, absolute=True, resolve_symlinks=True)
    if allow_missing is None:
        allow_missing = []
    elif isinstance(allow_missing, str):
        allow_missing = [allow_missing]

    tests = find_unit_tests(path)

    module_pypaths_succeeded = []
    module_pypaths_failed = []
    module_pypaths_failed_ignored = []
    test_pypaths_succeeded = []
    test_pypaths_failed = []
    test_pypaths_failed_ignored = []

    for rel_file_path, test_func_names in tests.items():
        pypath = ["pisa"] + rel_file_path[:-3].split("/")
        parent_pypath = ".".join(pypath[:-1])
        module_name = pypath[-1].replace(".", "_")
        module_pypath = f"{parent_pypath}.{module_name}"

        try:
            set_verbosity(verbosity)
            logging.info(PFX + f"importing {module_pypath}")

            set_verbosity(Levels.WARN)
            module = import_module(module_pypath, package=parent_pypath)

        except Exception as err:
            if (isinstance(err, ImportError) and hasattr(err, "name")
                    and err.name in allow_missing  # pylint: disable=no-member
                ):
                err_name = err.name  # pylint: disable=no-member
                module_pypaths_failed_ignored.append(module_pypath)
                logging.warning(
                    f"{PFX}module {err_name} failed to import wile importing"
                    f" {module_pypath}, but ok to ignore")
                continue

            module_pypaths_failed.append(module_pypath)

            set_verbosity(verbosity)
            msg = f"<< FAILURE IMPORTING : {module_pypath} >>"
            logging.error(PFX + "=" * len(msg))
            logging.error(PFX + msg)
            logging.error(PFX + "=" * len(msg))

            # Reproduce the failure with full output
            set_verbosity(Levels.TRACE)
            try:
                import_module(module_pypath, package=parent_pypath)
            except Exception:
                pass

            set_verbosity(Levels.TRACE)
            logging.exception(err)

            set_verbosity(verbosity)
            logging.error(PFX + "#" * len(msg))

            continue

        else:
            module_pypaths_succeeded.append(module_pypath)

        for test_func_name in test_func_names:
            test_pypath = f"{module_pypath}.{test_func_name}"
            try:
                set_verbosity(verbosity)
                logging.debug(PFX + f"getattr({module}, {test_func_name})")

                set_verbosity(Levels.WARN)
                test_func = getattr(module, test_func_name)

                # Run the test function
                set_verbosity(verbosity)
                logging.info(PFX + f"{test_pypath}()")

                set_verbosity(Levels.WARN)
                test_func()

            except Exception as err:
                if (isinstance(err, ImportError) and hasattr(err, "name")
                        and err.name in allow_missing  # pylint: disable=no-member
                    ):
                    err_name = err.name  # pylint: disable=no-member
                    test_pypaths_failed_ignored.append(test_pypath)
                    logging.warning(
                        PFX +
                        f"{test_pypath} failed because module {err_name} failed to"
                        " load, but ok to ignore")

                    continue

                test_pypaths_failed.append(test_pypath)
                set_verbosity(verbosity)
                msg = f"<< FAILURE RUNNING : {test_pypath} >>"
                logging.error(PFX + "=" * len(msg))
                logging.error(PFX + msg)
                logging.error(PFX + "=" * len(msg))

                # Reproduce the error with full output

                set_verbosity(Levels.TRACE)
                try:
                    test_func = getattr(module, test_func_name)
                    with np.printoptions(
                            precision=np.finfo(pisa.FTYPE).precision + 2,
                            floatmode="fixed",
                            sign=" ",
                            linewidth=200,
                    ):
                        test_func()
                except Exception:
                    pass

                set_verbosity(Levels.TRACE)
                logging.exception(err)

                set_verbosity(verbosity)
                logging.error(PFX + "#" * len(msg))

            else:
                test_pypaths_succeeded.append(test_pypath)

            finally:
                # remove references to the test function, e.g. to remove refs
                # to pycuda / numba.cuda contexts so these can be closed
                try:
                    del test_func
                except NameError:
                    pass

        # NOTE: Until we get all GPU code into Numba, we need to unload pycuda
        # and/or numba.cuda contexts before importing a module that requires
        # the other one.
        # NOTE: the following causes a traceback to be emitted at the very end
        # of the script, regardless of the exception catching here.
        if (pisa.TARGET == "cuda" and pycuda is not None
                and hasattr(pycuda, "autoinit")
                and hasattr(pycuda.autoinit, "context")):
            try:
                pycuda.autoinit.context.detach()
            except Exception:
                pass

        # Attempt to unload the imported module
        # TODO: pipeline, etc. fail as isinstance(service, (Stage, PiStage)) is False
        #if module_pypath in sys.modules and module_pypath != "pisa":
        #    del sys.modules[module_pypath]
        #del module

        # TODO: crashes program; subsequent calls in same shell crash(!?!?)
        # if pisa.TARGET == 'cuda' and nbcuda is not None:
        #    try:
        #        nbcuda.close()
        #    except Exception:
        #        pass

    # Summarize results

    n_import_successes = len(module_pypaths_succeeded)
    n_import_failures = len(module_pypaths_failed)
    n_import_failures_ignored = len(module_pypaths_failed_ignored)
    n_test_successes = len(test_pypaths_succeeded)
    n_test_failures = len(test_pypaths_failed)
    n_test_failures_ignored = len(test_pypaths_failed_ignored)

    set_verbosity(verbosity)
    logging.info(
        PFX + f"<< IMPORT TESTS : {n_import_successes} imported,"
        f" {n_import_failures} failed,"
        f" {n_import_failures_ignored} failed to import but ok to ignore >>")
    logging.info(PFX + f"<< UNIT TESTS : {n_test_successes} succeeded,"
                 f" {n_test_failures} failed,"
                 f" {n_test_failures_ignored} failed but ok to ignore >>")

    # Exit with error if any failures (import or unit test)

    if module_pypaths_failed or test_pypaths_failed:
        msgs = []
        if module_pypaths_failed:
            msgs.append(
                f"{n_import_failures} module(s) failed to import:\n  " +
                ", ".join(module_pypaths_failed))
        if test_pypaths_failed:
            msgs.append(f"{n_test_failures} unit test(s) failed:\n  " +
                        ", ".join(test_pypaths_failed))

        # Note the extra newlines before the exception to make it stand out;
        # the newlines after the exception are due to the pycuda error message
        # that is emitted when we call pycuda.autoinit.context.detach()
        sys.stdout.flush()
        sys.stderr.write("\n\n\n")
        raise Exception("\n".join(msgs) + "\n\n\n")
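A minimal driver sketch for the runner above. The function name `run_unit_tests` and the example `path` are assumptions, since the enclosing signature is truncated in this listing; `Levels` comes from `pisa.utils.log` as used in the body.

# Hypothetical driver; `run_unit_tests` and the path are assumptions.
if __name__ == "__main__":
    from pisa.utils.log import Levels

    run_unit_tests(
        path="pisa/utils",            # file or directory to scan for tests
        allow_missing=["pycuda"],     # optional deps whose ImportError is tolerated
        verbosity=Levels.WARN,
    )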
Example #34
    def __init__(self, server, git_version):

        print('Logs will be written to %s' % self.LOG_FOLDER)

        self.USERNAME = DEFAULT_USERNAME
        self.PASSWORD = DEFAULT_PASSWORD

        if not os.path.exists(self.LOG_FOLDER):
            os.makedirs(self.LOG_FOLDER)

        self.log_folder = self.LOG_FOLDER
        self.server = server
        self.hostname = socket.gethostname()
        self.ip = socket.gethostbyname(socket.gethostname())
        self.container = JobContainer(self.log_folder, server)
        self.status_changed = True
        self.info = cpuinfo.get_cpu_info()
        self.s = requests.Session()
        self.first_update = True
        self.need_restart = False
        self.cpu_percent = 0.0
        self.git_version = git_version

        # Log File Logger
        self.logger = logging.getLogger('JobClientLogger')
        self.logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(
            os.path.join(self.log_folder, 'JobClient.log'))
        formatter = logging.Formatter(
            "%(asctime)s - %(levelname)s - %(message)s")
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

        # Console handler
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        self.logger.addHandler(ch)

        self.logger.info('Launching JobClient (Code Version %d)' % VERSION)

        try:
            import vhtrack
            self.cuda_device_count = vhtrack.query_cuda_device_count()
            if self.cuda_device_count > 0:
                di = vhtrack.DeviceInfo(0)
                if di.isCompatible():
                    total_memory = di.totalMemory()
                    vram_in_GB = total_memory >> 30
                    if vram_in_GB < 4 or di.majorVersion() < 5:
                        self.cuda_device_count = 0

                    self.logger.info(
                        '%d CUDA GPU Detected %s with %d GB VRAM' %
                        (self.cuda_device_count, di.name(), vram_in_GB))
                else:
                    self.cuda_device_count = 0
        except Exception:
            self.cuda_device_count = 0

        if self.cuda_device_count == 0:
            self.logger.info('No CUDA GPU Detected')

        self.available_job_classes = create_available_job_list(self.logger)
Example #35
    args = parser.parse_args()

    # ---------------- PARAMS -------------------- #

    if args.model_type == 'Detection':
        args.models_dir = 'detection_models/'
    else:
        args.models_dir = 'general_models/'

    if args.avx:
        suffix_AVX = 'AVX'
    else:
        suffix_AVX = 'NOAVX'

    aux = cpuinfo.get_cpu_info()
    if args.device == 'CPU':
        if 'brand' in aux:
            args.device_name = aux['brand']
        else:
            args.device_name = aux['brand_raw']
    else:
        args.device_name = args.device + ' X'

    models = [
        name for name in os.listdir(args.models_dir)
        if os.path.isdir(os.path.join(args.models_dir, name))
    ]
    images = [
        name for name in os.listdir(args.images_dir)
        if name.endswith('png') or name.endswith('jpg')
Example #36
def blosc_extension():
    print('[zarr] Setting up Blosc extension')

    # setup blosc extension
    blosc_sources = []
    extra_compile_args = []
    include_dirs = []
    define_macros = []

    # generic setup
    blosc_sources += [f for f in glob('c-blosc/blosc/*.c')
                      if 'avx2' not in f and 'sse2' not in f]
    blosc_sources += glob('c-blosc/internal-complibs/lz4*/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/snappy*/*.cc')
    blosc_sources += glob('c-blosc/internal-complibs/zlib*/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/common/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/compress/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/decompress/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/dictBuilder/*.c')
    include_dirs += [os.path.join('c-blosc', 'blosc')]
    include_dirs += [d for d in glob('c-blosc/internal-complibs/*')
                     if os.path.isdir(d)]
    include_dirs += [d for d in glob('c-blosc/internal-complibs/*/*')
                     if os.path.isdir(d)]
    define_macros += [('HAVE_LZ4', 1),
                      ('HAVE_SNAPPY', 1),
                      ('HAVE_ZLIB', 1),
                      ('HAVE_ZSTD', 1)]
    # define_macros += [('CYTHON_TRACE', '1')]

    # determine CPU support for SSE2 and AVX2
    cpu_info = cpuinfo.get_cpu_info()

    # SSE2
    if 'sse2' in cpu_info['flags']:
        print('[zarr] SSE2 detected')
        extra_compile_args.append('-DSHUFFLE_SSE2_ENABLED')
        blosc_sources += [f for f in glob('c-blosc/blosc/*.c') if 'sse2' in f]
        if os.name == 'posix':
            extra_compile_args.append('-msse2')
        elif os.name == 'nt':
            define_macros += [('__SSE2__', 1)]

    # AVX2
    if 'avx2' in cpu_info['flags']:
        print('[zarr] AVX2 detected')
        extra_compile_args.append('-DSHUFFLE_AVX2_ENABLED')
        blosc_sources += [f for f in glob('c-blosc/blosc/*.c') if 'avx2' in f]
        if os.name == 'posix':
            extra_compile_args.append('-mavx2')
        elif os.name == 'nt':
            define_macros += [('__AVX2__', 1)]

    # workaround lack of support for "inline" in MSVC when building for Python
    # 2.7 64-bit
    if PY2 and os.name == 'nt':
        extra_compile_args.append('-Dinline=__inline')

    if have_cython:
        sources = ['zarr/blosc.pyx']
    else:
        sources = ['zarr/blosc.c']

    # define extension module
    extensions = [
        Extension('zarr.blosc',
                  sources=sources + blosc_sources,
                  include_dirs=include_dirs,
                  define_macros=define_macros,
                  extra_compile_args=extra_compile_args,
                  ),
    ]

    if have_cython:
        extensions = cythonize(extensions)

    return extensions
Example #37
import cpuinfo
import platform
import psutil

info = cpuinfo.get_cpu_info()
processador = info['brand']
arquitectura = info['arch']
bits = info['bits']
frequencia_disponivel = info['hz_advertised']
frequencia_usada = info['hz_actual']

#platform
kernel = platform.system()
kernel_versao = platform.release()
utilizador = platform.node()

#psutil
nr_nucleos = psutil.cpu_count()
bateria = round(psutil.sensors_battery().percent)
Example #38
 def get_cpuinfo(self):
     return cpuinfo.get_cpu_info()
Example #39
__author__ = "Alexander [Amper] Marshalov"
__email__ = "*****@*****.**"
__icq__ = "87-555-3"
__jabber__ = "*****@*****.**"
__twitter__ = "amper"
__url__ = "http://amper.github.com/cityhash"

from setuptools import setup
from setuptools.extension import Extension
from setuptools.dist import Distribution
from os.path import join, dirname
from cpuinfo import get_cpu_info

cpu_info = get_cpu_info()
have_sse42 = 'sse4.2' in cpu_info['flags']

try:
    from Cython.Distutils import build_ext
except ImportError:
    USE_CYTHON = False
else:
    USE_CYTHON = True


class BinaryDistribution(Distribution):
    """
    Subclass the setuptools Distribution to flip the purity flag to false.
    See http://lucumr.pocoo.org/2014/1/27/python-on-wheels/
    """
    def is_pure(self):
        # TODO: check if this is still necessary with Python v2.7
Example #40
 def brand():
     return cpuinfo.get_cpu_info()['brand']
Example #41
import logging
import os
import socket
from typing import Optional

import cpuinfo
import psutil
import pydantic
import yaml

__all__ = [
    "get_config", "get_provenance_augments", "global_repr", "NodeDescriptor"
]

# Start a globals dictionary with small starting values
CPUINFO = cpuinfo.get_cpu_info()

# We want physical cores
if hasattr(psutil.Process(), "cpu_affinity"):
    cpu_cnt = len(psutil.Process().cpu_affinity())
else:
    cpu_cnt = psutil.cpu_count(logical=False)
    if cpu_cnt is None:
        cpu_cnt = psutil.cpu_count(logical=True)
CPUINFO["count"] = cpu_cnt

# Generic globals
GLOBALS = {}
GLOBALS["hostname"] = socket.gethostname()
GLOBALS["memory"] = round(psutil.virtual_memory().available / (1024**3), 3)
GLOBALS["cpu"] = CPUINFO["brand"]
Example #42
 def hz_actual():
     return cpuinfo.get_cpu_info()['hz_actual']
Example #43
'''
This script is used to ask for the CPU flags on Windows. We use this instead of
importing the cpuinfo package, because recent versions of py-cpuinfo use the
multiprocessing module, and any import of cpuinfo that is not within a
`if __name__ == '__main__':` block will lead to the script being executed twice.

The CPU flags are printed to stdout encoded as JSON.
'''
from __future__ import absolute_import
from __future__ import print_function
import json

if __name__ == '__main__':
    import cpuinfo
    flags = cpuinfo.get_cpu_info()['flags']
    print(json.dumps(flags))
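For context, a sketch of the calling side this docstring implies: run the script in a separate interpreter and parse its stdout. The file name cpu_flags.py is an assumption.

# Hypothetical caller; assumes the script above is saved as cpu_flags.py.
import json
import subprocess
import sys

out = subprocess.check_output([sys.executable, "cpu_flags.py"])
flags = json.loads(out)
# flag spelling varies across py-cpuinfo versions/platforms
print("SSE4.2 available:", "sse4_2" in flags or "sse4.2" in flags)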
Example #44
 def family():
     return cpuinfo.get_cpu_info()['family']
Example #45
def environment():
    info = get_cpu_info()
    return render_template('environment.html', name=current_user.name, info=info)
Example #46
__author__  = "Alexander [Amper] Marshalov"
__email__   = "*****@*****.**"
__icq__     = "87-555-3"
__jabber__  = "*****@*****.**"
__twitter__ = "amper"
__url__     = "http://amper.github.com/cityhash"

from setuptools import setup
from setuptools.extension import Extension
from setuptools.dist import Distribution
from os.path import join, dirname

try:
    from cpuinfo import get_cpu_info
    cpu_info = get_cpu_info()
    have_sse42 = 'sse4.2' in cpu_info['flags']
except Exception:
    have_sse42 = False

try:
    from Cython.Distutils import build_ext
except ImportError:
    USE_CYTHON = False
else:
    USE_CYTHON = True


class BinaryDistribution(Distribution):
    """
    Subclass the setuptools Distribution to flip the purity flag to false.
    See http://lucumr.pocoo.org/2014/1/27/python-on-wheels/
Example #47
def _cpu(command):
    _cpuinfo = cpuinfo.get_cpu_info()
    _cpuinfo["usage"] = psutil.cpu_percent()
    return _cpuinfo[command]
Example #48
 def getMaxCpus(self):
     info = cpuinfo.get_cpu_info()
     return info['count']
Example #49
import psutil
import time
import cpuinfo

data = ""

cpui = cpuinfo.get_cpu_info()
data += "\t\"cpu\": {\n"
data += "\t\t\"threads\": \"" + str(psutil.cpu_count(logical=True)) + "\",\n"
data += "\t\t\"cores\": \"" + str(psutil.cpu_count(logical=False)) + "\",\n"
stats = psutil.cpu_stats()
data += "\t\t\"context_switches\": \"" + str(stats.ctx_switches) + "\",\n"
data += "\t\t\"interrupts\": \"" + str(stats.interrupts) + "\",\n"
data += "\t\t\"soft_interrupts\": \"" + str(stats.soft_interrupts) + "\",\n"
data += "\t\t\"architecture\": \"" + cpui["arch"] + "\",\n"
data += "\t\t\"vendor\": \"" + cpui["brand"] + "\",\n"
data += "\t\t\"frequency\": \"" + str(cpui["hz_actual_raw"][0]) + "\",\n"
data += "\t\t\"base_frequency\": \"" + str(
    cpui["hz_advertised_raw"][0]) + "\",\n"
data += "\t\t\"vendor_id\": \"" + cpui["vendor_id"] + "\",\n"
data += "\t\t\"bits\": \"" + str(cpui["bits"]) + "\",\n"
data += "\t\t\"cache\": \"" + str(cpui["l2_cache_size"]) + "\",\n"
data += "\t\t\"cores\": {\n"
counter = 0
cores = psutil.cpu_times_percent(interval=1, percpu=True)
for times in cores:
    data += "\t\t\t\"" + str(counter) + "\": {\n"
    data += "\t\t\t\t\"nice\": \"" + str(times.nice) + "\",\n"
    data += "\t\t\t\t\"iowait\": \"" + str(times.iowait) + "\",\n"
    data += "\t\t\t\t\"irq\": \"" + str(times.irq) + "\",\n"
    data += "\t\t\t\t\"softirq\": \"" + str(times.softirq) + "\",\n"
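The (truncated) snippet above assembles the JSON document by string concatenation; below is a sketch of the same idea using the standard json module instead, with an abbreviated field set.

import json

import cpuinfo
import psutil

cpui = cpuinfo.get_cpu_info()
payload = {
    "cpu": {
        "threads": psutil.cpu_count(logical=True),
        "cores": psutil.cpu_count(logical=False),
        "architecture": cpui["arch"],
        "frequency": cpui["hz_actual_raw"][0],
    }
}
print(json.dumps(payload, indent=2))  # serialization handles quoting/escaping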
Example #50
def is_vm():
    flags = cpuinfo.get_cpu_info()['flags']
    return('hypervisor' in flags )
Example #51
 def vendor():
     return cpuinfo.get_cpu_info()['vendor_id']
Example #52
def _get_facts(*, kit=None):
    """
    Get facts

    Facts are immutable global variables
    set at the very end of variable resolution.

    :param kit: A kit from which facts are going to be extracted
    :return: A dictionary with a bunch of scavenged variables
    """

    # these are the facts
    facts = {}

    # General facts that are available for every platform
    _create_fact(facts, 'version', pkg_version)
    _create_fact(facts, 'install_prefix', os.getenv('HOME'))

    # General system-related facts
    _create_fact(facts, 'sys_uid', os.getuid())
    _create_fact(facts, 'sys_gid', os.getgid())

    # A collection of current environment variables is held in here
    _create_fact(facts, 'env', dict(os.environ))

    # Facts for *nix operating systems
    _create_fact(facts, 'sys_path', os.getenv("PATH").split(":"))
    if os.name == 'posix':
        _create_fact(facts, 'sys_user', os.getenv('USER'))
        _create_fact(facts, 'sys_user_home', os.getenv('HOME'))

    ####################################################
    # System-related facts:
    # ---------------------
    # These facts collect characteristics of the current
    # platform zenfig is running on
    ####################################################

    # Operating System facts
    _system = platform.system()
    _create_fact(facts, 'system', _system)
    _create_fact(facts, 'sys_node', platform.node())

    # These are exclusive to linux-based systems
    if _system == 'Linux':
        linux_distro = platform.linux_distribution()
        _create_fact(facts, 'linux_dist_name', linux_distro[0])
        _create_fact(facts, 'linux_dist_version', linux_distro[1])
        _create_fact(facts, 'linux_dist_id', linux_distro[2])

        # kernel version
        _create_fact(facts, 'linux_release', platform.release())

    # OSX-specific facts
    if _system == 'Darwin':
        _create_fact(facts, 'osx_ver', platform.mac_ver())

    # Hardware-related facts
    _create_fact(facts, 'sys_machine', platform.machine())

    # Low level CPU information (thanks to cpuinfo)
    _cpu_info = cpuinfo.get_cpu_info()
    _create_fact(facts, 'cpu_vendor_id', _cpu_info['vendor_id'])
    _create_fact(facts, 'cpu_brand', _cpu_info['brand'])
    _create_fact(facts, 'cpu_cores', _cpu_info['count'])
    _create_fact(facts, 'cpu_hz', _cpu_info['hz_advertised_raw'][0])
    _create_fact(facts, 'cpu_arch', _cpu_info['arch'])
    _create_fact(facts, 'cpu_bits', _cpu_info['bits'])

    # RAM information
    _create_fact(facts, 'mem_total', psutil.virtual_memory()[0])

    ####################
    # Python information
    ####################
    _py_ver = platform.python_version_tuple()
    _create_fact(facts, 'python_implementation', platform.python_revision())
    _create_fact(facts, 'python_version', platform.python_version())
    _create_fact(facts, 'python_version_major', _py_ver[0])
    _create_fact(facts, 'python_version_minor', _py_ver[1])
    _create_fact(facts, 'python_version_patch', _py_ver[2])

    # Kit index variables are taken as well as facts
    # so they can be referenced by other variables, also
    # this means that index variables from a kit can reference
    # other variables as well, because all these variables get
    # rendered as part of variable resolution.
    if kit is not None:
        for key, value in kit.index_data.items():
            _create_fact(facts, key, value, prefix="{}_{}".format(pkg_name, "kit"))

    # Give those variables already!
    return facts
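A short usage sketch for the function above; `_get_facts` is module-private, so this mirrors how the surrounding module might consume it.

# Hypothetical consumer within the same module.
facts = _get_facts()          # kit=None: platform facts only
print(facts["system"], facts["cpu_brand"], facts["cpu_cores"])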
Example #53
 def hz_advertised():
     return cpuinfo.get_cpu_info()['hz_advertised']
Example #54
    def printWelcome(logo,
                     version='beta',
                     color_d='blue',
                     color_out='magenta',
                     attributes2=['bold']):
        print(logo)
        print(
            colored('+------------------------------------------+', color_out))
        print(colored('beerware software.', color_d, attrs=attributes2))
        s = getpass.getuser() + '@' + platform.node()

        print(colored('Computer:            ', color_d, attrs=attributes2),
              colored(s, color_out))
        print(colored('Script:              ', color_d, attrs=attributes2),
              colored(Welcome.getScript(), color_out))
        print(colored('Api version:         ', color_d, attrs=attributes2),
              colored(sys.api_version, color_out))

        print(colored('Path:                ', color_d, attrs=attributes2),
              colored(sys.executable, color_out))
        print(colored('Native Compiler:     ', color_d, attrs=attributes2),
              colored(platform.python_compiler(), color_out))
        print(colored('Architecture:        ', color_d, attrs=attributes2),
              colored(platform.processor(), color_out))
        s = platform.machine() + '  ' + platform.system(
        ) + ' Kernel version ' + platform.release()
        print(colored('Kernel:              ', color_d, attrs=attributes2),
              colored(s, color_out))

        print(colored('CPU Info:            ', color_d, attrs=attributes2),
              colored(cpuinfo.get_cpu_info()['brand'], color_out))

        print(colored('Python Version:      ', color_d, attrs=attributes2),
              colored(platform.python_version(), color_out))
        print(colored('Processors:          ', color_d, attrs=attributes2),
              colored(psutil.cpu_count(logical=False), color_out))
        print(colored('Terminal:            ', color_d, attrs=attributes2),
              colored(Welcome.getTerminal(), color_out))
        print(colored('User:                ', color_d, attrs=attributes2),
              colored(getpass.getuser(), color_out))  # argument masked in source; getpass.getuser() assumed
        print(colored('Current process:     ', color_d, attrs=attributes2),
              colored(psutil.Process().pid, color_out))
        print(colored('Code version:        ', color_d, attrs=attributes2),
              colored(version, color_out))
        mem = psutil.virtual_memory()

        print(colored('Total Memory:        ', color_d, attrs=attributes2),
              colored(Welcome.sizeof_fmt(mem.total), color_out))

        print(colored('Available Memory:    ', color_d, attrs=attributes2),
              colored(Welcome.sizeof_fmt(mem.available), color_out))
        print(colored('Free Memory:         ', color_d, attrs=attributes2),
              colored(Welcome.sizeof_fmt(mem.free), color_out))
        print(colored('Used Memory:         ', color_d, attrs=attributes2),
              colored(Welcome.sizeof_fmt(mem.used), color_out))
        print(colored('Active Memory:       ', color_d, attrs=attributes2),
              colored(Welcome.sizeof_fmt(mem.active), color_out))
        print(colored('Inactive Memory:     ', color_d, attrs=attributes2),
              colored(Welcome.sizeof_fmt(mem.inactive), color_out))
        print(colored('Wired Memory:        ', color_d, attrs=attributes2),
              colored(Welcome.sizeof_fmt(mem.wired), color_out))

        print(colored('Current path:        ', color_d, attrs=attributes2),
              colored(os.getcwd(), color_out))

        print(colored('Current date:        ', color_d, attrs=attributes2),
              colored(datetime.datetime.now(), color_out))
        print(colored('Current time:        ', color_d, attrs=attributes2),
              colored(datetime.datetime.now(), color_out))
        print(
            colored('+------------------------------------------+', color_out))
Example #55
 def bit():
     return cpuinfo.get_cpu_info()['bits']
Example #56
 def get_cpu_info(self):
     """ Returns metadata about current CPU the computation is executed on.
     """
     cpu_info = get_cpu_info()
     return {'brand': cpu_info['brand'], 'hz_actual': cpu_info['hz_actual']}
Example #57
 def model():
     return cpuinfo.get_cpu_info()['model']
Example #58
def get_processor_name() -> str:
    return cpuinfo.get_cpu_info()['brand_raw']
Example #59
    sources += [f for f in glob('c-blosc/blosc/*.c')
                if 'avx2' not in f and 'sse2' not in f]
    sources += glob('c-blosc/internal-complibs/lz4*/*.c')
    sources += glob('c-blosc/internal-complibs/snappy*/*.cc')
    sources += glob('c-blosc/internal-complibs/zlib*/*.c')
    sources += glob('c-blosc/internal-complibs/zstd*/*/*.c')
    inc_dirs += [os.path.join('c-blosc', 'blosc')]
    inc_dirs += [d for d in glob('c-blosc/internal-complibs/*')
                 if os.path.isdir(d)]
    inc_dirs += [d for d in glob('c-blosc/internal-complibs/zstd*/*')
                 if os.path.isdir(d)]
    def_macros += [('HAVE_LZ4', 1), ('HAVE_SNAPPY', 1), ('HAVE_ZLIB', 1),
                   ('HAVE_ZSTD', 1)]

    # Guess SSE2 or AVX2 capabilities
    cpu_info = cpuinfo.get_cpu_info()
    # SSE2
    if 'sse2' in cpu_info['flags']:
        print('SSE2 detected')
        CFLAGS.append('-DSHUFFLE_SSE2_ENABLED')
        sources += [f for f in glob('c-blosc/blosc/*.c') if 'sse2' in f]
        if os.name == 'posix':
            CFLAGS.append('-msse2')
        elif os.name == 'nt':
            def_macros += [('__SSE2__', 1)]

    # AVX2
    if 'avx2' in cpu_info['flags']:
        print('AVX2 detected')
        CFLAGS.append('-DSHUFFLE_AVX2_ENABLED')
        sources += [f for f in glob('c-blosc/blosc/*.c') if 'avx2' in f]
Example #60
def argparse_function(
        version: str,
        release: str) -> typing.Tuple[str, str, str, str, bool, bool]:
    conf = config.getInstance()
    parser = argparse.ArgumentParser(
        epilog=f"Load Config file '{conf.ini_path}'.")
    parser.add_argument("AnalysisPath",
                        default='',
                        nargs='?',
                        help="Analysis folder or single file.")
    parser.add_argument("-p",
                        "--path",
                        default='',
                        nargs='?',
                        help="Analysis folder.")
    parser.add_argument("-s",
                        "--success-output-folder",
                        default='',
                        nargs='?',
                        help="Success output folder.")
    parser.add_argument("-f",
                        "--failed-output-folder",
                        default='',
                        nargs='?',
                        help="Failed output folder.")
    parser.add_argument(
        "-m",
        "--main-mode",
        default='',
        nargs='?',
        help="Main mode. 1:Scraping 2:Organizing 3:Scraping in analysis folder"
    )
    # parser.add_argument("-C", "--config", default='config.ini', nargs='?', help="The config file Path.")
    parser.add_argument(
        "-L",
        "--link-mode",
        default='',
        nargs='?',
        help=
        "Create movie file link. 0:moving movie file, do not create link 1:soft link 2:try hard link first"
    )
    default_logdir = str(Path.home() / '.avlogs')
    parser.add_argument(
        "-o",
        "--log-dir",
        dest='logdir',
        default=default_logdir,
        nargs='?',
        help=
        f"""Duplicate stdout and stderr to logfiles in logging folder, default on.
        default folder for current user: '{default_logdir}'. Change default folder to an empty file,
        or use --log-dir= to turn log off.""")
    parser.add_argument("-q",
                        "--regex-query",
                        dest='regexstr',
                        default='',
                        nargs='?',
                        help="python re module regex filepath filtering.")
    parser.add_argument("-d",
                        "--nfo-skip-days",
                        dest='days',
                        default='',
                        nargs='?',
                        help="Override nfo_skip_days value in config.")
    parser.add_argument("-c",
                        "--stop-counter",
                        dest='cnt',
                        default='',
                        nargs='?',
                        help="Override stop_counter value in config.")
    parser.add_argument(
        "-R",
        "--rerun-delay",
        dest='delaytm',
        default='',
        nargs='?',
        help=
        "Delay (eg. 1h10m30s or 60 (second)) time and rerun, until all movies proceed. Note: stop_counter value in config or -c must none zero."
    )
    parser.add_argument("-i",
                        "--ignore-failed-list",
                        action="store_true",
                        help="Ignore failed list '{}'".format(
                            os.path.join(os.path.abspath(conf.failed_folder()),
                                         'failed_list.txt')))
    parser.add_argument("-a",
                        "--auto-exit",
                        action="store_true",
                        help="Auto exit after program complete")
    parser.add_argument(
        "-g",
        "--debug",
        action="store_true",
        help="Turn on debug mode to generate diagnostic log for issue report.")
    parser.add_argument(
        "-N",
        "--no-network-operation",
        action="store_true",
        help=
        "No network query, do not get metadata, for cover cropping purposes, only takes effect when main mode is 3."
    )
    parser.add_argument("-w",
                        "--website",
                        dest='site',
                        default='',
                        nargs='?',
                        help="Override [priority]website= in config.")
    parser.add_argument(
        "-D",
        "--download-images",
        dest='dnimg',
        action="store_true",
        help=
        "Override [common]download_only_missing_images=0 force invoke image downloading."
    )
    parser.add_argument(
        "-C",
        "--config-override",
        dest='cfgcmd',
        action='append',
        nargs=1,
        help=
        "Common use config override. Grammar: section:key=value[;[section:]key=value] eg. 'de:s=1' or 'debug_mode:switch=1' override[debug_mode]switch=1 Note:this parameters can be used multiple times"
    )
    parser.add_argument(
        "-z",
        "--zero-operation",
        dest='zero_op',
        action="store_true",
        help=
        """Only show job list of files and numbers, and **NO** actual operation
is performed. It may help you correct wrong numbers before real job.""")
    verrel = version if not len(release) else f"{version}-{release}"
    parser.add_argument("-v", "--version", action="version", version=verrel)

    args = parser.parse_args()

    def set_natural_number_or_none(sk, value):
        if isinstance(value, str) and value.isnumeric() and int(value) >= 0:
            conf.set_override(f'{sk}={value}')

    def set_str_or_none(sk, value):
        if isinstance(value, str) and len(value):
            conf.set_override(f'{sk}={value}')

    def set_bool_or_none(sk, value):
        if isinstance(value, bool) and value:
            conf.set_override(f'{sk}=1')

    set_natural_number_or_none("common:main_mode", args.main_mode)
    set_natural_number_or_none("common:link_mode", args.link_mode)
    set_str_or_none("common:source_folder", args.path)
    set_str_or_none("common:success_output_folder", args.success_output_folder)
    set_str_or_none("common:failed_output_folder", args.failed_output_folder)
    set_bool_or_none("common:auto_exit", args.auto_exit)
    set_natural_number_or_none("common:nfo_skip_days", args.days)
    set_natural_number_or_none("common:stop_counter", args.cnt)
    set_bool_or_none("common:ignore_failed_list", args.ignore_failed_list)
    set_str_or_none("common:rerun_delay", args.delaytm)
    set_str_or_none("priority:website", args.site)
    if isinstance(args.dnimg, bool) and args.dnimg:
        conf.set_override("common:download_only_missing_images=0")
    set_bool_or_none("debug_mode:switch", args.debug)
    regexstr = args.regexstr
    if isinstance(args.AnalysisPath, str) and len(args.AnalysisPath):
        ana_path = Path(args.AnalysisPath)
        if ana_path.is_file():
            sourcedir = ana_path.parent
            if sourcedir.is_dir():
                conf.set_override(f'common:source_folder={str(sourcedir)}')
                regexstr = get_number(False, ana_path.stem)
        elif ana_path.is_dir():
            conf.set_override(f"common:source_folder={str(ana_path)}")
    if isinstance(args.cfgcmd, list):
        for cmd in args.cfgcmd:
            conf.set_override(cmd[0])

    no_net_op = False
    if conf.main_mode() == 3:
        no_net_op = args.no_network_operation
        if no_net_op:
            conf.set_override(
                "common:stop_counter=0;rerun_delay=0s;face:aways_imagecut=1")

    return args.logdir, regexstr, args.zero_op, no_net_op, verrel, get_cpu_info(
    )