Example #1
0
def main(boost_dir):
    print "This is Python %s" % platform.python_version()
    print "Compiled with %s" % platform.python_compiler()
    print "Architecture: %s" % platform.architecture()[0]
    buildlibs = ["thread", "date_time"]

    pycomp = platform.python_compiler()
    if pycomp.startswith("MSC v.1400"):
        msvc_version = "8.0"  # Visual Studio 2005
    elif pycomp.startswith("MSC v.1500"):
        msvc_version = "9.0"  # Visual Studio 2008
    elif pycomp.startswith("MSC v.1600"):
        msvc_version = "10.0"  # Visual Studio 2010
    else:
        print "Error: Python compiled with unknown version of Visual Studio"
        sys.exit(0)
    msvc_short = msvc_version.replace(".", "")

    assert os.path.exists(boost_dir)
    old_path = os.getcwd()

    python_version = ".".join(platform.python_version_tuple()[:2])
    python_path = get_config_var("prefix").replace("\\", "/").replace(" ", "\\ ")
    jamConfigText = "using msvc : %s ;\nusing python : %s : %s ;\n" % (msvc_version, python_version, python_path)

    os.chdir(boost_dir)
    jamConfig = open("tmp-config.jam", "w")
    jamConfig.write(jamConfigText)
    jamConfig.close()

    if not os.path.exists("bjam.exe"):
        os.system("bootstrap.bat")

    boost_version_string = getBoostVersion(boost_dir)
    print "Boost version: %s" % boost_version_string

    extra_args = ["-j8", "--user-config=%s" % jamConfig.name]
    assert os.path.exists("bjam.exe")

    if "64 bit" in platform.python_compiler():
        extra_args.append("address-model=64")

    command = "bjam.exe {lib} stage --stagedir={stage} runtime-debugging={debug} variant={variant} link={link} {extra}"
    for variant, debug in (("release", "off"), ("debug", "on")):
        for link in ("shared", "static"):
            os.system(
                command.format(
                    lib=" ".join("--with-%s" % lib for lib in buildlibs),
                    stage=join(old_path),
                    extra=" ".join(extra_args),
                    variant=variant,
                    debug=debug,
                    link=link,
                )
            )
    os.remove(jamConfig.name)
Example #2
0
def get_system_info():
    """Collect and return system information as a nested dict with
    python / platform / OS-hardware / executable-architecture sections."""
    python_info = {
        "version": platform.python_version(),
        # BUG FIX: key was "version_tuple:" (stray trailing colon).
        "version_tuple": platform.python_version_tuple(),
        "compiler": platform.python_compiler(),
        "build": platform.python_build(),
    }

    platform_info = {"platform": platform.platform(aliased=True)}

    os_and_hardware_info = {
        # BUG FIX: key was "uname:" (stray trailing colon).
        "uname": platform.uname(),
        "system": platform.system(),
        "node": platform.node(),
        "release": platform.release(),
        "version": platform.version(),
        "machine": platform.machine(),
        "processor": platform.processor(),
    }

    executable_architecture_info = {
        "interpreter": platform.architecture(),
        "/bin/ls": platform.architecture("/bin/ls"),
    }

    return {
        "python_info": python_info,
        "platform_info": platform_info,
        "os_and_hardware_info": os_and_hardware_info,
        "executable_architecture_info": executable_architecture_info,
    }
Example #3
0
def get_python_info():
    """Gather interpreter facts (argv, paths, version, compiler, and
    optional-feature availability) into a plain dict."""
    # compiler/build_date are parsed out of sys.version by the platform
    # module; ship the raw (whitespace-collapsed) string too in case that
    # parse went wrong.
    features = {'openssl': OPENSSL_VERSION,
                'expat': EXPAT_VERSION,
                'sqlite': SQLITE_VERSION,
                'tkinter': TKINTER_VERSION,
                'zlib': ZLIB_VERSION,
                'unicode_wide': HAVE_UCS4,
                'readline': HAVE_READLINE,
                '64bit': IS_64BIT,
                'ipv6': HAVE_IPV6,
                'threading': HAVE_THREADING,
                'urandom': HAVE_URANDOM}

    return {
        'argv': _escape_shell_args(sys.argv),
        'bin': sys.executable,
        'version': ' '.join(sys.version.split()),
        'compiler': platform.python_compiler(),
        'build_date': platform.python_build()[1],
        'version_info': list(sys.version_info),
        'features': features,
    }
Example #4
0
def get_machine_details():

    if _debug:
        print 'Getting machine details...'
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    try:
        unichr(100000)
    except ValueError:
        # UCS2 build (standard)
        unicode = 'UCS2'
    except NameError:
        unicode = None
    else:
        # UCS4 build (most recent Linux distros)
        unicode = 'UCS4'
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': getattr(platform, 'python_implementation',
                                  lambda:'n/a')(),
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unicode,
        'bits': bits,
        }
Example #5
0
    def build_system(self):
        """
        Collect interpreter and OS details into self.system.

        from https://github.com/Dreyer/pyinfo/blob/master/pyinfo.py
        """
        system = {
            'path': False,
            'os_path': False,
            'os_version': False,
            'version': False,
            'subversion': False,
            'prefix': False,
            'build_date': platform.python_build()[1],
            'executable': False,
            'compiler': platform.python_compiler(),
            'api_version': False,
            'implementation': platform.python_implementation(),
            'system': platform.system(),
        }

        # platform.dist() was removed in Python 3.8; fall back to the plain
        # "<system> <release>" string when it is unavailable.
        dist = platform.dist() if hasattr(platform, 'dist') else ('', '')
        if dist[0] != '' and dist[1] != '':
            system['os_version'] = '%s %s (%s %s)' % (
                platform.system(), platform.release(),
                dist[0].capitalize(), dist[1])
        else:
            system['os_version'] = '%s %s' % (platform.system(), platform.release())

        if hasattr(os, 'path'): system['os_path'] = os.environ['PATH']
        if hasattr(sys, 'version'): system['version'] = platform.python_version()
        if hasattr(sys, 'subversion'): system['subversion'] = ', '.join(sys.subversion)
        if hasattr(sys, 'prefix'): system['prefix'] = sys.prefix
        if hasattr(sys, 'path'): system['path'] = sys.path
        if hasattr(sys, 'executable'): system['executable'] = sys.executable
        # BUG FIX: the original stored this under the key 'api', so the
        # declared 'api_version' entry was never filled in.
        if hasattr(sys, 'api_version'): system['api_version'] = sys.api_version

        self.system = system
Example #6
0
def print_platform_info():
    """Emit a DEBUG-level dump of interpreter and host details."""
    import platform
    debug = logging.debug

    debug('*************** PLATFORM INFORMATION ************************')

    debug('==Interpreter==')
    debug('Version      :' + platform.python_version())
    debug('Version tuple:' + str(platform.python_version_tuple()))
    debug('Compiler     :' + platform.python_compiler())
    debug('Build        :' + str(platform.python_build()))

    debug('==Platform==')
    debug('Normal :' + platform.platform())
    debug('Aliased:' + platform.platform(aliased=True))
    debug('Terse  :' + platform.platform(terse=True))

    debug('==Operating System and Hardware Info==')
    debug('uname:' + str(platform.uname()))
    debug('system   :' + platform.system())
    debug('node     :' + platform.node())
    debug('release  :' + platform.release())
    debug('version  :' + platform.version())
    debug('machine  :' + platform.machine())
    debug('processor:' + platform.processor())

    debug('==Executable Architecture==')
    debug('interpreter:' + str(platform.architecture()))
    debug('/bin/ls    :' + str(platform.architecture('/bin/ls')))
    debug('*******************************************************')
Example #7
0
def _get_system_provenance():
    """Return a dict of provenance values that stay fixed for the whole
    runtime: package versions, host platform, interpreter and argv."""

    bits, linkage = platform.architecture()

    platform_info = dict(
        architecture_bits=bits,
        architecture_linkage=linkage,
        machine=platform.machine(),
        processor=platform.processor(),
        node=platform.node(),
        version=platform.version(),
        system=platform.system(),
        release=platform.release(),
        libcver=platform.libc_ver(),
        num_cpus=psutil.cpu_count(),
        boot_time=Time(psutil.boot_time(), format='unix').isot,
    )

    python_info = dict(
        version_string=sys.version,
        version=platform.python_version_tuple(),
        compiler=platform.python_compiler(),
        implementation=platform.python_implementation(),
    )

    return dict(
        ctapipe_version=ctapipe.__version__,
        ctapipe_resources_version=ctapipe_resources.__version__,
        ctapipe_svc_path=os.getenv("CTAPIPE_SVC_PATH"),
        executable=sys.executable,
        platform=platform_info,
        python=python_info,
        arguments=sys.argv,
        start_time_utc=Time.now().isot,
    )
def get_platform_info(output_file):
    """Write a short host/interpreter summary to *output_file*."""
    rows = (
        ('system:', platform.system()),
        ('node:', platform.node()),
        ('version:', platform.version()),
        ('processor:', platform.processor()),
        ("python compiler:", platform.python_compiler()),
    )
    print("\nSYSTEM INFORMATION", file=output_file)
    for label, value in rows:
        print("{:<20}{:>5}".format(label, value), file=output_file)
Example #9
0
    def _get_sysinfo(self):
        """Append a human-readable host summary (compiler, OS description,
        release, machine, CPU model, core count, word size) to self.out."""
        def linux_os():
            # First "model name : ..." entry from /proc/cpuinfo, stripped
            # of newlines; distro description from lsb_release.
            modelCPU = subprocess.check_output('cat /proc/cpuinfo | sed -n -e '
                '"0,/model name\s*:\s*/s///p" | tr -d "\n"', shell=True, universal_newlines=True)
            stringDesc = subprocess.check_output('lsb_release -a | sed -n -e "s/Description:\s*//p" | '
                'tr -d "\n"', shell=True, universal_newlines=True)
            return modelCPU, stringDesc

        def apple_os():
            # CPU brand string and "OS X <version>" via sysctl / sw_vers.
            modelCPU = subprocess.check_output('sysctl -n machdep.cpu.brand_string | tr -d "\n"',
                shell=True, universal_newlines=True)
            osxVersion = subprocess.check_output('sw_vers -productVersion | tr -d "\n"',
                shell=True, universal_newlines=True)
            stringDesc = 'OS X ' + osxVersion
            return modelCPU, stringDesc

        # BUG FIX: stringCPU was unbound on systems other than Linux and
        # Darwin, raising NameError in the format expression below; fall
        # back to platform.processor() there.
        stringCPU = platform.processor()
        stringDesc = platform.system()
        if stringDesc == 'Linux':
            stringCPU, stringDesc = linux_os()
        elif stringDesc == 'Darwin':
            stringCPU, stringDesc = apple_os()

        if self.out:
            self.out += '\n\n'
        self.out += 'compiler   : %s\nsystem     : %s\n'\
        'release    : %s\nmachine    : %s\n'\
        'processor  : %s\nCPU cores  : %s\ninterpreter: %s'%(
        platform.python_compiler(),
        stringDesc,
        platform.release(),
        platform.machine(),
        stringCPU,
        cpu_count(),
        platform.architecture()[0]
        )
Example #10
0
def error_report_extra_provider(request):
    """Assemble extra context for an error report: request metadata,
    credential-filtered form data, and process/platform details."""
    ts = os.times()
    e = request.environ
    if 'CONTENT_LENGTH' in e and e['CONTENT_LENGTH']:
        # Strip credential fields before attaching the form to a report.
        form = filter_names(request.form, ignore=(
            'password',
            'confirm_password'
        ))
    else:
        form = {}
    return {
        # BUG FIX: HTTP_* headers are client-supplied and may be missing
        # from the WSGI environ; default to '?' (as HTTP_REFERER already
        # did) instead of raising KeyError inside the error handler itself.
        'HTTP_ACCEPT_LANGUAGE': e.get('HTTP_ACCEPT_LANGUAGE', '?'),
        'HTTP_REFERER': e.get('HTTP_REFERER', '?'),
        'HTTP_USER_AGENT': e.get('HTTP_USER_AGENT', '?'),
        'PATH_INFO': e['PATH_INFO'],
        'REMOTE_ADDR': e['REMOTE_ADDR'],
        'REQUEST_METHOD': e['REQUEST_METHOD'],
        'executable': sys.executable,
        'hostname': socket.gethostname(),
        'http_cookies': request.cookies,
        'http_form': form,
        'machine': platform.machine(),
        'modules': modules_info(),
        'process_uptime': timedelta(seconds=time() - start_time),
        'python_compiler': platform.python_compiler(),
        'python_version': platform.python_version(),
        'release': platform.release(),
        'route_args': dict(e['route_args']),
        'stime': timedelta(seconds=ts[1]),  # system CPU time used
        'system': platform.system(),
        'utime': timedelta(seconds=ts[0]),  # user CPU time used
        'uwsgi.version': e.get('uwsgi.version', '?'),
    }
Example #11
0
def get_machine_details():
    """Return a dict describing the host and the running interpreter."""
    if _debug:
        print('Getting machine details...')
    buildno, builddate = platform.python_build()
    # XXX this is now always UCS4, maybe replace it with 'PEP393' in 3.3+?
    if sys.maxunicode == 65535:
        # UCS2 build (standard)
        unitype = 'UCS2'
    else:
        # UCS4 build (most recent Linux distros)
        unitype = 'UCS4'
    # FIX: dropped the unused `python` local; linkage is intentionally
    # discarded (only the word-size string is reported).
    bits, _linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        # Older platform modules may lack python_implementation().
        'implementation': getattr(platform, 'python_implementation',
                                  lambda: 'n/a')(),
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unitype,
        'bits': bits,
        }
Example #12
0
def print_platform_info():
    """Log interpreter, platform, OS/hardware and executable-architecture
    details at DEBUG level."""
    import platform

    logging.debug("*************** PLATFORM INFORMATION ************************")

    logging.debug("==Interpreter==")
    for label, value in (("Version      :", platform.python_version()),
                         ("Version tuple:", str(platform.python_version_tuple())),
                         ("Compiler     :", platform.python_compiler()),
                         ("Build        :", str(platform.python_build()))):
        logging.debug(label + value)

    logging.debug("==Platform==")
    for label, value in (("Normal :", platform.platform()),
                         ("Aliased:", platform.platform(aliased=True)),
                         ("Terse  :", platform.platform(terse=True))):
        logging.debug(label + value)

    logging.debug("==Operating System and Hardware Info==")
    for label, value in (("uname:", str(platform.uname())),
                         ("system   :", platform.system()),
                         ("node     :", platform.node()),
                         ("release  :", platform.release()),
                         ("version  :", platform.version()),
                         ("machine  :", platform.machine()),
                         ("processor:", platform.processor())):
        logging.debug(label + value)

    logging.debug("==Executable Architecture==")
    logging.debug("interpreter:" + str(platform.architecture()))
    logging.debug("/bin/ls    :" + str(platform.architecture("/bin/ls")))
    logging.debug("*******************************************************")
Example #13
0
def get_python_info():
    """Collect interpreter metadata (argv, executable, version, compiler,
    build date, optional-feature availability) into a dict."""
    # compiler/build_date are parsed from sys.version; include the raw
    # (whitespace-normalised) version string too in case parsing failed.
    features = {
        "openssl": OPENSSL_VERSION,
        "expat": EXPAT_VERSION,
        "sqlite": SQLITE_VERSION,
        "tkinter": TKINTER_VERSION,
        "zlib": ZLIB_VERSION,
        "unicode_wide": HAVE_UCS4,
        "readline": HAVE_READLINE,
        "64bit": IS_64BIT,
        "ipv6": HAVE_IPV6,
        "threading": HAVE_THREADING,
        "urandom": HAVE_URANDOM,
    }

    return {
        "argv": _escape_shell_args(sys.argv),
        "bin": sys.executable,
        "version": " ".join(sys.version.split()),
        "compiler": platform.python_compiler(),
        "build_date": platform.python_build()[1],
        "version_info": list(sys.version_info),
        "features": features,
    }
Example #14
0
def get_machine_details():
    """Return a dict of host and interpreter details (Python 2 era)."""

    import platform
    if _debug:
        print 'Getting machine details...'
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    # NOTE(review): lexicographic string comparison — fine for '2.x' vs
    # '2.0' but fragile in general (e.g. '10.0' < '2.0').
    if python > '2.0':
        # Probe the unicode build width: unichr(100000) raises ValueError
        # on narrow (UCS2) builds and succeeds on wide (UCS4) builds.
        try:
            unichr(100000)
        except ValueError:
            # UCS2 build (standard)
            unicode = 'UCS2'  # NOTE(review): shadows the Python 2 builtin
        else:
            # UCS4 build (most recent Linux distros)
            unicode = 'UCS4'
    else:
        unicode = None
    bits, linkage = platform.architecture()  # linkage is unused below
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unicode,
        'bits': bits,
        }
Example #15
0
    def test_current_version(self):
        """A hit reporting the current agent version gets an empty 200
        response and is recorded with the submitted platform details."""
        ver = Version.objects.get(pk=1)
        data = {
            "version": ver.version,
            "os.name": platform.system(),
            "os.arch": platform.machine(),
            "os.version": getOSVersion(),
            "python.version": platform.python_version(),
            "python.compiler": platform.python_compiler(),
            "python.build": platform.python_build(),
        }

        request = self.factory.get(reverse('registry_hit'), data,
                                   HTTP_USER_AGENT='OMERO.test')
        response = views_hit(request)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, '')

        # The hit row should echo everything we sent (compiler is
        # truncated to 50 chars by the model field).
        hit = Hit.objects.get(agent_version=data["version"])
        self.assertEqual(hit.agent.agent_name, 'OMERO.test')
        self.assertEqual(hit.os_name, data["os.name"])
        self.assertEqual(hit.os_arch, data["os.arch"])
        self.assertEqual(hit.os_version, data["os.version"])
        self.assertEqual(hit.python_version, data["python.version"])
        self.assertEqual(hit.python_compiler, data["python.compiler"][:50])
        self.assertEqual(hit.python_build, data["python.build"][1])
Example #16
0
File: config.py Project: i386x/doit
    def __probe_host_interpreter(self, *args, **kwargs):
        """Detects host interpreter.

        :param tuple args: A positional arguments.
        :param dict kwargs: A keyword arguments.

        Sets self.__host_interpreter / __host_interpreter_version; on
        failure resets both, records the reason and clears __is_ok.
        """

        self.__host_interpreter = ""
        self.__host_interpreter_version = UNUSED_VERSION
        try:
            # Map the implementation name to the project's interpreter ids;
            # unknown implementations map to "".
            self.__host_interpreter = {
                'cpython': PYTHON_CPYTHON,
                'ironpython': PYTHON_IRON,
                'jython': PYTHON_JYTHON,
                'pypy': PYTHON_PYPY
            }.get(platform.python_implementation().lower(), '')
            self.__host_interpreter_version = self.__to_version(
                platform.python_version()
            )
            python_compiler = platform.python_compiler()
            if python_compiler:
                if not self.__host_interpreter_version:
                    self.__host_interpreter_version = Version(0, 0, 0, -1, "")
                # Keep the compiler string as auxiliary version info.
                self.__host_interpreter_version.info = python_compiler
        except Exception as e:
            self.__host_interpreter = ""
            self.__host_interpreter_version = UNUSED_VERSION
            if not self.__what_is_wrong:
                self.__what_is_wrong = repr(e)
            self.__is_ok = False
        # BUG FIX: removed the trailing bare "except:" clause, which
        # silently swallowed BaseException (KeyboardInterrupt, SystemExit)
        # without recording any reason; such exceptions now propagate.
Example #17
0
    def is_android(cls):
        """Check whether the current system is Android; the result is
        cached on the class after the first successful probe."""
        if cls._is_android is not None:
            return cls._is_android

        try:
            # Check android platform criteria
            if not os.path.exists('/system/build.prop'):
                # Couldn't find "build.prop" file
                cls._is_android = False
            else:
                has_runtime = os.path.exists('/system/lib/libandroid_runtime.so')
                if has_runtime:
                    # Found "libandroid_runtime.so" file
                    log.info('Detected android system (found the "libandroid_runtime.so" file)')
                    cls._is_android = True
                elif '-google' in platform.python_compiler():
                    # Found "-google" in the python compiler attribute
                    log.info('Detected android system (found "-google" in the python compiler attribute)')
                    cls._is_android = True
                else:
                    log.warn('Found the "build.prop" file, but could\'t confirm if the system is running android')
                    cls._is_android = False

        except Exception as ex:
            # Leave the cache untouched so the next call retries.
            log.warn('Unable to check if the system is running android: %s', ex, exc_info=True)

        return cls._is_android
Example #18
0
def test_arraywriters():
    """Round-trip smoke test for the array-writer classes over the
    supported numeric dtypes, including byteswapped and 2D inputs."""
    # sparc64 + GCC builds hit bus errors for complex types on at least
    # numpy 1.4.1 through 1.6.1, so restrict the dtype list there.
    sparc_gcc = machine() == 'sparc64' and python_compiler().startswith('GCC')
    test_types = FLOAT_TYPES + IUINT_TYPES if sparc_gcc else NUMERIC_TYPES
    for writer_cls in (SlopeInterArrayWriter, SlopeArrayWriter, ArrayWriter):
        for dtype_ in test_types:
            arr = np.arange(10, dtype=dtype_)
            writer = writer_cls(arr)
            assert_true(writer.array is arr)
            assert_equal(writer.out_dtype, arr.dtype)
            assert_array_equal(arr, round_trip(writer))
            # Byteswapped input is accepted; compare against the original
            # array because POWER7 ran into trouble using the byteswapped
            # copy directly.
            swapped = arr.byteswap().newbyteorder('S')
            assert_array_equal(arr, round_trip(writer_cls(swapped)))
            assert_array_equal(arr, round_trip(writer_cls(swapped, arr.dtype)))
            # 2D input round-trips in the default (Fortran), 'F' and 'C'
            # orders; the 'C' result must be C-contiguous.
            arr2 = np.reshape(arr, (2, 5))
            writer2 = writer_cls(arr2)
            assert_array_equal(arr2, round_trip(writer2))
            assert_array_equal(arr2, round_trip(writer2, 'F'))
            back_c = round_trip(writer2, 'C')
            assert_array_equal(arr2, back_c)
            assert_true(back_c.flags.c_contiguous)
Example #19
0
    def test_old_version(self):
        """Reporting an outdated agent version should return the upgrade
        message while still recording the hit."""
        ver = Version.objects.get(pk=1)
        data = {
            "version": '0.0.0',
            "os.name": platform.system(),
            "os.arch": platform.machine(),
            "os.version": getOSVersion(),
            "python.version": platform.python_version(),
            "python.compiler": platform.python_compiler(),
            "python.build": platform.python_build(),
        }

        request = self.factory.get(reverse('registry_hit'), data,
                                   HTTP_USER_AGENT='OMERO.test')
        response = views_hit(request)

        self.assertEqual(response.status_code, 200)
        expected = ('Please upgrade to %s. See '
                    'http://downloads.openmicroscopy.org/latest-stable/omero'
                    ' for the latest version.') % ver.version
        self.assertEqual(response.content, expected)

        # The hit row should echo everything we sent (compiler is
        # truncated to 50 chars by the model field).
        hit = Hit.objects.get(agent_version=data["version"])
        self.assertEqual(hit.agent.agent_name, 'OMERO.test')
        self.assertEqual(hit.os_name, data["os.name"])
        self.assertEqual(hit.os_arch, data["os.arch"])
        self.assertEqual(hit.os_version, data["os.version"])
        self.assertEqual(hit.python_version, data["python.version"])
        self.assertEqual(hit.python_compiler, data["python.compiler"][:50])
        self.assertEqual(hit.python_build, data["python.build"][1])
Example #20
0
def get_system_details():
    """Return a dictionary with information about the system
    (host platform, interpreter build, unicode width, VISA library paths).
    """
    buildno, builddate = platform.python_build()
    # sys.maxunicode distinguishes narrow (UCS2) from wide (UCS4) builds.
    # FIX: dropped the unused `python` local.
    if sys.maxunicode == 65535:
        # UCS2 build (standard)
        unitype = 'UCS2'
    else:
        # UCS4 build (most recent Linux distros)
        unitype = 'UCS4'
    bits, linkage = platform.architecture()

    from . import ctwrapper

    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        # Older platform modules may lack python_implementation().
        'implementation': getattr(platform, 'python_implementation',
                                  lambda: 'n/a')(),
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unitype,
        'bits': bits,
        'visa': get_library_paths(ctwrapper)
        }
Example #21
0
    def __init__(self):
        """Detect the host OS family, Linux distro description, hardware
        platform and a gcc/word-size architecture tag."""
        # BUG FIX: self.type was left unset on unrecognised platforms,
        # which made the checks below raise AttributeError.
        self.type = "Unknown"

        # Linux ("linux2" on Python 2, "linux" on Python 3)
        if sys.platform.startswith("linux"):
            self.type = "Linux"

        # MacOs
        if sys.platform == "mac" or sys.platform == "darwin":
            self.type = "Darwin"

        # Windows
        if sys.platform == "win32":
            self.type = "Win"

        if self.type == "Linux":
            # Distro description, e.g. "Ubuntu 20.04.6 LTS".
            out = getstatusoutput("lsb_release -d")
            if out[0] == 0:
                self.ver = out[1].split("Description:")[1].strip()
            # Hardware platform reported by uname.
            out = getstatusoutput("uname -i")
            if out[0] == 0:
                self.platform = out[1].strip()

        try:
            sizeofint = platform.architecture()[0]
            # major+minor of the gcc that built Python, e.g. "gcc46_64bit".
            gccver = ''.join(platform.python_compiler().split()[-1].split('.')[:2])
            self.arch = 'gcc' + gccver + '_' + sizeofint
        except Exception:
            # self.arch stays unset if the compiler string is unexpected.
            pass
Example #22
0
def main():
    """Fetch required NLTK data, then print a host-platform and
    interpreter summary framed by separator banners."""
    nltk.download('stopwords')
    nltk.download('vader_lexicon')

    bar = "\n================================================================================\n"
    print(bar)
    print("---------------------------------- Platform Information ------------------------")
    # OS / hardware probes, in the original output order.
    for attr in ('machine', 'node', 'processor', 'release',
                 'system', 'version', 'uname'):
        print('{}: {}'.format(attr, getattr(platform, attr)()))

    # Interpreter probes.
    for attr in ('python_build', 'python_compiler', 'python_branch',
                 'python_implementation', 'python_revision',
                 'python_version'):
        print('{}: {}'.format(attr, getattr(platform, attr)()))

    print(bar)
Example #23
0
def display_platform_information():
    """
    Display platform information.
    """
    import platform
    output_string = '\n-- INFORMATION: PLATFORM & OS ---------\n'

    try:
        # Each probe is called lazily inside the loop so a failure leaves
        # the already-collected lines in place, as before.
        for name, fn in (('OS', platform.platform),
                         ('OS release version', platform.version),
                         ('machine', platform.machine),
                         ('node', platform.node),
                         ('python version', platform.python_version),
                         ('python build', platform.python_build),
                         ('python compiler', platform.python_compiler)):
            output_string = add_param_to_output(output_string, name, fn())
    except Exception:
        output_string += 'Some platform information cannot be displayed\n'
    output_string += '----------------------------------------'
    neon_logger.display(output_string)
Example #24
0
    def report(self, *args):
        """Build a Markdown bug report from the entry text, environment
        details and the stored traceback, open a prefilled GitHub issue
        in the browser, then quit the application."""
        report = ""
        report += self.entry.text.replace("(optional)", "") + "\n\n---\n\n"
        report += "**Application:** {0}\n".format(APP_NAME)
        report += "**Version:** {0}\n".format(APP_VERSION)
        report += "**Platform:** {0}\n".format(platform)
        report += "**Distro:** {0}\n".format(platfm.dist())
        report += "**OS release:** {0}\n".format(platfm.release())
        if platform == "macosx":
            # BUG FIX: mac_ver was formatted without being called, which
            # embedded the bound-method repr instead of the version info.
            report += "**Mac version:** {0}\n".format(platfm.mac_ver())
        if platform == "win":
            report += "**Win32 version:** {0}\n".format(platfm.win32_ver())
        report += "**Uname:** {0}\n".format(platfm.uname())
        report += "**Python version:** {0}\n".format(platfm.python_version())
        report += "**Python branch:** {0}\n".format(platfm.python_branch())
        report += "**Python revision:** {0}\n".format(platfm.python_revision())
        report += "**Python build:** {0}\n".format(platfm.python_build())
        report += "**Python implementation:** {0}\n".format(platfm.python_implementation())
        report += "**Python compiler:** {0}\n".format(platfm.python_compiler())
        report += "**Kivy version:** {0}\n".format(kivy.__version__)
        report += "**Prefix:** {0}\n".format(sys.prefix)
        report += "\n\n---\n\n<pre>\n"
        report += self.exception
        report += "</pre>"

        # Title: "<ExceptionName> in <file ... line>", with $HOME shortened.
        exc = traceback.format_exception_only(*self.exc_info[0:2])[0].split(":")[0]
        last = traceback.format_tb(self.exc_info[-1])[-1].replace("  File", "file").split("\n")[0].replace(misc.home, "~")

        title = "{exception} in {file}".format(exception=exc, file=last)

        from urllib import quote  # Python 2 stdlib location

        open_url("https://github.com/Davideddu/Karaokivy/issues/new?title={title}&body={body}&labels=bug".format(title=quote(title), body=quote(report)))

        self.quit()
def main():
    """Print the full set of platform-module probes, one per line."""
    # Same probes and order as before; system_alias is intentionally
    # skipped (it requires arguments).
    probes = ("architecture", "machine", "node", "processor",
              "python_build", "python_compiler", "python_branch",
              "python_implementation", "python_revision",
              "python_version", "python_version_tuple",
              "release", "system", "version", "uname", "win32_ver")
    for name in probes:
        print("platform.{}: {}".format(name, getattr(p, name)()))
Example #26
0
def makeLog():
    """Write a platform/interpreter report to ``pyNastran.log`` and echo it.

    Each line is ``<name> = <value>`` with the name left-justified to a
    fixed 21-character column (matching the original layout).
    """
    # platform.linux_distribution() was removed in Python 3.8; fall back to
    # an empty result so the log can still be produced on modern interpreters.
    linux_distribution = getattr(platform, 'linux_distribution',
                                 lambda: ('', '', ''))
    # (name, value) pairs in the original report order.  "system" really does
    # appear twice in the legacy log format, so it is kept twice here.
    entries = [
        ('sys.version', sys.version),
        ('sys.version_info', str(sys.version_info)),
        ('machine', platform.machine()),
        ('platform', platform.platform()),
        ('processor', platform.processor()),
        # The original label misspelled this as "architecure".
        ('architecture', str(platform.architecture())),
        ('python_branch', platform.python_branch()),
        ('python_revision', platform.python_revision()),
        ('win32_ver', str(platform.win32_ver())),
        ('version', platform.version()),
        ('uname', str(platform.uname())),
        ('system', platform.system()),
        ('python_build', str(platform.python_build())),
        ('python_compiler', platform.python_compiler()),
        ('python_implementation', platform.python_implementation()),
        ('system', platform.system()),
        ('mac_ver', str(platform.mac_ver())),
        ('linux_distribution', str(linux_distribution())),
        ('libc_ver', str(platform.libc_ver())),
    ]
    msg = ''.join('%-21s = %s\n' % entry for entry in entries)
    print(msg)
    # Context manager guarantees the log file is closed even on write errors.
    with open('pyNastran.log', 'w') as f:
        f.write(msg)
def get_python_info(f):
    """Write Python interpreter details to the open text stream *f*.

    The report covers version, compiler, build, and ``sys.path``.  A warning
    line is emitted when the interpreter is not a 2.7.x release, since the
    host project (OpenMDAO, per the message) only supports that series.

    Parameters
    ----------
    f : file-like
        Open, writable text stream (e.g. a real file or ``io.StringIO``).
    """
    f.write('\n\n================PYTHON INFORMATION================\n')
    f.write('Python Version:  ')
    vers = platform.python_version()
    f.write(vers)
    # Version components are discrete integers; int() keeps comparisons exact
    # (float would e.g. read a minor version of "10" as 10.0 and, worse,
    # invites accidental fractional comparisons).
    major, minor = (int(part) for part in vers.split('.')[:2])
    if major != 2 or minor < 7:
        f.write('\nERROR: OpenMDAO WILL NOT WORK with a python version before 2.7 or after 3.0')

    f.write('\n')
    f.write('Python Compiler:  ')
    f.write(platform.python_compiler())
    f.write('\n')
    f.write('Python Build:  ')
    f.write(str(platform.python_build()))
    f.write('\n')
    f.write('Python Path:  \n')
    for subp in sys.path:
        f.write("    " + str(subp) + "\n")
    f.write('\n')
Example #28
0
def TestPlatform():
    """Print assorted OS and interpreter facts via the ``platform`` module.

    The Python-2 ``print`` statements were converted to ``print()`` calls
    (all single-argument, so output is unchanged and the code now runs on
    both Python 2 and 3).  Runtime strings are preserved verbatim; the
    explanatory comments are translated to English.
    """
    print("----------Operation System--------------------------")
    # Python version string.
    print('取得 python 版本 : ' + platform.python_version())

    # Executable architecture, e.g. ('64bit', 'WindowsPE').
    print("取得操作系統可執行結構 : ex('64bit','WindowsPE')")
    print(platform.architecture())

    # Network node (host) name of this machine.
    print('電腦目前網路群組名稱' + platform.node())

    # OS name and version, e.g. 'Windows-7-6.1.7601-SP1'.
    print('獲取操作系統名稱及版本號 : ' + platform.platform())

    # Processor description, e.g.
    # 'Intel64 Family 6 Model 42 Stepping 7, GenuineIntel'.
    print('電腦處理器資訊 : ' + platform.processor())

    # Build number and date of this Python interpreter.
    print("獲取操作系統中 Python 的構建日期")
    print(platform.python_build())

    # Compiler used to build this interpreter.
    print('獲取系統中 python 解釋器的信息 : ' + platform.python_compiler())

    # Implementation/revision are only shown when no SCM branch is recorded.
    if platform.python_branch() == "":
        print(platform.python_implementation())
        print(platform.python_revision())
    print("platform.release : " + platform.release())
    print("platform.system : " + platform.system())

    # platform.system_alias() needs arguments, so it stays disabled here.
    # Operating-system version string.
    print('獲取操作系統版本 : ' + platform.version())
def print_python_env():
    """Print a sectioned report of interpreter, platform, OS/hardware,
    architecture, and network facts, framed by horizontal rules."""
    rule = '-------------------------------------------------------------'
    # Each section is a title plus (label, zero-arg getter) rows; labels keep
    # their original fixed-width padding so columns line up.
    sections = [
        ('Interpreter', [
            ('platform.python_version:    ', platform.python_version),
            ('platform.python_compiler:   ', platform.python_compiler),
            ('platform.python_build:      ', platform.python_build),
        ]),
        ('Platform', [
            ('platform.platform(Normal):  ', platform.platform),
            ('platform.platform(Aliased): ', lambda: platform.platform(aliased=True)),
            ('platform.platform(Terse):   ', lambda: platform.platform(terse=True)),
        ]),
        ('Operating System and Hardware Info', [
            ('platform.uname:             ', platform.uname),
            ('platform.system:            ', platform.system),
            ('platform.node:              ', platform.node),
            ('platform.release:           ', platform.release),
            ('platform.version:           ', platform.version),
            ('platform.machine:           ', platform.machine),
            ('platform.processor:         ', platform.processor),
        ]),
        ('Executable Architecture', [
            ('platform.architecture:      ', platform.architecture),
        ]),
        # (os.uname/os.getcwd rows were disabled in the original and stay out.)
        ('Network', [
            ('socket.gethostname:         ', socket.gethostname),
            ('socket.gethostbyname        ', lambda: socket.gethostbyname(socket.gethostname())),
        ]),
    ]
    print(rule)
    for index, (title, rows) in enumerate(sections):
        if index:
            print()
        print(title)
        for label, fetch in rows:
            print(label, fetch())
    print(rule)
Example #30
0
def get_system_details(visa=True):
    """Return a dictionary with information about the system.

    The dictionary covers platform, processor, interpreter build details,
    the Unicode width of this build, and the pyvisa version.  When *visa*
    is true, the discovered VISA library paths are included as well.
    """
    build_number, build_date = platform.python_build()
    # Narrow (UCS2) interpreter builds cap sys.maxunicode at 0xFFFF;
    # wide builds (most modern Linux distros) use UCS4.
    unicode_width = 'UCS2' if sys.maxunicode == 65535 else 'UCS4'
    arch_bits, arch_linkage = platform.architecture()

    # python_implementation() is missing on some very old interpreters.
    implementation = getattr(platform, 'python_implementation',
                             lambda: 'n/a')()

    details = {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': implementation,
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': build_number,
        'builddate': build_date,
        'unicode': unicode_width,
        'bits': arch_bits,
        'pyvisa': __version__
    }

    if visa:
        from . import ctwrapper
        details['visa'] = ctwrapper.WRAPPER_CLASS.get_library_paths(
            LibraryPath, read_user_library_path())

    return details
Example #31
0
    def __init__(self):
        """Set up in-memory log storage and, when running as the main app,
        redirect sys.stdout / sys.stderr and wxPython logging into this log.

        In debug mode also prints a startup banner with platform details.
        """
        self.logListeners = []
        self.eventListeners = []
        self.NativeLog = True
        self.buffer = ""
        # Holds log lines.  NOTE(review): maxlength presumably caps this
        # deque somewhere outside this method -- confirm before relying on it.
        self.data = deque()
        self.maxlength = 5000
        self.ctrl = DummyLogCtrl()
        # Captured by the inner classes below so their write() can reach us.
        log = self

        class StdOut:
            # File-like shim: forwards stdout writes to the log (info icon).
            def write(self, data):
                log.Write(data, INFO_ICON)
                if eg.debugLevel:
                    try:
                        oldStdOut.write(data)
                    except:
                        # Fall back to the Windows ANSI codec when the
                        # original stream rejects the raw bytes.
                        oldStdOut.write(data.decode("mbcs"))

        class StdErr:
            # File-like shim: forwards stderr writes to the log (error icon).
            def write(self, data):
                log.Write(data, ERROR_ICON)
                if eg.debugLevel:
                    try:
                        oldStdErr.write(data)
                    except:
                        oldStdErr.write(data.decode("mbcs"))

        # Only hijack the real streams when we are the main application.
        if eg.startupArguments.isMain:
            sys.stdout = StdOut()
            sys.stderr = StdErr()
        if eg.debugLevel == 2:
            if hasattr(_oldStdErr, "_displayMessage"):
                _oldStdErr._displayMessage = False
        if eg.debugLevel:
            import platform
            import warnings
            # Make Unicode comparison warnings fatal so they get noticed.
            warnings.simplefilter('error', UnicodeWarning)
            self.PrintDebugNotice("----------------------------------------")
            self.PrintDebugNotice("        {0} started".format(eg.APP_NAME))
            self.PrintDebugNotice("----------------------------------------")
            self.PrintDebugNotice(eg.APP_NAME, "Version:", eg.Version.string)
            self.PrintDebugNotice("Machine type:", platform.machine())
            self.PrintDebugNotice("Processor:", platform.processor())
            self.PrintDebugNotice("Architecture:", platform.architecture())
            self.PrintDebugNotice(
                "Python:",
                platform.python_branch(),
                platform.python_version(),
                platform.python_implementation(),
                platform.python_build(),
                "[{0}]".format(platform.python_compiler())
            )
            self.PrintDebugNotice("----------------------------------------")

        # redirect all wxPython error messages to our log
        class MyLog(wx.PyLog):
            def DoLog(self, level, msg, dummyTimestamp):
                # Suppress very verbose wx messages (level >= 6).
                if (level >= 6):
                    return
                sys.stderr.write("wxError%d: %s\n" % (level, msg))
        wx.Log.SetActiveTarget(MyLog())
Example #32
0
    pixel_wand.load(library, IM_VERSION.value, IM_QUANTUM_DEPTH.value, IM_HDRI)
    drawing_wand.load(library, IM_VERSION.value)
    del IM_HDRI, IM_QUANTUM_DEPTH, IM_VERSION

except AttributeError:
    raise ImportError('MagickWand shared library not found or incompatible\n'
                      'Original exception was raised in:\n' +
                      traceback.format_exc())

#: (:class:`ctypes.CDLL`) The C standard library.
libc = None

if platform.system() == 'Windows':
    msvcrt = ctypes.util.find_msvcrt()
    # workaround -- the newest visual studio DLL is named differently:
    # 'MSC v.1900' marks a CPython built with Visual Studio 2015, whose C
    # runtime moved to the universal CRT / vcruntime140.dll, so
    # find_msvcrt() can come back empty there.
    if not msvcrt and '1900' in platform.python_compiler():
        msvcrt = 'vcruntime140.dll'
    if msvcrt:
        libc = ctypes.CDLL(msvcrt)
else:
    # Non-Windows: ask ctypes for the platform's libc first.
    libc_path = ctypes.util.find_library('c')
    if libc_path:
        libc = ctypes.cdll.LoadLibrary(libc_path)
    else:
        # Attempt to guess popular versions of libc
        libc_paths = ('libc.so.6', 'libc.so', 'libc.a', 'libc.dylib',
                      '/usr/lib/libc.dylib')
        # First candidate that loads wins; libc stays None if all fail.
        for libc_path in libc_paths:
            try:
                libc = ctypes.cdll.LoadLibrary(libc_path)
                break
Example #33
0
# -*- coding: utf-8 -*-

from setuptools import setup, Extension
from glob import glob
import platform
import io
import os

# Pick C++ flags matching the compiler that built this interpreter:
# a compiler string starting with "MSC" means Microsoft Visual C++.
if platform.python_compiler().startswith("MSC"):
    args = ["/std:c++17"]
else:
    args = ["-std=c++17", "-flto", "-Wno-date-time"]

# Engine feature switches passed straight to the C++ preprocessor.
args.extend(["-DLARGEBOARDS", "-DPRECOMPUTED_MAGICS", "-DNNUE_EMBEDDING_OFF"])

# platform.architecture() is a tuple like ('64bit', 'ELF'); tuple membership
# only matches the exact element '64bit', which is what architecture()[0]
# reports on 64-bit builds.
if "64bit" in platform.architecture():
    args.append("-DIS_64BIT")

CLASSIFIERS = [
    "Development Status :: 3 - Alpha",
    "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
    "Programming Language :: Python :: 3",
    "Operating System :: OS Independent",
]

with io.open("README.md", "r", encoding="utf8") as fh:
    long_description = fh.read().strip()

# All engine translation units, including Syzygy tablebase and NNUE code.
sources = glob("src/*.cpp") + glob("src/syzygy/*.cpp") + glob(
    "src/nnue/*.cpp") + glob("src/nnue/features/*.cpp")
ffish_source_file = os.path.normcase("src/ffishjs.cpp")
Example #34
0
import os
import sys
import platform
from time import ctime

from rich.console import Console

# Define styled print.
# NOTE: this deliberately shadows the builtin print for the whole module.
print = Console().print

# Style names used with rich markup elsewhere in the module.
purple = "blue_violet"
yellow = "yellow1"
red = "red3"
""" This function will stop the program when a critical error occurs """

py_version = platform.python_version()
py_build = "{}, DATE: {}".format(*platform.python_build())
py_compiler = platform.python_compiler()
# 'os' is needed here but was never imported in the original snippet.
script_location = os.path.dirname(os.path.realpath(sys.argv[0]))
current_location = os.getcwd()

system = platform.system()
# (sic: "realese" typo kept -- renaming would break existing references.)
system_realese = platform.release()
system_version = platform.version()
# NOTE(review): the braces make this a *set* containing one formatted string;
# a plain string was presumably intended -- confirm with callers before
# changing the type.
system_architecture = {"{} {}".format(*platform.architecture())}
system_processor = platform.processor()
system_machine = platform.machine()
system_node = platform.node()
system_time = ctime()


def CriticalError(message, error):
    print(f"""
Example #35
0
def get_sys_info():
    """Print a best-effort diagnostic report -- hardware, OS, Python, LLVM,
    CUDA, and conda details -- formatted for pasting into bug reports.

    Every probe is wrapped so that a failure in one section cannot stop the
    rest of the report; the sharing disclaimer is always printed last.
    """
    # delay these imports until now as they are only needed in this
    # function which then exits.
    import platform
    import json
    from numba import cuda as cu
    from numba.cuda import cudadrv
    from numba.cuda.cudadrv.driver import driver as cudriver
    import textwrap as tw
    import ctypes as ct
    import llvmlite.binding as llvmbind
    import locale
    from datetime import datetime
    from itertools import chain
    from subprocess import check_output, CalledProcessError

    try:
        # Left-justified 21-char label, colon, value -- used for every row.
        fmt = "%-21s : %-s"
        print("-" * 80)
        print("__Time Stamp__")
        print(datetime.utcnow())
        print("")

        print("__Hardware Information__")
        print(fmt % ("Machine", platform.machine()))
        print(fmt % ("CPU Name", llvmbind.get_host_cpu_name()))
        features = sorted([
            key for key, value in llvmbind.get_host_cpu_features().items()
            if value
        ])
        cpu_feat = tw.fill(' '.join(features), 80)
        print(fmt % ("CPU Features", ""))
        print(cpu_feat)
        print("")

        print("__OS Information__")
        print(fmt % ("Platform", platform.platform(aliased=True)))
        print(fmt % ("Release", platform.release()))
        system_name = platform.system()
        print(fmt % ("System Name", system_name))
        print(fmt % ("Version", platform.version()))
        try:
            # NOTE(review): platform.linux_distribution() was removed in
            # Python 3.8; on such interpreters this raises AttributeError
            # and is swallowed by the bare except below.
            if system_name == 'Linux':
                info = platform.linux_distribution()
            elif system_name == 'Windows':
                info = platform.win32_ver()
            elif system_name == 'Darwin':
                info = platform.mac_ver()
            else:
                raise RuntimeError("Unknown system.")
            # Flatten the version tuple, substituting a space for empties.
            buf = ''.join([
                x if x != '' else ' ' for x in list(chain.from_iterable(info))
            ])
            print(fmt % ("OS specific info", buf))

            if system_name == 'Linux':
                print(fmt % ("glibc info", ' '.join(platform.libc_ver())))
        except:
            print("Error: System name incorrectly identified or unknown.")
        print("")

        print("__Python Information__")
        print(fmt % ("Python Compiler", platform.python_compiler()))
        print(fmt %
              ("Python Implementation", platform.python_implementation()))
        print(fmt % ("Python Version", platform.python_version()))
        print(fmt % ("Python Locale ", ' '.join(
            [x for x in locale.getdefaultlocale() if x is not None])))

        print("")
        print("__LLVM information__")
        print(fmt % ("LLVM version", '.'.join(
            [str(k) for k in llvmbind.llvm_version_info])))

        print("")
        print("__CUDA Information__")
        # Look for GPUs
        try:
            cu.list_devices()[0]  # will a device initialise?
        except BaseException as e:
            msg_not_found = "CUDA driver library cannot be found"
            # NOTE(review): this literal looks redacted ("******"); the
            # original message presumably described CUDA being disabled by
            # the user -- confirm against upstream numba sources.
            msg_disabled_by_user = "******"
            msg_end = " or no CUDA enabled devices are present."
            msg_generic_problem = "Error: CUDA device intialisation problem."
            msg = getattr(e, 'msg', None)
            if msg is not None:
                if msg_not_found in msg:
                    err_msg = msg_not_found + msg_end
                elif msg_disabled_by_user in msg:
                    err_msg = msg_disabled_by_user + msg_end
                else:
                    err_msg = msg_generic_problem + " Message:" + msg
            else:
                err_msg = msg_generic_problem + " " + str(e)
            # Best effort error report
            print("%s\nError class: %s" % (err_msg, str(type(e))))
        else:
            try:
                cu.detect()
                dv = ct.c_int(0)
                cudriver.cuDriverGetVersion(ct.byref(dv))
                print(fmt % ("CUDA driver version", dv.value))
                print("CUDA libraries:")
                cudadrv.libs.test(sys.platform, print_paths=False)
            except:
                print(
                    "Error: Probing CUDA failed (device and driver present, runtime problem?)\n"
                )

        # Look for conda and conda information
        print("")
        print("__Conda Information__")
        cmd = ["conda", "info", "--json"]
        try:
            conda_out = check_output(cmd)
        except Exception as e:
            print("Conda not present/not working.\nError was %s\n" % e)
        else:
            # conda emits pretty-printed JSON; re-joining the lines yields a
            # single parseable string.
            data = ''.join(conda_out.decode("utf-8").splitlines())
            jsond = json.loads(data)
            keys = [
                'conda_build_version', 'conda_env_version', 'platform',
                'python_version', 'root_writable'
            ]
            for k in keys:
                try:
                    print(fmt % (k, jsond[k]))
                except KeyError:
                    pass

            # get info about current environment
            cmd = ["conda", "list"]
            try:
                conda_out = check_output(cmd)
            except CalledProcessError as e:
                print("Error: Conda command failed. Error was %s\n" % e.output)
            else:
                print("")
                print("__Current Conda Env__")
                data = conda_out.decode("utf-8").splitlines()
                for k in data:
                    if k[0] != '#':  # don't show where the env is, personal data
                        print(k)

        print("-" * 80)

    except Exception as e:
        print("Error: The system reporting tool has failed unexpectedly.")
        print("Exception was:")
        print(e)

    finally:
        # Always print the copy/paste + privacy disclaimer, even on failure.
        print(
            "%s" %
            "If requested, please copy and paste the information between\n"
            "the dashed (----) lines, or from a given specific section as\n"
            "appropriate.\n\n"
            "=============================================================\n"
            "IMPORTANT: Please ensure that you are happy with sharing the\n"
            "contents of the information present, any information that you\n"
            "wish to keep private you should remove before sharing.\n"
            "=============================================================\n")
Example #36
0
    fp.write(script)


# Directory containing this setup script; all data files resolve against it.
here = os.path.abspath(os.path.dirname(__file__))

# Package meta-data.
NAME = 'plasticity'
DESCRIPTION = 'Unsupervised Neural Networks with biological-inspired learning rules'
URL = 'https://github.com/Nico-Curti/plasticity'
EMAIL = '[email protected], [email protected], [email protected], [email protected]'
AUTHOR = 'Nico Curti, Lorenzo Squadrani, Simone Gasperini, Mattia Ceccarelli'
REQUIRES_PYTHON = '>=3.5'
VERSION = None
KEYWORDS = 'neural-networks deep-neural-networks deep-learning image-classification'

# Compiler string of the interpreter build (e.g. "GCC 9.4.0" / "MSC v.1916").
# NOTE(review): presumably used elsewhere to choose matching C++ build flags
# -- confirm in the rest of the setup script.
CPP_COMPILER = platform.python_compiler()
README_FILENAME = os.path.join(here, 'README.md')
REQUIREMENTS_FILENAME = os.path.join(here, 'requirements.txt')
VERSION_FILENAME = os.path.join(here, 'plasticity', '__version__.py')

ENABLE_OMP = False

# A custom "--omp" flag enables OpenMP; it must be removed from sys.argv so
# setuptools does not choke on the unknown option.
if '--omp' in sys.argv:
  ENABLE_OMP = True
  sys.argv.remove('--omp')

# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
  LONG_DESCRIPTION = read_description(README_FILENAME)
Example #37
0
# Best-effort read of the already-opened README stream `f`; an empty
# description is acceptable, but the handle is always closed.
try:
    try:
        readme_content = f.read()
    except:
        readme_content = ""
finally:
    f.close()

# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple
# ships are built with it. This is a problem starting with Xcode 5.1
# since clang 3.4 errors out when it encounters unrecognized compiler
# flags. This hack removes -mno-fused-madd from the CFLAGS automatically
# generated by distutils for Apple provided pythons, allowing C extension
# builds to complete without error. The inspiration comes from older
# versions of distutils.sysconfig.get_config_vars.
if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower():
    from distutils.sysconfig import get_config_vars
    # get_config_vars() returns a cached dict, so mutating it here affects
    # every later consumer of the config vars in this process.
    res = get_config_vars()
    for key in ('CFLAGS', 'PY_CFLAGS'):
        if key in res:
            flags = res[key]
            flags = re.sub('-mno-fused-madd', '', flags)
            res[key] = flags


class test(Command):
    description = "run the tests"

    user_options = [
        ("test-module=", "m", "Discover tests in specified module"),
        ("test-suite=", "s",
Example #38
0
import os
import platform

# Python 2 script: dumps platform facts, then probes the osg (OpenSceneGraph)
# Python bindings if they can be imported.
print "-" * 80

#platform specifics
# see http://docs.python.org/library/platform.html
print "Platform         ", platform.platform()
print "Architecture     ", platform.architecture()
print "Machine          ", platform.machine()

print "--"

#python specifics
print "Python build     ", platform.python_build()
print "Python compiler  ", platform.python_compiler()

print "--"

#osg specifics
try:
    # Import failure is reported but deliberately not fatal here; the
    # attribute accesses below will still raise if osg is missing.
    try:
        import osg
    except:
        print "import osg failed"
    print "osg.osgGetLibraryName                  ", osg.osgGetLibraryName()
    print "osg.osgGetVersion                      ", osg.osgGetVersion()
    print "osg.osgGetSOVersion                    ", osg.osgGetSOVersion()
    print "osg Python library location          : ", osg.__file__
    print "osg Dynamic linked library location  : ", osg._osg
Example #39
0
    def test_sys_version(self):
        """Exercise platform._sys_version() parsing and the python_*()
        accessors against canned sys.version strings (CPython, IronPython,
        Jython, PyPy)."""
        # Old test.
        # NOTE: "input" shadows the builtin here; kept as-is from the
        # original test.
        for input, output in (
            ('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
             ('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21',
              'GCC 3.3.4 (pre 3.3.5 20040809)')),
            ('IronPython 1.0.60816 on .NET 2.0.50727.42',
             ('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
            ('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
             ('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
            # The remaining cases probe tolerance to truncated build info.
            ('2.4.3 (truncation, date, t) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
            ('2.4.3 (truncation, date, ) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
            ('2.4.3 (truncation, date,) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
            ('2.4.3 (truncation, date) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
            ('2.4.3 (truncation, d) \n[GCC]', ('CPython', '2.4.3', '', '',
                                               'truncation', 'd', 'GCC')),
            ('2.4.3 (truncation, ) \n[GCC]', ('CPython', '2.4.3', '', '',
                                              'truncation', '', 'GCC')),
            ('2.4.3 (truncation,) \n[GCC]', ('CPython', '2.4.3', '', '',
                                             'truncation', '', 'GCC')),
            ('2.4.3 (truncation) \n[GCC]', ('CPython', '2.4.3', '', '',
                                            'truncation', '', 'GCC')),
        ):
            # branch and revision are not "parsed", but fetched
            # from sys._mercurial.  Ignore them
            (name, version, branch, revision, buildno, builddate, compiler) \
                   = platform._sys_version(input)
            self.assertEqual(
                (name, version, '', '', buildno, builddate, compiler), output)

        # Tests for python_implementation(), python_version(), python_branch(),
        # python_revision(), python_build(), and python_compiler().
        # Keys are (sys.version, sys._mercurial, sys.platform) triples;
        # values are the expected accessor results.
        sys_versions = {
            ("2.6.1 (r261:67515, Dec  6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]", ('CPython', 'tags/r261', '67515'), self.save_platform):
            ("CPython", "2.6.1", "tags/r261", "67515",
             ('r261:67515', 'Dec  6 2008 15:26:00'),
             'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
            ("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli"):
            ("IronPython", "2.0.0", "", "", ("", ""), ".NET 2.0.50727.3053"),
            ("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli"):
            ("IronPython", "2.6.1", "", "", ("", ""), ".NET 2.0.50727.1433"),
            ("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli"):
            ("IronPython", "2.7.4", "", "", ("", ""),
             "Mono 4.0.30319.1 (32-bit)"),
            ("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]", ('Jython', 'trunk', '6107'), "java1.5.0_16"):
            ("Jython", "2.5.0", "trunk", "6107", ('trunk:6107', 'Mar 26 2009'),
             "java1.5.0_16"),
            ("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]", ('PyPy', 'trunk', '63378'), self.save_platform):
            ("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'), "")
        }
        # Patch sys.version / sys._mercurial / sys.platform for each case
        # (the test fixture is expected to restore the real values).
        for (version_tag, subversion, sys_platform), info in \
                sys_versions.items():
            sys.version = version_tag
            if subversion is None:
                if hasattr(sys, "_mercurial"):
                    del sys._mercurial
            else:
                sys._mercurial = subversion
            if sys_platform is not None:
                sys.platform = sys_platform
            self.assertEqual(platform.python_implementation(), info[0])
            self.assertEqual(platform.python_version(), info[1])
            self.assertEqual(platform.python_branch(), info[2])
            self.assertEqual(platform.python_revision(), info[3])
            self.assertEqual(platform.python_build(), info[4])
            self.assertEqual(platform.python_compiler(), info[5])
Example #40
0
def get_platform_info() -> dict:
    """Collect platform, OS, hardware and interpreter facts into a dict.

    All values come from the standard :mod:`platform` module.  The result
    maps:

    * ``'platform'`` -- summary string of the current platform.
    * ``'node'`` -- name of the host machine.
    * ``'os'`` -- ``{'system', 'release', 'version'}``: general OS name
      (e.g. ``"Linux"``, ``"Windows"``), release number, and specific
      release version.
    * ``'hardware'`` -- ``{'machine', 'processor'}``: general architecture
      and processor name (the processor is not provided on every platform
      and may simply repeat the machine value).
    * ``'python'`` -- ``{'build', 'compiler', 'implementation', 'version'}``:
      build as a ``(number, date)`` string tuple, the compiler used to
      build the interpreter, the implementation name (e.g. ``"CPython"``),
      and the language version in use.

    Returns
    -------
    dict
        A dictionary with the nested structure described above.
    """
    return {
        'platform': platform.platform(),
        'node': platform.node(),
        'os': {
            'system': platform.system(),
            'release': platform.release(),
            'version': platform.version(),
        },
        'hardware': {
            'machine': platform.machine(),
            'processor': platform.processor(),
        },
        'python': {
            'build': platform.python_build(),
            'compiler': platform.python_compiler(),
            'implementation': platform.python_implementation(),
            'version': platform.python_version(),
        },
    }
Example #41
0
        "BLPAPI_ROOT (or BLPAPI_INCDIR/BLPAPI_LIBDIR) " + \
        "environment variable isn't defined"

# Pick the matching 32/64-bit Bloomberg BLPAPI import library.
is64bit = plat.architecture()[0] == '64bit'
if is64bit:
    blpapiLibraryName = 'blpapi3_64'
else:
    blpapiLibraryName = 'blpapi3_32'

extraLinkArgs = []
# NOTE(review): the module is imported as `plat`, so `platform` here is
# presumably a lowercase platform-name string defined earlier in this
# script -- confirm it is not accidentally comparing the platform module.
if platform == 'windows':
    extraLinkArgs = ['/MANIFEST']

    # Handle the very frequent case when user need to use Visual C++ 2010
    # with Python that wants to use Visual C++ 2008.
    # 'MSC v.1500' is the compiler marker of a Visual Studio 2008 build.
    if plat.python_compiler().startswith('MSC v.1500'):
        if (not 'VS90COMNTOOLS' in os.environ) and \
                ('VS100COMNTOOLS' in os.environ):
            os.environ['VS90COMNTOOLS'] = os.environ['VS100COMNTOOLS']

# Explicit BLPAPI_LIBDIR/BLPAPI_INCDIR values win over BLPAPI_ROOT defaults.
blpapiLibraryPath = blpapiLibVar or os.path.join(blpapiRoot, lib_in_release())
blpapiIncludes = blpapiIncludesVar or os.path.join(blpapiRoot, 'include')

blpapi_wrap = Extension('blpapi._internals',
                        sources=['blpapi/internals_wrap.c'],
                        include_dirs=[blpapiIncludes],
                        library_dirs=[blpapiLibraryPath],
                        libraries=[blpapiLibraryName],
                        extra_link_args=extraLinkArgs)

versionhelper_wrap = Extension('blpapi._versionhelper',
Example #42
0
 verinfodata = re.sub(
     "__build_time_utc__ \= \{.*\}\;",
     '__build_time_utc__ = {"timestamp": ' + str(utccurtimestamp) +
     ', "year": ' + str(utccurtimetuple[0]) + ', "month": ' +
     str(utccurtimetuple[1]) + ', "day": ' + str(utccurtimetuple[2]) +
     ', "hour": ' + str(utccurtimetuple[3]) + ', "minute": ' +
     str(utccurtimetuple[4]) + ', "second": ' + str(utccurtimetuple[5]) +
     '};', verinfodata)
 if (sys.version[0] == "2"):
     '''verinfodata = verinfodata.replace('__build_python_info__ = {"python_branch": None, "python_build": None, "python_compiler": None, "python_implementation": None, "python_revision": None, "python_version": None, "python_version_tuple": None, "release": None, "system": None, "uname": None, "machine": None, "node": None, "platform": None, "processor": None, "version": None, "java_ver": None, "win32_ver": None, "mac_ver": None, "linux_distribution": None, "libc_ver": None};', '__build_python_info__ = '+str({'python_branch': platform.python_branch(), 'python_build': platform.python_build(), 'python_compiler': platform.python_compiler(), 'python_implementation': platform.python_implementation(), 'python_revision': platform.python_revision(), 'python_version': platform.python_version(), 'python_version_tuple': platform.python_version_tuple(), 'release': platform.release(), 'system': platform.system(), 'uname': platform.uname(), 'machine': platform.machine(), 'node': platform.node(), 'platform': platform.platform(), 'processor': platform.processor(), 'architecture': platform.architecture(), 'version': platform.version(), 'java_ver': platform.java_ver(), 'win32_ver': platform.win32_ver(), 'mac_ver': platform.mac_ver(), 'linux_distribution': platform.linux_distribution(), 'libc_ver': platform.libc_ver()})+';');'''
     verinfodata = re.sub(
         "__build_python_info__ \= \{.*\}\;", '__build_python_info__ = ' +
         str({
             'python_branch': platform.python_branch(),
             'python_build': platform.python_build(),
             'python_compiler': platform.python_compiler(),
             'python_implementation': platform.python_implementation(),
             'python_revision': platform.python_revision(),
             'python_version': platform.python_version(),
             'python_version_tuple': platform.python_version_tuple(),
             'release': platform.release(),
             'system': platform.system(),
             'uname': platform.uname(),
             'machine': platform.machine(),
             'node': platform.node(),
             'platform': platform.platform(),
             'processor': platform.processor(),
             'architecture': platform.architecture(),
             'version': platform.version(),
             'java_ver': platform.java_ver(),
             'win32_ver': platform.win32_ver(),
Example #43
0
def print_sysinfo():
    """Print a short summary of the Python interpreter and host system."""
    # (template, value) pairs emitted in order; the templates are kept
    # verbatim so the printed output is byte-identical to the original.
    report = [
        ('\nPython version  : {}', platform.python_version()),
        ('compiler        : {}', platform.python_compiler()),
        ('\nsystem     : {}', platform.system()),
        ('release    : {}', platform.release()),
        ('machine    : {}', platform.machine()),
        ('processor  : {}', platform.processor()),
        ('CPU count  : {}', mp.cpu_count()),
        ('interpreter: {}', platform.architecture()[0]),
    ]
    for template, value in report:
        print(template.format(value))
    print('\n\n')


# def set_ax_plot(ax, models=['base_model'], models_aka=['base_model'], y_name = 'val_dice', x_name = 'epochs', mode='summary', niters=None):
#     x_title = x_name.title()
#     if 'val' in y_name:
#         y_title = y_name.split('_')[-1].title()
#         title = 'Validation History - %s (per %s)' % (y_title, x_title)
#     else:
#         y_title = y_name.title()
#         title = 'History - %s (per %s)' % (y_title, x_title)

#     ax.set_title(title, fontdict={'fontsize':15})
#     for col, model in enumerate(models):
#         aka = models_aka[col]
#         hist_path = './%s/history/hist.json' % (model)
#         kfold = len(glob.glob(file_path_fold(hist_path,'*')))
#         with open(file_path_fold(hist_path,kfold-1), 'r') as f:
#             history = json.load(f)
#         max_epoch = np.max([len(hist['loss']) for hist in history])

#         if not niters == None: niter = niters[col]
#         else: niter = None
#         if 'epoch' in x_name: index = np.arange(1, max_epoch+1)
#         elif 'iter' in x_name: index = np.arange(1, max_epoch*niter+1, niter)
#         else: raise ValueError

#         value = np.zeros((len(history),max_epoch))
#         for i, hist in enumerate(history):
#             value[i,:len(hist[y_name])] = hist[y_name]

#         if mode == 'summary':
#             ax.plot(index, np.mean(value, axis=0), 'C%s.-' % (col+1), label='%s-%s'% (aka, y_title))
#             ax.fill_between(index, np.mean(value, axis=0)-np.std(value, axis=0), np.mean(value, axis=0)+np.std(value, axis=0),
#                             color='C%s' % (col+1), alpha=0.2)
#             if 'accuracy' in y_name : # total epoch max per each fold
#                 ax.plot(index[np.argmax(value, axis=1)], np.max(value, axis=1), 'C%s*' % (col+1), markersize=12)
#                 for j, (x,y) in enumerate(zip(index[np.argmax(value, axis=1)], np.max(value, axis=1))):
#                     ax.annotate(j+1, (x,y))
#             if 'loss' in y_name : # total epoch min per each fold
#                 ax.plot(index[np.argmax(value, axis=1)], np.min(value, axis=1), 'C%s*' % (col+1), markersize=12)
#                 for j, (x,y) in enumerate(zip(index[np.argmin(value, axis=1)], np.min(value, axis=1))):
#                     ax.annotate(j+1, (x,y))
#             # ax.plot(index, np.max(value, axis=0), 'C%s.' % (col+1), alpha=0.3) # total fold max per epoch
#             # ax.plot(index, np.min(value, axis=0), 'C%s.' % (col+1), alpha=0.3) # total fold min per epoch
#         elif mode == 'all':
#             for j, v in enumerate(value):
#                 if j == 0: ax.plot(index, v, 'C%s.-' % (col+1), label='%s-%s'% (aka, y_title), alpha=0.4)
#                 else: ax.plot(index, v, 'C%s.-' % (col+1), alpha=0.4)

#     ax.set_xlabel(x_title)
#     ax.set_ylabel(y_title)
#     ax.legend(loc='lower right', fontsize='small')

# def plot_history(models, models_aka,
#                  history_types = ['validation','train'], y_names = ['dice','precision','recall'], x_name='epochs',
#                  mode = 'summary', figsize=(20,20), niters=None):
#     fig, axes = plt.subplots(len(y_names), len(history_types), figsize=figsize)
#     if len(y_names) == 1: axes = np.expand_dims(axes, 0)
#     if len(history_types) == 1: axes = np.expand_dims(axes, -1)
#     for j in range(len(history_types)):
#         for i, y_name in enumerate(y_names):
#             if 'val' in history_types[j]: set_ax_plot(axes[i,j], models, models_aka, 'val_%s' % y_name, x_name, mode, niters)
#             else: set_ax_plot(axes[i,j], models, models_aka, y_name, x_name, mode, niters)
#     fig.tight_layout()
#     return fig

# class TensorBoardWrapper(TensorBoard):
#     '''
#     Sets the self.validation_data property for use with TensorBoard callback.

#     Image Summary with multi-modal medical 3D volumes:
#         Thumbnail of nrow x ncol 2D images (of one person)
#             nrow: number of slice (z-axis)
#             ncol:
#                    input images: number of modals
#                    bottleneck images : number of filters
#                    output images: 2 (GT, predict)
#         TODO: fix one person as reference..
#     '''

#     def __init__(self, validation_data, write_weights_histogram = True, write_weights_images=False,
#                  tb_data_steps=1,  **kwargs):
#         super(TensorBoardWrapper, self).__init__(**kwargs)
#         self.write_weights_histogram = write_weights_histogram
#         self.write_weights_images = write_weights_images
#         self.tb_data_steps = tb_data_steps
#         self.validation_data = validation_data

#         if self.embeddings_data is None and self.validation_data:
#             self.embeddings_data = self.validation_data

#     def set_model(self, model):
#         self.model = model
#         if k.backend() == 'tensorflow':
#             self.sess = k.get_session()

#         if self.histogram_freq and self.merged is None:
#             for layer in self.model.layers:
#                 for weight in layer.weights:
#                     mapped_weight_name = 'weight_%s' % weight.name.replace(':', '_')
#                     # histogram
#                     if self.write_weights_histogram: tf.summary.histogram(mapped_weight_name, weight)
#                     # gradient histogram
#                     if self.write_grads:
#                         grads = model.optimizer.get_gradients(model.total_loss,
#                                                               weight)

#                         def is_indexed_slices(grad):
#                             return type(grad).__name__ == 'IndexedSlices'
#                         grads = [
#                             grad.values if is_indexed_slices(grad) else grad
#                             for grad in grads]
#                         tf.summary.histogram('{}_grad'.format(mapped_weight_name), grads)

#                     if self.write_weights_images:
#                         w_img = tf.squeeze(weight)
#                         shape = k.int_shape(w_img)
#                         if len(shape) == 2:  # dense layer kernel case
#                             if shape[0] > shape[1]:
#                                 w_img = tf.transpose(w_img)
#                                 shape = k.int_shape(w_img)
#                             w_img = tf.reshape(w_img, [1,
#                                                        shape[0],
#                                                        shape[1],
#                                                        1])
#                         elif len(shape) == 3:  # 1d convnet case
#                             if k.image_data_format() == 'channels_last':
#                                 # switch to channels_first to display
#                                 # every kernel as a separate image
#                                 w_img = tf.transpose(w_img, perm=[2, 0, 1])
#                                 shape = k.int_shape(w_img)
#                             w_img = tf.reshape(w_img, [shape[0],
#                                                        shape[1],
#                                                        shape[2],
#                                                        1])
#                         elif len(shape) == 4: # conv2D
#                             # input_dim * output_dim, width, hieght
#                             w_img = tf.transpose(w_img, perm=[2, 3, 0, 1])
#                             shape = k.int_shape(w_img)
#                             w_img = tf.reshape(w_img, [shape[0]*shape[1],
#                                                        shape[2],
#                                                        shape[3],
#                                                        1])
#                         elif len(shape) == 5: # conv3D
#                             # input_dim * output_dim*depth, width, hieght
#                             w_img = tf.transpose(w_img, perm=[3, 4, 0, 1, 2])
#                             shape = k.int_shape(w_img)
#                             w_img = tf.reshape(w_img, [shape[0]*shape[1]*shape[2],
#                                                        shape[3],
#                                                        shape[4],
#                                                        1])
#                         elif len(shape) == 1:  # bias case
#                             w_img = tf.reshape(w_img, [1,
#                                                        shape[0],
#                                                        1,
#                                                        1])
#                         tf.summary.image(mapped_weight_name, w_img)

#                 if hasattr(layer, 'output'):
#                     if isinstance(layer.output, list):
#                         for i, output in enumerate(layer.output):
#                             tf.summary.histogram('{}_out_{}'.format(layer.name, i), output)
#                     else:
#                         tf.summary.histogram('{}_out'.format(layer.name),
#                                              layer.output)
#             #################################################################################
#             # image summary
# #             if self.write_images:
# #                 input_shape = []
# #                 input_shape[:] = self.img_shape[:]
# #                 input_shape[-1] += 2

# #                 tot_pred_image = []
# #                 for i in range(self.batch_size):
# #                     # input images, GT, prediction
# #                     mri = model.inputs[0][i]
# #                     gt = model.targets[0][i]
# #                     pred = model.outputs[0][i]
# #                     pred_image = self.tile_patches_medical(k.concatenate([mri, gt, pred], axis=-1),
# #                                                            shape=input_shape, zcut=[10,6]) # output : [1,x*nrow, y*ncol, 1]
# #                     pred_image = colorize(pred_image, cmap='inferno') # output : [x*nrow, y*ncol, 3]
# #                     tot_pred_image.append(pred_image)
# #                 tot_pred_image = k.stack(tot_pred_image) # output : [batch, x*nrow, y*ncol, 3]
# #                 shape = k.int_shape(tot_pred_image)
# #                 assert len(shape) == 4 and shape[-1] in [1, 3, 4]
# #                 tf.summary.image('prediction', tot_pred_image, max_outputs=self.batch_size)

#             #################################################################################

#         self.merged = tf.summary.merge_all()
#         #################################################################################
#         # tensor graph & file write
#         if self.write_graph:
#             self.writer = tf.summary.FileWriter(self.log_dir,
#                                                 self.sess.graph)
#         else:
#             self.writer = tf.summary.FileWriter(self.log_dir)

#         #################################################################################
#         # embedding : TODO
#         if self.embeddings_freq:
#             embeddings_layer_names = self.embeddings_layer_names

#             if not embeddings_layer_names:
#                 embeddings_layer_names = [layer.name for layer in self.model.layers
#                                           if type(layer).__name__ == 'Embedding']
#             self.assign_embeddings = []
#             embeddings_vars = {}

#             self.batch_id = batch_id = tf.placeholder(tf.int32)
#             self.step = step = tf.placeholder(tf.int32)

#             for layer in self.model.layers:
#                 if layer.name in embeddings_layer_names:
#                     embedding_input = self.model.get_layer(layer.name).output
#                     embedding_size = int(np.prod(embedding_input.shape[1:]))
#                     embedding_input = tf.reshape(embedding_input,
#                                                  (step, embedding_size))
#                     shape = (self.embeddings_data[0].shape[0], embedding_size)
#                     embedding = tf.Variable(tf.zeros(shape),
#                                             name=layer.name + '_embedding')
#                     embeddings_vars[layer.name] = embedding
#                     batch = tf.assign(embedding[batch_id:batch_id + step],
#                                       embedding_input)
#                     self.assign_embeddings.append(batch)

#             self.saver = tf.train.Saver(list(embeddings_vars.values()))

#             embeddings_metadata = {}

#             if not isinstance(self.embeddings_metadata, str):
#                 embeddings_metadata = self.embeddings_metadata
#             else:
#                 embeddings_metadata = {layer_name: self.embeddings_metadata
#                                        for layer_name in embeddings_vars.keys()}

#             config = projector.ProjectorConfig()

#             for layer_name, tensor in embeddings_vars.items():
#                 embedding = config.embeddings.add()
#                 embedding.tensor_name = tensor.name

#                 if layer_name in embeddings_metadata:
#                     embedding.metadata_path = embeddings_metadata[layer_name]

#             projector.visualize_embeddings(self.writer, config)

#     def on_epoch_end(self, epoch, logs=None):
#         logs = logs or {}

#         if not self.validation_data and self.histogram_freq:
#             raise ValueError("If printing histograms, validation_data must be "
#                              "provided, and cannot be a generator.")
#         if self.embeddings_data is None and self.embeddings_freq:
#             raise ValueError("To visualize embeddings, embeddings_data must "
#                              "be provided.")

#         if self.validation_data and self.histogram_freq:
#             if epoch % self.histogram_freq == 0: # TODO : last epoch..

#                 val_data = self.validation_data
#                 if self.batch_size == None:
#                     self.batch_size = val_data[0].shape[0]

#                 tensors = (self.model.inputs +
#                            self.model.targets +
#                            self.model.sample_weights)

#                 if self.model.uses_learning_phase:
#                     tensors += [k.learning_phase()]

#                 try:
#                     for i in range(self.tb_data_steps):
#                         x, y = val_data[i]
#                         if self.model.uses_learning_phase:
#                             batch_val = np.array([x, y, np.ones(self.batch_size, dtype=np.float32), 0.0])
#                         else:
#                             batch_val = np.array([x, y, np.ones(self.batch_size, dtype=np.float32)])

#                         assert len(batch_val) == len(tensors)
#                         feed_dict = dict(zip(tensors, batch_val))
#                         result = self.sess.run([self.merged], feed_dict=feed_dict)
#                         summary_str = result[0]
#                         self.writer.add_summary(summary_str, epoch)
#                 except:
#                     val_size = val_data[0].shape[0]
#                     i = 0
#                     while i < val_size:
#                         step = min(self.batch_size, val_size - i)
#                         if self.model.uses_learning_phase:
#                             # do not slice the learning phase
#                             batch_val = [x[i:i + step] for x in val_data[:-1]]
#                             batch_val.append(val_data[-1])
#                         else:
#                             batch_val = [x[i:i + step] for x in val_data]
#                         assert len(batch_val) == len(tensors)
#                         feed_dict = dict(zip(tensors, batch_val))
#                         result = self.sess.run([self.merged], feed_dict=feed_dict)
#                         summary_str = result[0]
#                         self.writer.add_summary(summary_str, epoch)
#                         i += self.batch_size

#         if self.embeddings_freq and self.embeddings_data is not None:
#             if epoch % self.embeddings_freq == 0: ## TODO : Last epoch..
#                 embeddings_data = self.embeddings_data
#                 for i in range(self.tb_data_steps):
#                     if type(self.model.input) == list:
#                         feed_dict = {model_input: embeddings_data[i][idx]
#                                      for idx, model_input in enumerate(self.model.input)}
#                     else:
#                         feed_dict = {self.model.input: embeddings_data[i]}

#                     feed_dict.update({self.batch_id: i, self.step: self.batch_size})

#                     if self.model.uses_learning_phase:
#                         feed_dict[k.learning_phase()] = False

#                     self.sess.run(self.assign_embeddings, feed_dict=feed_dict)
#                     self.saver.save(self.sess,
#                                     os.path.join(self.log_dir, 'keras_embedding.ckpt'),
#                                     epoch)

#         for name, value in logs.items():
#             if name in ['batch', 'size']:
#                 continue
#             summary = tf.Summary()
#             summary_value = summary.value.add()
#             summary_value.simple_value = value.item()
#             summary_value.tag = name
#             self.writer.add_summary(summary, epoch)
#         self.writer.flush()
Example #44
0
def get_sys_info():
    """Print an extensive, human-readable system report for bug triage.

    Covers hardware, OS, Python, LLVM, CUDA, ROC, SVML, Numba threading
    layers, Numba environment variables and conda state.  All output goes
    to stdout; nothing is returned.  Each probe is wrapped so that failures
    are reported inline rather than raised — the report is best-effort.

    Fixes vs. previous revision: bare ``except:`` clauses narrowed to
    ``except Exception:`` so Ctrl-C (KeyboardInterrupt) and SystemExit are
    not swallowed while probing; dropped an unused local binding.
    """
    # delay these imports until now as they are only needed in this
    # function which then exits.
    import platform
    import json
    import multiprocessing
    from numba import config
    from numba import cuda as cu
    from numba.cuda import cudadrv
    from numba.cuda.cudadrv.driver import driver as cudriver
    from numba import roc
    from numba.roc.hlc import hlc, libhlc
    import textwrap as tw
    import ctypes as ct
    import llvmlite.binding as llvmbind
    import locale
    from datetime import datetime
    from itertools import chain
    from subprocess import check_output, CalledProcessError

    try:
        fmt = "%-45s : %-s"  # left-aligned "label : value" rows
        print("-" * 80)
        print("__Time Stamp__")
        print(datetime.utcnow())
        print("")

        print("__Hardware Information__")
        system_name = platform.system()
        print(fmt % ("Machine", platform.machine()))
        print(fmt % ("CPU Name", llvmbind.get_host_cpu_name()))
        if system_name == 'Linux':
            # Prefer the affinity-aware CPU count from /proc/self/status;
            # multiprocessing.cpu_count() ignores CPU masks/cgroups.
            strmatch = 'Cpus_allowed'
            try:
                loc = '/proc/self/status'
                with open(loc, 'rt') as f:
                    proc_stat = f.read().splitlines()
                    for x in proc_stat:
                        if x.startswith(strmatch):
                            if x.startswith('%s:' % strmatch):
                                # hex bitmask of accessible CPUs; popcount
                                # gives the number of usable cores
                                hexnum = '0x%s' % x.split(':')[1].strip()
                                acc_cpus = int(hexnum, 16)
                                _n = str(bin(acc_cpus).count('1'))
                                print(fmt % ("Number of accessible CPU cores",
                                                _n))
                            elif x.startswith('%s_list:' % strmatch):
                                _a = x.split(':')[1].strip()
                                print(fmt % ("Listed accessible CPUs cores",
                                                _a))
            except Exception:
                print(fmt % ("CPU count", multiprocessing.cpu_count()))
            # See if CFS is in place
            # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
            try:
                def scrape_lines(loc):
                    with open(loc, 'rt') as f:
                        return f.read().splitlines()
                loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_period_us'
                cfs_period = int(scrape_lines(loc)[0])
                loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us'
                cfs_quota = int(scrape_lines(loc)[0])
                if cfs_quota == -1:
                    print(fmt % ("CFS restrictions", "None"))
                else:
                    # quota/period == how many CPUs' worth of runtime is allowed
                    runtime_amount = float(cfs_quota)/float(cfs_period)
                    print(fmt % ("CFS restrictions (CPUs worth of runtime)",
                                 runtime_amount))
            except Exception:
                print(fmt % ("CFS restrictions", 'Information not available'))
        else:
            print(fmt % ("CPU count", multiprocessing.cpu_count()))

        try:
            featuremap = llvmbind.get_host_cpu_features()
        except RuntimeError:
            print(fmt % ("CPU Features", "NA"))
        else:
            features = sorted([key for key, value in featuremap.items()
                               if value])
            cpu_feat = tw.fill(' '.join(features), 80)
            print(fmt % ("CPU Features", ""))
            print(cpu_feat)
        print("")

        print("__OS Information__")
        print(fmt % ("Platform", platform.platform(aliased=True)))
        print(fmt % ("Release", platform.release()))
        print(fmt % ("System Name", system_name))
        print(fmt % ("Version", platform.version()))
        try:
            if system_name == 'Linux':
                # NOTE(review): platform.linux_distribution() was removed in
                # Python 3.8 — this path raises AttributeError there, which
                # the except below reports; confirm the supported Pythons.
                info = platform.linux_distribution()
            elif system_name == 'Windows':
                info = platform.win32_ver()
            elif system_name == 'Darwin':
                info = platform.mac_ver()
            else:
                raise RuntimeError("Unknown system.")
            buf = ''.join([x
                           if x != '' else ' '
                           for x in list(chain.from_iterable(info))])
            print(fmt % ("OS specific info", buf))

            if system_name == 'Linux':
                print(fmt % ("glibc info", ' '.join(platform.libc_ver())))
        except Exception:
            print("Error: System name incorrectly identified or unknown.")
        print("")

        print("__Python Information__")
        print(fmt % ("Python Compiler", platform.python_compiler()))
        print(
            fmt %
            ("Python Implementation",
             platform.python_implementation()))
        print(fmt % ("Python Version", platform.python_version()))
        lcl = []
        try:
            for x in locale.getdefaultlocale():
                if x is not None:
                    lcl.append(x)
        except Exception as e:
            lcl.append(str(e))
        print(fmt % ("Python Locale ", ' '.join(lcl)))

        print("")
        print("__LLVM information__")
        print(
            fmt %
            ("LLVM version", '.'.join(
                [str(k) for k in llvmbind.llvm_version_info])))

        print("")
        print("__CUDA Information__")
        # Look for GPUs
        try:
            cu.list_devices()[0]  # will a device initialise?
        except Exception as e:
            msg_not_found = "CUDA driver library cannot be found"
            # NOTE(review): this literal looks redacted ("******") — the
            # original message presumably described CUDA being disabled by
            # the user; confirm against upstream before relying on matching.
            msg_disabled_by_user = "******"
            msg_end = " or no CUDA enabled devices are present."
            msg_generic_problem = "Error: CUDA device intialisation problem."
            msg = getattr(e, 'msg', None)
            if msg is not None:
                if msg_not_found in msg:
                    err_msg = msg_not_found + msg_end
                elif msg_disabled_by_user in msg:
                    err_msg = msg_disabled_by_user + msg_end
                else:
                    err_msg = msg_generic_problem + " Message:" + msg
            else:
                err_msg = msg_generic_problem + " " + str(e)
            # Best effort error report
            print("%s\nError class: %s" % (err_msg, str(type(e))))
        else:
            try:
                cu.detect()
                dv = ct.c_int(0)
                cudriver.cuDriverGetVersion(ct.byref(dv))
                print(fmt % ("CUDA driver version", dv.value))
                print("CUDA libraries:")
                cudadrv.libs.test(sys.platform, print_paths=False)
            except Exception:
                print(
                    "Error: Probing CUDA failed (device and driver present, runtime problem?)\n")

        print("")
        print("__ROC Information__")
        roc_is_available = roc.is_available()
        print(fmt % ("ROC available", roc_is_available))

        toolchains = []
        try:
            libhlc.HLC()
            toolchains.append('librocmlite library')
        except Exception:
            pass
        try:
            # only the side effect (does the tooling exist?) matters here
            hlc.CmdLine().check_tooling()
            toolchains.append('ROC command line tools')
        except Exception:
            pass

        # if no ROC try and report why
        if not roc_is_available:
            from numba.roc.hsadrv.driver import hsa
            try:
                hsa.is_available
            except Exception as e:
                msg = str(e)
            else:
                msg = 'No ROC toolchains found.'
            print(fmt % ("Error initialising ROC due to", msg))

        if toolchains:
            print(fmt % ("Available Toolchains", ', '.join(toolchains)))

        try:
            # ROC might not be available due to lack of tool chain, but HSA
            # agents may be listed
            from numba.roc.hsadrv.driver import hsa, dgpu_count
            decode = lambda x: x.decode('utf-8') if isinstance(x, bytes) else x
            print("\nFound %s HSA Agents:" % len(hsa.agents))
            for i, agent in enumerate(hsa.agents):
                print('Agent id  : %s' % i)
                print('    vendor: %s' % decode(agent.vendor_name))
                print('      name: %s' % decode(agent.name))
                print('      type: %s' % agent.device)
                print("")

            _dgpus = []
            for a in hsa.agents:
                if a.is_component and a.device == 'GPU':
                    _dgpus.append(decode(a.name))
            print(fmt % ("Found %s discrete GPU(s)" % dgpu_count(), \
                  ', '.join(_dgpus)))
        except Exception as e:
            print("No HSA Agents found, encountered exception when searching:")
            print(e)

        print("")
        print("__SVML Information__")
        # replicate some SVML detection logic from numba.__init__ here.
        # if SVML load fails in numba.__init__ the splitting of the logic
        # here will help diagnosis of the underlying issue
        have_svml_library = True
        try:
            if sys.platform.startswith('linux'):
                llvmbind.load_library_permanently("libsvml.so")
            elif sys.platform.startswith('darwin'):
                llvmbind.load_library_permanently("libsvml.dylib")
            elif sys.platform.startswith('win'):
                llvmbind.load_library_permanently("svml_dispmd")
            else:
                have_svml_library = False
        except Exception:
            have_svml_library = False
        func = getattr(llvmbind.targets, "has_svml", None)
        llvm_svml_patched = func() if func is not None else False
        svml_operational = (config.USING_SVML and llvm_svml_patched \
                            and have_svml_library)
        print(fmt % ("SVML state, config.USING_SVML", config.USING_SVML))
        print(fmt % ("SVML library found and loaded", have_svml_library))
        print(fmt % ("llvmlite using SVML patched LLVM", llvm_svml_patched))
        print(fmt % ("SVML operational", svml_operational))

        # Check which threading backends are available.
        print("")
        print("__Threading Layer Information__")
        def parse_error(e, backend):
            # parses a linux based error message, this is to provide feedback
            # and hide user paths etc
            try:
                path, problem, symbol =  [x.strip() for x in e.msg.split(':')]
                extn_dso = os.path.split(path)[1]
                if backend in extn_dso:
                    return "%s: %s" % (problem, symbol)
            except Exception:
                pass
            return "Unknown import problem."

        try:
            from numba.npyufunc import tbbpool
            print(fmt % ("TBB Threading layer available", True))
        except ImportError as e:
            # might be a missing symbol due to e.g. tbb libraries missing
            print(fmt % ("TBB Threading layer available", False))
            print(fmt % ("+--> Disabled due to",
                         parse_error(e, 'tbbpool')))

        try:
            from numba.npyufunc import omppool
            print(fmt % ("OpenMP Threading layer available", True))
        except ImportError as e:
            print(fmt % ("OpenMP Threading layer available", False))
            print(fmt % ("+--> Disabled due to",
                         parse_error(e, 'omppool')))

        try:
            from numba.npyufunc import workqueue
            print(fmt % ("Workqueue Threading layer available", True))
        except ImportError as e:
            print(fmt % ("Workqueue Threading layer available", False))
            print(fmt % ("+--> Disabled due to",
                         parse_error(e, 'workqueue')))

        # look for numba env vars that are set
        print("")
        print("__Numba Environment Variable Information__")
        _envvar_found = False
        for k, v in os.environ.items():
            if k.startswith('NUMBA_'):
                print(fmt % (k, v))
                _envvar_found = True
        if not _envvar_found:
            print("None set.")

        # Look for conda and conda information
        print("")
        print("__Conda Information__")
        cmd = ["conda", "info", "--json"]
        try:
            conda_out = check_output(cmd)
        except Exception as e:
            print(
                "Conda not present/not working.\nError was %s\n" % e)
        else:
            data = ''.join(conda_out.decode("utf-8").splitlines())
            jsond = json.loads(data)
            keys = ['conda_build_version',
                    'conda_env_version',
                    'platform',
                    'python_version',
                    'root_writable']
            for k in keys:
                try:
                    print(fmt % (k, jsond[k]))
                except KeyError:
                    pass

            # get info about current environment
            cmd = ["conda", "list"]
            try:
                conda_out = check_output(cmd)
            except CalledProcessError as e:
                print("Error: Conda command failed. Error was %s\n" % e.output)
            else:
                print("")
                print("__Current Conda Env__")
                data = conda_out.decode("utf-8").splitlines()
                for k in data:
                    if k[0] != '#':  # don't show where the env is, personal data
                        print(k)

        print("-" * 80)

    except Exception as e:
        print("Error: The system reporting tool has failed unexpectedly.")
        print("Exception was:")
        print(e)

    finally:
        print(
            "%s" %
            "If requested, please copy and paste the information between\n"
            "the dashed (----) lines, or from a given specific section as\n"
            "appropriate.\n\n"
            "=============================================================\n"
            "IMPORTANT: Please ensure that you are happy with sharing the\n"
            "contents of the information present, any information that you\n"
            "wish to keep private you should remove before sharing.\n"
            "=============================================================\n")
Example #45
0
    def finalize_options(self):
        """Finalize build options, restricting SIMD levels to the toolchain.

        Extends ``_build.finalize_options`` by probing the compiler that
        built Python, disabling SIMD sub-libraries (avx512/avx2) that the
        installed compiler cannot handle, honouring the user's
        ``with_<name>`` flags, probing FMA support, and finally writing the
        generated ``x86_flags.h`` header describing the compiled features.

        Raises:
            DistutilsOptionError: if a ``with_<name>`` flag is not one of
                'auto', 'yes' or 'no'.
            CCompilerError: if a feature is explicitly requested ('yes')
                but is not supported by the compiler.
        """
        _build.finalize_options(self)

        compiler = new_compiler(compiler=self.compiler, verbose=self.verbose)
        customize_compiler(compiler)

        disabled_libraries = []

        # Section for custom limits imposed on the SIMD instruction levels based
        # on the installed compiler
        plat_compiler = platform.python_compiler()
        if plat_compiler.lower().startswith('gcc'):
            # Check the installed gcc version, as versions older than 7.0 claim to
            # support avx512 but are missing some intrinsics that FastNoiseSIMD calls
            output = subprocess.check_output('gcc --version', shell=True)
            # Raw bytes pattern: '\d' in a plain literal is an invalid escape
            # sequence on modern Python (SyntaxWarning, future error).
            gcc_version = tuple(
                int(x)
                for x in re.findall(rb'\d+(?:\.\d+)+', output)[0].split(b'.')
            )
            if gcc_version < (7, 2):  # Disable AVX512
                disabled_libraries.append('avx512')
            if gcc_version < (4, 7):  # Disable AVX2
                disabled_libraries.append('avx2')
        elif plat_compiler.lower().startswith('msc'):
            # No versions of Windows Python support AVX512 yet
            #                 MSVC++ 14.1 _MSC_VER == 1911 (Visual Studio 2017)
            #                 MSVC++ 14.1 _MSC_VER == 1910 (Visual Studio 2017)
            # Python 3.5/3.6: MSVC++ 14.0 _MSC_VER == 1900 (Visual Studio 2015)
            # Python 3.4:     MSVC++ 10.0 _MSC_VER == 1600 (Visual Studio 2010)
            # Python 2.7:     MSVC++ 9.0  _MSC_VER == 1500 (Visual Studio 2008)
            # Here we just assume the user has the platform compiler
            msc_version = int(
                re.findall(r'v\.\d+', plat_compiler)[0].lstrip('v.'))
            # print('FOUND MSVC VERSION: ', msc_version)
            # Still not working with MSVC2017 yet with 1915 and Python 3.7, it
            # cannot find the function `_mm512_floor_ps`
            if msc_version < 1916:
                disabled_libraries.append('avx512')
            if msc_version < 1900:
                disabled_libraries.append('avx2')
        # End of SIMD limits

        for name, lib in self.distribution.libraries:
            val = getattr(self, 'with_' + name)
            if val not in ('auto', 'yes', 'no'):
                raise DistutilsOptionError('with_%s flag must be auto, yes, '
                                           'or no, not "%s".' % (name, val))

            if val == 'no':
                disabled_libraries.append(name)
                continue

            if not self.compiler_has_flags(compiler, name, lib['cflags']):
                if val == 'yes':
                    # Explicitly required but not available.
                    raise CCompilerError('%s is not supported by your '
                                         'compiler.' % (name, ))
                disabled_libraries.append(name)

        # FMA is only useful when at least one AVX level survived the culling.
        use_fma = False
        if (self.with_fma != 'no' and ('avx512' not in disabled_libraries
                                       or 'avx2' not in disabled_libraries)):
            if fma_flags is None:
                # No flags required.
                use_fma = True
            elif self.compiler_has_flags(compiler, 'fma', fma_flags):
                use_fma = True
                avx512['cflags'] += fma_flags
                avx2['cflags'] += fma_flags
            elif self.with_fma == 'yes':
                # Explicitly required but not available.
                raise CCompilerError('FMA is not supported by your compiler.')

        self.distribution.libraries = [
            lib for lib in self.distribution.libraries
            if lib[0] not in disabled_libraries
        ]

        # Record the surviving feature set for the C sources.
        with open('pyfastnoisesimd/fastnoisesimd/x86_flags.h', 'wb') as fh:
            fh.write(b'// This file is generated by setup.py, '
                     b'do not edit it by hand\n')
            for name, lib in self.distribution.libraries:
                fh.write(b'#define FN_COMPILE_%b\n' %
                         (name.upper().encode('ascii', )))
            if use_fma:
                fh.write(b'#define FN_USE_FMA\n')
Example #46
0
def check_installation():
    """Utility function to check package installation.

    Prints system, Python, and package information, then two tables
    comparing required versus installed versions for the core and extra
    requirements of the ``dcase_util`` package, and finally the FFMPEG
    version found on the system.

    Raises:
        ValueError: if requirements.txt contains environment markers.
    """

    import pkg_resources
    import os
    import sys
    import platform
    import subprocess

    log = ui.FancyPrinter()

    # Get system information
    log.line('System information')
    log.data(field='System', value=platform.system())
    log.data(field='Release', value=platform.release())
    log.data(field='Version', value=platform.version())
    log.data(field='Processor', value=platform.processor())
    log.line()

    # Get Python installation information
    log.line('Python information')
    log.data(field='Version', value=sys.version)
    log.data(field='Compiler', value=platform.python_compiler())
    log.data(field='Implementation', value=platform.python_implementation())
    log.line()

    # Get package information
    log.line('Package information')
    log.data(field='Name', value=__name__)
    log.data(field='Version', value=__version__)
    log.line()

    package = pkg_resources.require('dcase_util')[0]

    # Get core requirements
    core_requirements = package.requires()

    # Load requirements.txt
    requirements_filename = os.path.join(package.location, 'requirements.txt')
    with open(requirements_filename) as fp:
        requirements_file = fp.read()

    # Get all requirements
    all_requirements = []
    for r in pkg_resources.parse_requirements(requirements_file):
        if r.marker:
            raise ValueError("environment markers are not supported, in '%s'" %
                             r)
        all_requirements.append(r)

    processed = []

    def _log_requirement_rows(requirements):
        # Print one table row per requirement not shown yet, comparing the
        # required spec against the installed distribution version.
        for requirement in requirements:
            if requirement.key in processed:
                continue
            installed = pkg_resources.get_distribution(requirement.key)
            # A bare requirement (no version spec) has nothing to join.
            required = (''.join(requirement.specs[0])
                        if requirement.specs else '')
            log.row(
                requirement.key,
                required,
                installed.version,
                # `in` delegates to Requirement.__contains__: does the
                # installed version satisfy the spec?
                'OK' if installed in requirement else 'CHECK')
            processed.append(requirement.key)

    log.line('Core requirements')
    log.row('Package',
            'Required',
            'Installed',
            'Status',
            widths=[25, 15, 15, 15])
    log.row_sep()
    _log_requirement_rows(core_requirements)
    log.line()

    log.line('Extra requirements')
    log.row('Package',
            'Required',
            'Installed',
            'Status',
            widths=[25, 15, 15, 15])
    log.row_sep()
    _log_requirement_rows(all_requirements)
    log.line()

    # Get system level requirements
    log.line('System')
    ffmpeg_info = subprocess.check_output(['ffmpeg',
                                           '-version']).decode('utf-8')

    log.data(field='FFMPEG', value=ffmpeg_info)
Example #47
0
import platform

# Print a fixed-width report of interpreter and host details.
# NOTE(review): platform._sys_version() is a private helper with no public
# equivalent returning the same parsed tuple — confirm before relying on it.
_probes = (
    ("COMPILER     :", platform.python_compiler),
    ("SYSTEM       :", platform.system),
    ("PROCESSOR    :", platform.processor),
    ("ARCHITECTURE :", platform.architecture),
    ("MACHINE      :", platform.machine),
    ("JAVA VERSION :", platform.java_ver),
    ("PYTHON VER   :", platform.python_version),
    ("SYSTEM VER   :", platform._sys_version),
)
for _label, _probe in _probes:
    print(_label, _probe())
Example #48
0
def get_sysinfo():
    """Collect a dictionary of system/runtime diagnostics.

    Gathers OS, CPU, Python, LLVM, CUDA, ROC, SVML, threading-backend and
    conda/pip package information.  Failures of individual probes are
    appended to the module-level ``_error_log`` / ``_warning_log`` lists
    rather than raised, so a partial report is always returned.

    Returns:
        dict: mapping of the module-level ``_*`` key constants to the
        collected values, including the accumulated errors/warnings and the
        total runtime of the collection itself.
    """
    # Gather the information that shouldn't raise exceptions
    sys_info = {
        _start: datetime.now(),
        _start_utc: datetime.utcnow(),
        _machine: platform.machine(),
        _cpu_name: llvmbind.get_host_cpu_name(),
        _cpu_count: multiprocessing.cpu_count(),
        _platform_name: platform.platform(aliased=True),
        _platform_release: platform.release(),
        _os_name: platform.system(),
        _os_version: platform.version(),
        _python_comp: platform.python_compiler(),
        _python_impl: platform.python_implementation(),
        _python_version: platform.python_version(),
        _numba_env_vars: {k: v for (k, v) in os.environ.items()
                          if k.startswith('NUMBA_')},
        _llvm_version: '.'.join(str(i) for i in llvmbind.llvm_version_info),
        _roc_available: roc.is_available(),
        _psutil: _psutil_import,
    }

    # CPU features
    try:
        feature_map = llvmbind.get_host_cpu_features()
    except RuntimeError as e:
        _error_log.append(f'Error (CPU features): {e}')
    else:
        features = sorted([key for key, value in feature_map.items() if value])
        sys_info[_cpu_features] = ' '.join(features)

    # Python locale
    # On MacOSX, getdefaultlocale can raise. Check again if Py > 3.7.5
    try:
        # If $LANG is unset, getdefaultlocale() can return (None, None), make
        # sure we can encode this as strings by casting explicitly.
        sys_info[_python_locale] = '.'.join([str(i) for i in
                                             locale.getdefaultlocale()])
    except Exception as e:
        _error_log.append(f'Error (locale): {e}')

    # CUDA information
    try:
        cu.list_devices()[0]  # will a device initialise?
    except Exception as e:
        sys_info[_cu_dev_init] = False
        msg_not_found = "CUDA driver library cannot be found"
        msg_disabled_by_user = "******"
        msg_end = " or no CUDA enabled devices are present."
        msg_generic_problem = "CUDA device intialisation problem."
        msg = getattr(e, 'msg', None)
        if msg is not None:
            if msg_not_found in msg:
                err_msg = msg_not_found + msg_end
            elif msg_disabled_by_user in msg:
                err_msg = msg_disabled_by_user + msg_end
            else:
                err_msg = msg_generic_problem + " Message:" + msg
        else:
            err_msg = msg_generic_problem + " " + str(e)
        # Best effort error report
        _warning_log.append("Warning (cuda): %s\nException class: %s" %
                            (err_msg, str(type(e))))
    else:
        try:
            sys_info[_cu_dev_init] = True

            output = StringIO()
            with redirect_stdout(output):
                cu.detect()
            sys_info[_cu_detect_out] = output.getvalue()
            output.close()

            dv = ctypes.c_int(0)
            cudriver.cuDriverGetVersion(ctypes.byref(dv))
            sys_info[_cu_drv_ver] = dv.value

            output = StringIO()
            with redirect_stdout(output):
                cudadrv.libs.test(sys.platform, print_paths=False)
            sys_info[_cu_lib_test] = output.getvalue()
            output.close()
        except Exception as e:
            _warning_log.append(
                "Warning (cuda): Probing CUDA failed "
                "(device and driver present, runtime problem?)\n"
                f"(cuda) {type(e)}: {e}")

    # ROC information
    # If no ROC try and report why
    if not sys_info[_roc_available]:
        from numba.roc.hsadrv.driver import hsa
        try:
            hsa.is_available
        except Exception as e:
            msg = str(e)
        else:
            msg = 'No ROC toolchains found.'
        _warning_log.append(f"Warning (roc): Error initialising ROC: {msg}")

    toolchains = []
    try:
        libhlc.HLC()
        toolchains.append('librocmlite library')
    except Exception:
        pass
    try:
        cmd = hlc.CmdLine().check_tooling()
        toolchains.append('ROC command line tools')
    except Exception:
        pass
    sys_info[_roc_toolchains] = toolchains

    try:
        # ROC might not be available due to lack of tool chain, but HSA
        # agents may be listed
        from numba.roc.hsadrv.driver import hsa, dgpu_count

        def decode(x):
            return x.decode('utf-8') if isinstance(x, bytes) else x

        sys_info[_hsa_agents_count] = len(hsa.agents)
        agents = []
        for i, agent in enumerate(hsa.agents):
            agents.append({
                'Agent id': i,
                'Vendor': decode(agent.vendor_name),
                'Name': decode(agent.name),
                'Type': agent.device,
            })
        sys_info[_hsa_agents] = agents

        _dgpus = []
        for a in hsa.agents:
            if a.is_component and a.device == 'GPU':
                _dgpus.append(decode(a.name))
        sys_info[_hsa_gpus_count] = dgpu_count()
        sys_info[_hsa_gpus] = ', '.join(_dgpus)
    except Exception as e:
        _warning_log.append(
            "Warning (roc): No HSA Agents found, "
            f"encountered exception when searching: {e}")

    # SVML information
    # Replicate some SVML detection logic from numba.__init__ here.
    # If SVML load fails in numba.__init__ the splitting of the logic
    # here will help diagnosing the underlying issue.
    svml_lib_loaded = True
    try:
        if sys.platform.startswith('linux'):
            llvmbind.load_library_permanently("libsvml.so")
        elif sys.platform.startswith('darwin'):
            llvmbind.load_library_permanently("libsvml.dylib")
        elif sys.platform.startswith('win'):
            llvmbind.load_library_permanently("svml_dispmd")
        else:
            svml_lib_loaded = False
    except Exception:
        svml_lib_loaded = False
    func = getattr(llvmbind.targets, "has_svml", None)
    sys_info[_llvm_svml_patched] = func() if func else False
    sys_info[_svml_state] = config.USING_SVML
    sys_info[_svml_loaded] = svml_lib_loaded
    # SVML is operational only when configured, loaded AND patched into LLVM.
    sys_info[_svml_operational] = all((
        sys_info[_svml_state],
        sys_info[_svml_loaded],
        sys_info[_llvm_svml_patched],
    ))

    # Check which threading backends are available.
    def parse_error(e, backend):
        # parses a linux based error message, this is to provide feedback
        # and hide user paths etc
        try:
            path, problem, symbol = [x.strip() for x in e.msg.split(':')]
            extn_dso = os.path.split(path)[1]
            if backend in extn_dso:
                return "%s: %s" % (problem, symbol)
        except Exception:
            pass
        return "Unknown import problem."

    try:
        from numba.np.ufunc import tbbpool  # NOQA
        sys_info[_tbb_thread] = True
    except ImportError as e:
        # might be a missing symbol due to e.g. tbb libraries missing
        sys_info[_tbb_thread] = False
        sys_info[_tbb_error] = parse_error(e, 'tbbpool')

    try:
        from numba.np.ufunc import omppool
        sys_info[_openmp_thread] = True
        sys_info[_openmp_vendor] = omppool.openmp_vendor
    except ImportError as e:
        sys_info[_openmp_thread] = False
        sys_info[_openmp_error] = parse_error(e, 'omppool')

    try:
        from numba.np.ufunc import workqueue  # NOQA
        sys_info[_wkq_thread] = True
    except ImportError as e:
        # BUG FIX: this previously set True on ImportError; mirror the
        # tbb/openmp handlers above and record the backend as unavailable.
        sys_info[_wkq_thread] = False
        sys_info[_wkq_error] = parse_error(e, 'workqueue')

    # Look for conda and installed packages information
    cmd = ('conda', 'info', '--json')
    try:
        conda_out = check_output(cmd)
    except Exception as e:
        _warning_log.append(f'Warning: Conda not available.\n Error was {e}\n')
        # Conda is not available, try pip list to list installed packages
        cmd = (sys.executable, '-m', 'pip', 'list')
        try:
            reqs = check_output(cmd)
        except Exception as e:
            _error_log.append(f'Error (pip): {e}')
        else:
            sys_info[_inst_pkg] = reqs.decode().splitlines()

    else:
        jsond = json.loads(conda_out.decode())
        keys = {
            'conda_build_version': _conda_build_ver,
            'conda_env_version': _conda_env_ver,
            'platform': _conda_platform,
            'python_version': _conda_python_ver,
            'root_writable': _conda_root_writable,
        }
        for conda_k, sysinfo_k in keys.items():
            sys_info[sysinfo_k] = jsond.get(conda_k, 'N/A')

        # Get info about packages in current environment
        cmd = ('conda', 'list')
        try:
            conda_out = check_output(cmd)
        except CalledProcessError as e:
            _error_log.append(f'Error (conda): {e}')
        else:
            data = conda_out.decode().splitlines()
            # Drop '#' comment lines (they include the env path: personal data)
            sys_info[_inst_pkg] = [l for l in data if not l.startswith('#')]

    sys_info.update(get_os_spec_info(sys_info[_os_name]))
    sys_info[_errors] = _error_log
    sys_info[_warnings] = _warning_log
    sys_info[_runtime] = (datetime.now() - sys_info[_start]).total_seconds()
    return sys_info
Example #49
0
def configuration(parent_package="", top_path=None):
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info

    config = Configuration("core", parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, "code_generators")

    if is_released(config):
        warnings.simplefilter("error", MismatchCAPIWarning)

    # Check whether we have a mismatch between the set C API VERSION and the
    # actual C API VERSION
    check_api_version(C_API_VERSION, codegen_dir)

    generate_umath_py = join(codegen_dir, "generate_umath.py")
    n = dot_join(config.name, "generate_umath")
    generate_umath = npy_load_module("_".join(n.split(".")), generate_umath_py,
                                     (".py", "U", 1))

    header_dir = "include/numpy"  # this is relative to config.path_in_package

    cocache = CallOnceOnly()

    def generate_config_h(ext, build_dir):
        """Generate ``<build_dir>/include/numpy/config.h`` and return its path.

        Regenerates the header (sizeof/math/platform feature defines) only
        when this setup file is newer than the target; otherwise re-parses
        the existing header to recover the MATHLIB list.  Side effects:
        extends ``ext.libraries`` with the math libraries (when ``ext`` has
        that attribute) and records the header's directory on
        ``config.numpy_include_dirs``.
        """
        target = join(build_dir, header_dir, "config.h")
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        # Only run the (expensive) config probes when the target is stale.
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info("Generating %s", target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(("MATHLIB", ",".join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append("__NPY_PRIVATE_NO_SIGNAL")

            # Windows checks
            if sys.platform == "win32" or os.name == "nt":
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(("NPY_RESTRICT", config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Use relaxed stride checking
            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(("NPY_RELAXED_STRIDES_CHECKING", 1))

            # Use bogus stride debug aid when relaxed strides are enabled
            if NPY_RELAXED_STRIDES_DEBUG:
                moredefs.append(("NPY_RELAXED_STRIDES_DEBUG", 1))

            # Get long double representation
            rep = check_long_double_representation(config_cmd)
            moredefs.append(("HAVE_LDOUBLE_%s" % rep, 1))

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(("NPY_PY3K", 1))

            # Generate the config.h file from moredefs
            with open(target, "w") as target_f:
                # Entries are either a bare macro name or a (name, value) pair.
                for d in moredefs:
                    if isinstance(d, str):
                        target_f.write("#define %s\n" % (d))
                    else:
                        target_f.write("#define %s %s\n" % (d[0], d[1]))

                # define inline to our keyword, or nothing
                target_f.write("#ifndef __cplusplus\n")
                if inline == "inline":
                    target_f.write("/* #undef inline */\n")
                else:
                    target_f.write("#define inline %s\n" % inline)
                target_f.write("#endif\n")

                # add the guard to make sure config.h is never included directly,
                # but always through npy_config.h
                target_f.write(
                    textwrap.dedent("""
                    #ifndef _NPY_NPY_CONFIG_H_
                    #error config.h should never be included directly, include npy_config.h instead
                    #endif
                    """))

            # Echo the generated header to the build log for debugging.
            print("File:", target)
            with open(target) as target_f:
                print(target_f.read())
            print("EOF")
        else:
            # Target is up to date: recover MATHLIB from the existing header
            # so ext.libraries can still be extended below.
            mathlibs = []
            with open(target) as target_f:
                for line in target_f:
                    s = "#define MATHLIB"
                    if line.startswith(s):
                        value = line[len(s):].strip()
                        if value:
                            mathlibs.extend(value.split(","))

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, "libraries"):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !

        Generates ``<build_dir>/include/numpy/_numpyconfig.h`` (the public
        NPY_* defines plus the C ABI/API version numbers), registers it as a
        data file, and returns the target path.
        """
        # put common include directory in build_dir on search path
        # allows using code generation in headers headers
        config.add_include_dirs(join(build_dir, "src", "common"))
        config.add_include_dirs(join(build_dir, "src", "npymath"))

        target = join(build_dir, header_dir, "_numpyconfig.h")
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        # Only regenerate when this setup script is newer than the target.
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info("Generating %s", target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(("NPY_NO_SIGNAL", 1))

            if is_npy_no_smp():
                moredefs.append(("NPY_NO_SMP", 1))
            else:
                moredefs.append(("NPY_NO_SMP", 0))

            mathlibs = check_mathlib(config_cmd)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(("NPY_RELAXED_STRIDES_CHECKING", 1))

            if NPY_RELAXED_STRIDES_DEBUG:
                moredefs.append(("NPY_RELAXED_STRIDES_DEBUG", 1))

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl("PRIdPTR", headers=["inttypes.h"]):
                moredefs.append(("NPY_USE_C99_FORMATS", 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(("NPY_VISIBILITY_HIDDEN", hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(("NPY_ABI_VERSION", "0x%.8X" % C_ABI_VERSION))
            moredefs.append(("NPY_API_VERSION", "0x%.8X" % C_API_VERSION))

            # Add moredefs to header
            with open(target, "w") as target_f:
                # Entries are either a bare macro name or a (name, value) pair.
                for d in moredefs:
                    if isinstance(d, str):
                        target_f.write("#define %s\n" % (d))
                    else:
                        target_f.write("#define %s %s\n" % (d[0], d[1]))

                # Define __STDC_FORMAT_MACROS
                target_f.write(
                    textwrap.dedent("""
                    #ifndef __STDC_FORMAT_MACROS
                    #define __STDC_FORMAT_MACROS 1
                    #endif
                    """))

            # Dump the numpyconfig.h header to stdout
            print("File: %s" % target)
            with open(target) as target_f:
                print(target_f.read())
            print("EOF")
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        """Return a build hook that runs ``<module_name>.py`` from codegen_dir."""
        def generate_api(ext, build_dir):
            # Make the code-generators directory importable just long enough
            # to import the generator module and run it.
            script_path = join(codegen_dir, module_name + ".py")
            sys.path.insert(0, codegen_dir)
            try:
                generator = __import__(module_name)
                log.info("executing %s", script_path)
                h_file, c_file, doc_file = generator.generate_api(
                    os.path.join(build_dir, header_dir))
            finally:
                sys.path.pop(0)
            # Ship the generated header and docs; only the header is a source.
            config.add_data_files((header_dir, h_file), (header_dir, doc_file))
            return (h_file, )

        return generate_api

    generate_numpy_api = generate_api_func("generate_numpy_api")
    generate_ufunc_api = generate_api_func("generate_ufunc_api")

    config.add_include_dirs(join(local_dir, "src", "common"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))

    config.add_data_dir("include/numpy")
    config.add_include_dirs(join("src", "npymath"))
    config.add_include_dirs(join("src", "multiarray"))
    config.add_include_dirs(join("src", "umath"))
    config.add_include_dirs(join("src", "npysort"))

    config.add_define_macros([
        ("NPY_INTERNAL_BUILD", "1")
    ])  # this macro indicates that Numpy build is in process
    config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
    if sys.platform[:3] == "aix":
        config.add_define_macros([("_LARGE_FILES", None)])
    else:
        config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
        config.add_define_macros([("_LARGEFILE_SOURCE", "1")])
        config.add_define_macros([("_LARGEFILE64_SOURCE", "1")])

    config.numpy_include_dirs.extend(config.paths("include"))

    deps = [
        join("src", "npymath", "_signbit.c"),
        join("include", "numpy", "*object.h"),
        join(codegen_dir, "genapi.py"),
    ]

    #######################################################################
    #                            dummy module                             #
    #######################################################################

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.

    config.add_extension(
        "_dummy",
        sources=[
            join("src", "dummymodule.c"),
            generate_config_h,
            generate_numpyconfig_h,
            generate_numpy_api,
        ],
    )

    #######################################################################
    #                          npymath library                            #
    #######################################################################

    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    def get_mathlib_info(*args):
        """Probe the toolchain and record math-library link flags.

        Another ugly hack: the mathlib info is known once build_src is run,
        but we cannot use add_installed_pkg_config here either, so we only
        update the substitution dictionary during npymath build.
        """
        config_cmd = config.get_config_cmd()

        # Fail fast with a clear message if the compiler cannot even link a
        # trivial program — avoids confusing MATHLIB errors later on.
        if not config_cmd.try_link("int main(void) { return 0;}"):
            raise RuntimeError(
                "Broken toolchain: cannot link a simple C program")

        mlibs = check_mathlib(config_cmd)
        subst_dict["posix_mathlib"] = " ".join("-l%s" % lib for lib in mlibs)
        subst_dict["msvc_mathlib"] = " ".join("%s.lib" % lib for lib in mlibs)

    npymath_sources = [
        join("src", "npymath", "npy_math_internal.h.src"),
        join("src", "npymath", "npy_math.c"),
        join("src", "npymath", "ieee754.c.src"),
        join("src", "npymath", "npy_math_complex.c.src"),
        join("src", "npymath", "halffloat.c"),
    ]

    # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
    # Intel and Clang also don't seem happy with /GL
    is_msvc = platform.platform().startswith(
        "Windows") and platform.python_compiler().startswith("MS")
    config.add_installed_library(
        "npymath",
        sources=npymath_sources + [get_mathlib_info],
        install_dir="lib",
        build_info={
            "include_dirs":
            [],  # empty list required for creating npy_math_internal.h
            "extra_compiler_args": (["/GL-"] if is_msvc else []),
        },
    )
    config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
                              subst_dict)
    config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", subst_dict)

    #######################################################################
    #                         npysort library                             #
    #######################################################################

    # This library is created for the build but it is not installed
    npysort_sources = [
        join("src", "common", "npy_sort.h.src"),
        join("src", "npysort", "quicksort.c.src"),
        join("src", "npysort", "mergesort.c.src"),
        join("src", "npysort", "timsort.c.src"),
        join("src", "npysort", "heapsort.c.src"),
        join("src", "npysort", "radixsort.c.src"),
        join("src", "common", "npy_partition.h.src"),
        join("src", "npysort", "selection.c.src"),
        join("src", "common", "npy_binsearch.h.src"),
        join("src", "npysort", "binsearch.c.src"),
    ]
    config.add_library("npysort", sources=npysort_sources, include_dirs=[])

    #######################################################################
    #                     multiarray_tests module                         #
    #######################################################################

    config.add_extension(
        "_multiarray_tests",
        sources=[
            join("src", "multiarray", "_multiarray_tests.c.src"),
            join("src", "common", "mem_overlap.c"),
        ],
        depends=[
            join("src", "common", "mem_overlap.h"),
            join("src", "common", "npy_extint128.h"),
        ],
        libraries=["npymath"],
    )

    #######################################################################
    #             _multiarray_umath module - common part                  #
    #######################################################################

    common_deps = [
        join("src", "common", "array_assign.h"),
        join("src", "common", "binop_override.h"),
        join("src", "common", "cblasfuncs.h"),
        join("src", "common", "lowlevel_strided_loops.h"),
        join("src", "common", "mem_overlap.h"),
        join("src", "common", "npy_cblas.h"),
        join("src", "common", "npy_config.h"),
        join("src", "common", "npy_ctypes.h"),
        join("src", "common", "npy_extint128.h"),
        join("src", "common", "npy_import.h"),
        join("src", "common", "npy_longdouble.h"),
        join("src", "common", "templ_common.h.src"),
        join("src", "common", "ucsnarrow.h"),
        join("src", "common", "ufunc_override.h"),
        join("src", "common", "umathmodule.h"),
        join("src", "common", "numpyos.h"),
    ]

    common_src = [
        join("src", "common", "array_assign.c"),
        join("src", "common", "mem_overlap.c"),
        join("src", "common", "npy_longdouble.c"),
        join("src", "common", "templ_common.h.src"),
        join("src", "common", "ucsnarrow.c"),
        join("src", "common", "ufunc_override.c"),
        join("src", "common", "numpyos.c"),
    ]

    blas_info = get_info("blas_opt", 0)
    if blas_info and ("HAVE_CBLAS", None) in blas_info.get(
            "define_macros", []):
        extra_info = blas_info
        # These files are also in MANIFEST.in so that they are always in
        # the source distribution independently of HAVE_CBLAS.
        common_src.extend([
            join("src", "common", "cblasfuncs.c"),
            join("src", "common", "python_xerbla.c"),
        ])
        if uses_accelerate_framework(blas_info):
            common_src.extend(get_sgemv_fix())
    else:
        extra_info = {}

    #######################################################################
    #             _multiarray_umath module - multiarray part              #
    #######################################################################

    multiarray_deps = ([
        join("src", "multiarray", "arrayobject.h"),
        join("src", "multiarray", "arraytypes.h"),
        join("src", "multiarray", "arrayfunction_override.h"),
        join("src", "multiarray", "buffer.h"),
        join("src", "multiarray", "calculation.h"),
        join("src", "multiarray", "common.h"),
        join("src", "multiarray", "convert_datatype.h"),
        join("src", "multiarray", "convert.h"),
        join("src", "multiarray", "conversion_utils.h"),
        join("src", "multiarray", "ctors.h"),
        join("src", "multiarray", "descriptor.h"),
        join("src", "multiarray", "dragon4.h"),
        join("src", "multiarray", "getset.h"),
        join("src", "multiarray", "hashdescr.h"),
        join("src", "multiarray", "iterators.h"),
        join("src", "multiarray", "mapping.h"),
        join("src", "multiarray", "methods.h"),
        join("src", "multiarray", "multiarraymodule.h"),
        join("src", "multiarray", "nditer_impl.h"),
        join("src", "multiarray", "number.h"),
        join("src", "multiarray", "refcount.h"),
        join("src", "multiarray", "scalartypes.h"),
        join("src", "multiarray", "sequence.h"),
        join("src", "multiarray", "shape.h"),
        join("src", "multiarray", "strfuncs.h"),
        join("src", "multiarray", "typeinfo.h"),
        join("src", "multiarray", "usertypes.h"),
        join("src", "multiarray", "vdot.h"),
        join("include", "numpy", "arrayobject.h"),
        join("include", "numpy", "_neighborhood_iterator_imp.h"),
        join("include", "numpy", "npy_endian.h"),
        join("include", "numpy", "arrayscalars.h"),
        join("include", "numpy", "noprefix.h"),
        join("include", "numpy", "npy_interrupt.h"),
        join("include", "numpy", "npy_3kcompat.h"),
        join("include", "numpy", "npy_math.h"),
        join("include", "numpy", "halffloat.h"),
        join("include", "numpy", "npy_common.h"),
        join("include", "numpy", "npy_os.h"),
        join("include", "numpy", "utils.h"),
        join("include", "numpy", "ndarrayobject.h"),
        join("include", "numpy", "npy_cpu.h"),
        join("include", "numpy", "numpyconfig.h"),
        join("include", "numpy", "ndarraytypes.h"),
        join("include", "numpy", "npy_1_7_deprecated_api.h"),
        # add library sources as distuils does not consider libraries
        # dependencies
    ] + npysort_sources + npymath_sources)

    multiarray_src = [
        join("src", "multiarray", "alloc.c"),
        join("src", "multiarray", "arrayobject.c"),
        join("src", "multiarray", "arraytypes.c.src"),
        join("src", "multiarray", "array_assign_scalar.c"),
        join("src", "multiarray", "array_assign_array.c"),
        join("src", "multiarray", "arrayfunction_override.c"),
        join("src", "multiarray", "buffer.c"),
        join("src", "multiarray", "calculation.c"),
        join("src", "multiarray", "compiled_base.c"),
        join("src", "multiarray", "common.c"),
        join("src", "multiarray", "convert.c"),
        join("src", "multiarray", "convert_datatype.c"),
        join("src", "multiarray", "conversion_utils.c"),
        join("src", "multiarray", "ctors.c"),
        join("src", "multiarray", "datetime.c"),
        join("src", "multiarray", "datetime_strings.c"),
        join("src", "multiarray", "datetime_busday.c"),
        join("src", "multiarray", "datetime_busdaycal.c"),
        join("src", "multiarray", "descriptor.c"),
        join("src", "multiarray", "dragon4.c"),
        join("src", "multiarray", "dtype_transfer.c"),
        join("src", "multiarray", "einsum.c.src"),
        join("src", "multiarray", "flagsobject.c"),
        join("src", "multiarray", "getset.c"),
        join("src", "multiarray", "hashdescr.c"),
        join("src", "multiarray", "item_selection.c"),
        join("src", "multiarray", "iterators.c"),
        join("src", "multiarray", "lowlevel_strided_loops.c.src"),
        join("src", "multiarray", "mapping.c"),
        join("src", "multiarray", "methods.c"),
        join("src", "multiarray", "multiarraymodule.c"),
        join("src", "multiarray", "nditer_templ.c.src"),
        join("src", "multiarray", "nditer_api.c"),
        join("src", "multiarray", "nditer_constr.c"),
        join("src", "multiarray", "nditer_pywrap.c"),
        join("src", "multiarray", "number.c"),
        join("src", "multiarray", "refcount.c"),
        join("src", "multiarray", "sequence.c"),
        join("src", "multiarray", "shape.c"),
        join("src", "multiarray", "scalarapi.c"),
        join("src", "multiarray", "scalartypes.c.src"),
        join("src", "multiarray", "strfuncs.c"),
        join("src", "multiarray", "temp_elide.c"),
        join("src", "multiarray", "typeinfo.c"),
        join("src", "multiarray", "usertypes.c"),
        join("src", "multiarray", "vdot.c"),
    ]

    #######################################################################
    #             _multiarray_umath module - umath part                   #
    #######################################################################

    def generate_umath_c(ext, build_dir):
        """Generate ``__umath_generated.c`` from generate_umath.py.

        Build hook used as a "source" entry by distutils: writes the
        generated C file under *build_dir* only when generate_umath.py is
        newer than the existing target, and returns [] so no extra source
        files are added to the extension.
        """
        target = join(build_dir, header_dir, "__umath_generated.c")
        # Renamed from ``dir`` to avoid shadowing the builtin; exist_ok
        # removes the check-then-create race of the old exists()/makedirs().
        target_dir = os.path.dirname(target)
        os.makedirs(target_dir, exist_ok=True)
        script = generate_umath_py
        if newer(script, target):
            # Regenerate only when the generator script has changed.
            with open(target, "w") as f:
                f.write(
                    generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
        return []

    umath_src = [
        join("src", "umath", "umathmodule.c"),
        join("src", "umath", "reduction.c"),
        join("src", "umath", "funcs.inc.src"),
        join("src", "umath", "simd.inc.src"),
        join("src", "umath", "loops.h.src"),
        join("src", "umath", "loops.c.src"),
        join("src", "umath", "matmul.h.src"),
        join("src", "umath", "matmul.c.src"),
        join("src", "umath", "clip.h.src"),
        join("src", "umath", "clip.c.src"),
        join("src", "umath", "ufunc_object.c"),
        join("src", "umath", "extobj.c"),
        join("src", "umath", "cpuid.c"),
        join("src", "umath", "scalarmath.c.src"),
        join("src", "umath", "ufunc_type_resolution.c"),
        join("src", "umath", "override.c"),
    ]

    umath_deps = [
        generate_umath_py,
        join("include", "numpy", "npy_math.h"),
        join("include", "numpy", "halffloat.h"),
        join("src", "multiarray", "common.h"),
        join("src", "multiarray", "number.h"),
        join("src", "common", "templ_common.h.src"),
        join("src", "umath", "simd.inc.src"),
        join("src", "umath", "override.h"),
        join(codegen_dir, "generate_ufunc_api.py"),
    ]

    config.add_extension(
        "_multiarray_umath",
        sources=multiarray_src + umath_src + npymath_sources + common_src + [
            generate_config_h,
            generate_numpyconfig_h,
            generate_numpy_api,
            join(codegen_dir, "generate_numpy_api.py"),
            join("*.py"),
            generate_umath_c,
            generate_ufunc_api,
        ],
        depends=deps + multiarray_deps + umath_deps + common_deps,
        libraries=["npymath", "npysort"],
        extra_info=extra_info,
    )

    #######################################################################
    #                        umath_tests module                           #
    #######################################################################

    config.add_extension("_umath_tests",
                         sources=[join("src", "umath", "_umath_tests.c.src")])

    #######################################################################
    #                   custom rational dtype module                      #
    #######################################################################

    config.add_extension(
        "_rational_tests",
        sources=[join("src", "umath", "_rational_tests.c.src")])

    #######################################################################
    #                        struct_ufunc_test module                     #
    #######################################################################

    config.add_extension(
        "_struct_ufunc_tests",
        sources=[join("src", "umath", "_struct_ufunc_tests.c.src")],
    )

    #######################################################################
    #                        operand_flag_tests module                    #
    #######################################################################

    config.add_extension(
        "_operand_flag_tests",
        sources=[join("src", "umath", "_operand_flag_tests.c.src")],
    )

    config.add_data_dir("tests")
    config.add_data_dir("tests/data")

    config.make_svn_version_py()

    return config
Example #50
0
def console(start_time):
    """Interactive mock "root" shell.

    Prints a sudo-style welcome banner, then reads commands in a loop
    until the user types ``exit``.  *start_time* is the epoch time (from
    ``time()``) at program launch, used by ``sysinfo`` to report uptime.
    """
    print("=========> You Have Logged In as Root")
    auti.sleep(2)
    print("")
    print("We trust you have received the usual lecture from dev_bides_mega")
    auti.sleep(1)
    print("It usually boils down to these three things:")
    auti.sleep(1)
    print("")
    auti.sleep(1)
    print("    #1) Respect the privacy of others.")
    auti.sleep(1)
    print("    #2) Think before you type.")
    auti.sleep(1)
    print("    #3) With great power comes great responsibility.")
    auti.sleep(1)
    print("")
    print("")
    auti.sleep(7)
    graffiti()
    auti.sleep(1)
    print(
        "______________________________________________________________________________________________________________________________"
    )
    auti.sleep(1)
    print("")
    auti.sleep(1)
    print("")
    auti.sleep(1)
    while True:
        # Normalize once; the commands are mutually exclusive, so an
        # elif chain replaces the old repeated `if cmd.lower() == ...`.
        cmd = input("root@Konsole >> ").lower()
        stop_time = time()
        if cmd == "exit":
            print("Bye Bye !!!!!")
            break
        elif cmd == "sysinfo":
            print("CPU : " + platform.processor())
            print("Python Version : " + platform.python_version())
            print("Python Compiler : " + platform.python_compiler())
            print("System Date : " + date.today().strftime("%d/%m/%y"))
            # Fix: the old conversion showed hour 0 as "0 AM" and noon as
            # "12 AM"; a 12-hour clock maps 0 -> 12 AM and 12 -> 12 PM.
            hour = int(datetime.now().strftime("%H"))
            postfix = " PM" if hour >= 12 else " AM"
            h = hour % 12 or 12
            print("System Time : " + str(h) + ":" +
                  datetime.now().strftime("%M:%S") + postfix)
            print("System Up-Time : " + str(int(stop_time - start_time)) +
                  " Seconds")
        elif cmd == "clear":
            # 15 blank lines approximates a cleared terminal portably.
            for _ in range(15):
                print("\n")
        elif cmd == "cashinfo":
            machine = Core_Data(0, 0, 0, 0, 0, 0, 0)
            counts = machine.Cash_Load()
            # `notes` (module-level) holds the denomination for each slot.
            for i, pieces in enumerate(counts):
                print("Rupees " + str(notes[i]) + " = " + str(pieces) +
                      " pcs")
        elif cmd in ("fun", "graffiti"):
            graffiti()
        elif cmd == "help":
            print("sysinfo - This prints basic conditions of the program")
            print("clear - Clears the Terminal screen mess")
            print("fun - Prints the owner's beautiful graffiti")
            print("graffiti - Prints the owner's beautiful graffiti")
            print("cashinfo - Prints the current cash data in the machine")
            print("exit - Exit's the root console")
Example #51
0
def test(verbosity=1,
         calculators=None,
         dir=None,
         display=True,
         stream=sys.stdout):
    """Collect and run the ASE test scripts under ase/test.

    Parameters
    ----------
    verbosity : int
        Passed through to unittest's TextTestRunner.
    calculators : list of str, optional
        Calculator names to keep enabled; all others are disabled.
    dir : str, optional
        Test directory: None means ase/test itself, an absolute path is
        used as-is, anything else is taken relative to ase/test.  (Name
        kept for backward compatibility although it shadows the builtin.)
    display : bool
        Forwarded to each ScriptTestCase.
    stream
        Stream the test runner writes its report to.

    Returns
    -------
    The unittest result object from running the suite.
    """
    # Avoid the shared mutable-default-argument pitfall (was ``=[]``).
    if calculators is None:
        calculators = []
    test_calculator_names.extend(calculators)
    disable_calculators(
        [name for name in calc_names if name not in calculators])
    ts = unittest.TestSuite()
    if dir is None:
        # ase/test (__path__[0])
        testdir = __path__[0]
    elif os.path.isdir(dir):
        # absolute path
        testdir = dir
    else:
        # relative to ase/test (__path__[0])
        testdir = os.path.join(__path__[0], dir)
    files = glob(testdir + '/*')
    sdirtests = []  # tests from subdirectories: only one level assumed
    tests = []
    for f in files:
        if os.path.isdir(f):
            # add test subdirectories (like calculators)
            sdirtests.extend(glob(os.path.join(testdir, f) + '/*.py'))
        elif f.endswith('.py'):
            # add py files in testdir
            tests.append(f)
    tests.sort()
    sdirtests.sort()
    tests.extend(sdirtests)  # run test subdirectories at the end
    lasttest = None  # COCu111.py must run last if present
    # Loop variable renamed: the old name ``test`` shadowed this function.
    for filename in tests:
        if filename.endswith('vtk_data.py'):
            continue
        if filename.endswith('__init__.py'):
            continue
        if filename.endswith('COCu111.py'):
            lasttest = filename
            continue
        ts.addTest(ScriptTestCase(filename=filename, display=display))
    if lasttest:
        ts.addTest(ScriptTestCase(filename=lasttest, display=display))

    operating_system = platform.system() + ' ' + platform.machine()
    # platform.dist() was removed in Python 3.8; degrade to an empty
    # suffix instead of crashing (``tuple()`` -> () -> '' after join).
    operating_system += ' ' + ' '.join(getattr(platform, 'dist', tuple)())
    python = platform.python_version() + ' ' + platform.python_compiler()
    python += ' ' + ' '.join(platform.architecture())
    # Single-argument print with parens works identically on py2 and py3.
    print('python %s on %s' % (python, operating_system))

    from ase.utils import devnull
    # Silence test-script stdout; the runner itself writes to *stream*.
    sys.stdout = devnull

    ttr = unittest.TextTestRunner(verbosity=verbosity, stream=stream)
    results = ttr.run(ts)

    sys.stdout = sys.__stdout__

    return results
Example #52
0
'''
Created on 2016/3/3 16:44

@author: sodbvi
'''

import platform

# platform.linux_distribution() was removed in Python 3.8; fall back to
# an empty triple so this report still runs on modern interpreters.
_linux_distribution = getattr(platform, 'linux_distribution',
                              lambda: ('', '', ''))

# Snapshot of interpreter / OS facts, keyed by the display label.
profile = {
    'Architecture: ': platform.architecture(),
    'Linux Distribution: ': _linux_distribution(),
    'mac_ver: ': platform.mac_ver(),
    'machine: ': platform.machine(),
    'node: ': platform.node(),
    'platform: ': platform.platform(),
    'processor: ': platform.processor(),
    'python build: ': platform.python_build(),
    'python compiler: ': platform.python_compiler(),
    'python version: ': platform.python_version(),
    'release: ': platform.release(),
    'system: ': platform.system(),
    'uname: ': platform.uname(),
    'version: ': platform.version(),
}

# Print one "label: value" line per entry (iterate items, not keys).
for key, value in profile.items():
    print(key + str(value))
Example #53
0
        try:
            self.counter[0] += 1
        except (Exception):
            pass

    def incFailed(self):
        """Bump the failure tally (slot 1 of ``self.counter``).

        Any error (e.g. counter missing or not indexable) is swallowed
        deliberately so statistics can never crash the caller.
        """
        try:
            self.counter[1] = self.counter[1] + 1
        except Exception:
            pass


# Clear the terminal ("cls" on Windows, "clear" elsewhere).
os.system('cls' if os.name == 'nt' else 'clear')
# Startup banner: timestamp plus interpreter / OS details (Python 2
# print statements).
print time.strftime("%b %d %Y %H:%M:%S")
print 'Python Version: ', platform.python_version()
print 'Compiler: ', platform.python_compiler()
print 'Operating system: ', platform.platform()
print 'Initiating monitor'
print \
"""
            @@@@@@@@@
                 __\_\__
     ____________|_____|_____________
      \                            /
       \      O   O   O   O       /    
|^^^^^^^\________________________/^^|
i-----------------------------------I
| /$$                 /$$           |                    
|| $$                | $$           |
|| $$       /$$   /$$| $$ /$$$$$$$$||
|| $$      | $$  | $$| $$|____ /$$/ |
Example #54
0
# Initialize some variables.
#
pynio_vfile = "pynio_version.py"         # PyNIO version file.
pkgs_pth    = get_python_lib()           # site-packages install target.

#
# These variables are temporarily defined for readability.
# The default FORTRAN_CALLING_METHOD is APPEND_UNDERSCORE.
#

# Symbol name-mangling conventions used by Fortran compilers.
APPEND_UNDERSCORE = 1
NO_APPEND_UNDERSCORE = 2
CAPS_NO_APPEND_UNDERSCORE = 3

# Add the Fortran runtime libraries each platform needs.
# 64-bit Linux with a GCC 4.x-built Python needs gfortran's runtime.
if sys.platform == "linux2" and os.uname()[-1] == "x86_64" and \
    platform.python_compiler()[:5] == "GCC 4":
    LIBRARIES.append('gfortran')

# SGI IRIX: MIPSpro Fortran runtimes plus szip.
elif sys.platform == "irix6-64":
    LIBRARIES.append('ftn')
    LIBRARIES.append('fortran')
    LIBRARIES.append('sz')

# Solaris: Sun Fortran and math runtimes.
elif sys.platform == "sunos5":
    LIBRARIES.append('fsu')
    LIBRARIES.append('sunmath')

# AIX: force 64-bit object mode and use the XL Fortran runtime, which
# does not append underscores to symbol names.
elif sys.platform == "aix5":
    os.putenv('OBJECT_MODE',"64")
    LIBRARIES.append('xlf90')
    LIB_MACROS.append(('FORTRAN_CALLING_METHOD', NO_APPEND_UNDERSCORE))
import platform

# Report which compiler produced the running Python build.
compiler_description = platform.python_compiler()
print(compiler_description)
Example #56
0
    match = re.search(r"^GNU gdb.*?\b(\d+)\.(\d+)", version)
    if match is None:
        raise Exception("unable to parse GDB version: %r" % version)
    return (version, int(match.group(1)), int(match.group(2)))

# Probe the installed gdb once at import time; each guard below raises
# unittest.SkipTest so the whole module is skipped (not failed) when the
# environment cannot support gdb-based tests.
gdb_version, gdb_major_version, gdb_minor_version = get_gdb_version()
if gdb_major_version < 7:
    # Python embedding support landed in gdb 7.0.
    raise unittest.SkipTest("gdb versions before 7.0 didn't support python "
                            "embedding. Saw %s.%s:\n%s"
                            % (gdb_major_version, gdb_minor_version,
                               gdb_version))

if not sysconfig.is_python_build():
    # The python-gdb.py hooks live in the source tree, not in installs.
    raise unittest.SkipTest("test_gdb only works on source builds at the moment.")

if 'Clang' in platform.python_compiler() and sys.platform == 'darwin':
    raise unittest.SkipTest("test_gdb doesn't work correctly when python is"
                            " built with LLVM clang")

if ((sysconfig.get_config_var('PGO_PROF_USE_FLAG') or 'xxx') in
    (sysconfig.get_config_var('PY_CORE_CFLAGS') or '')):
    # 'xxx' is a never-matching sentinel covering unset config vars.
    raise unittest.SkipTest("test_gdb is not reliable on PGO builds")

# Location of custom hooks file in a repository checkout.
checkout_hook_path = os.path.join(os.path.dirname(sys.executable),
                                  'python-gdb.py')

# Fixed hash seed so gdb-observed dict ordering is reproducible.
PYTHONHASHSEED = '123'


def cet_protection():
Example #57
0
    if __name__ == '__main__':
        # Create a multiprocessing Pool
        pool = mp.Pool()
        pool.map(collector.analyze, period)

    print("[%.4fs] Analyzing complete. Zipping dictionaries..." % (elapsed()))

    dictionary.zipper(start_month, start_year, end_month, end_year)

    final_time = datetime.timedelta(seconds=elapsed())

    print("[%.4fs] ThaiTrend: Emoji has finished in %s " %
          (elapsed(), str(final_time)))
    print("Next: Run graph.py to visualize data into graphs.")


# Record launch time so elapsed-time progress messages have a baseline.
START_TIME = time.time()
# Banner with the host/interpreter details before the run starts.
print("============ ThaiTrend: Emoji (v1.1) is running... ============")
print()
print("================= Current machine information =================")
print('** Python version\t:', platform.python_version())
print('** Compiler\t\t:', platform.python_compiler())
print('** System\t\t:', platform.system(), platform.release())
print('** Processor\t\t:', platform.processor())
print('** CPU counts\t\t:', mp.cpu_count())
print('** Interpreter\t\t:', platform.architecture()[0])
print()

# Kick off the analysis pipeline defined above.
main()
Example #58
0
from __future__ import division, print_function

import os
import platform
import sys
from os.path import join

from numpy.distutils.system_info import platform_bits

# True when this interpreter is a Windows Python built with Microsoft
# Visual C++ (compiler string starts with "MS").
_platform_is_windows = platform.platform().startswith('Windows')
is_msvc = _platform_is_windows and platform.python_compiler().startswith('MS')


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration, get_mathlibs
    config = Configuration('random', parent_package, top_path)

    def generate_libraries(ext, build_dir):
        config_cmd = config.get_config_cmd()
        libs = get_mathlibs()
        if sys.platform == 'win32':
            libs.extend(['Advapi32', 'Kernel32'])
        ext.libraries.extend(libs)
        return None

    # enable unix large file support on 32 bit systems
    # (64 bit off_t, lseek -> lseek64 etc.)
    if sys.platform[:3] == "aix":
        defs = [('_LARGE_FILES', None)]
    else:
        defs = [('_FILE_OFFSET_BITS', '64'), ('_LARGEFILE_SOURCE', '1'),
Example #59
0
def get_python_compiler():
    """Return a string identifying the compiler used to build this Python."""
    compiler = platform.python_compiler()
    return compiler
# Read in list of modules from cmd line
mod_list = sys.argv[1:]

# Get version info for each module in mod_list
# (get_distribution presumably comes from pkg_resources, imported
# elsewhere in this file -- verify against the imports.)
mod_info = []
for mod in mod_list:
    mod_info.append(get_distribution(mod))

# Print all information

# NOTE(review): Python 2 print statements; `author` and `time_stamp`
# are defined elsewhere in the file.
print '\nGeneral Information...'
print 'Author: {0}'.format(author)
print 'Date: {0}'.format(time_stamp)

print '\nPython Information...'
print 'CPython: {0}'.format(platform.python_version())
print 'IPython: {0}'.format(IPython.__version__)

# One line per requested module's distribution info.
print '\nModule Information...'
for mod in range(len(mod_info)):
    print mod_info[mod]

print '\nSystem Information...'
print 'Compiler: {0}'.format(platform.python_compiler())
print 'System: {0}'.format(platform.system())
print 'Release: {0}'.format(platform.release())
print 'Machine: {0}'.format(platform.machine())
print 'Processor: {0}'.format(platform.processor())
print 'CPU Cores: {0}'.format(cpu_count())
print 'Interpreter: {0}'.format(platform.architecture()[0])