Example 1
    def test_babel(self):
        ureg = UnitRegistry()
        dirname = os.path.dirname(__file__)
        ureg.load_definitions(os.path.join(dirname, '../xtranslated.txt'))

        distance = 24.0 * ureg.meter
        self.assertEqual(
            distance.format_babel(locale='fr_FR', length='long'),
            "24.0 mètres"
        )
        time = 8.0 * ureg.second
        self.assertEqual(
            time.format_babel(locale='fr_FR', length='long'),
            "8.0 secondes"
        )
        self.assertEqual(
            time.format_babel(locale='ro', length='short'),
            "8.0 s"
        )
        acceleration = distance / time ** 2
        self.assertEqual(
            acceleration.format_babel(locale='fr_FR', length='long'),
            "0.375 mètre par seconde²"
        )
        mks = ureg.get_system('mks')
        self.assertEqual(
            mks.format_babel(locale='fr_FR'),
            "métrique"
        )
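Note that format_babel in the test above relies on pint's optional Babel dependency being installed; a minimal sketch outside the test class, mirroring the assertions above:

    # ureg = UnitRegistry()
    # (3.0 * ureg.meter).format_babel(locale='fr_FR', length='long')   # '3.0 mètres'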
Example 2
def td_to_mat_file(td_data: Any, real_units: bool = True, fname: Optional[str] = None) -> None:
    """Export DataSet created by microscope.td_cap to .mat file for analysis.

    Args:
        td_data: qcodes DataSet created by Microscope.td_cap
        real_units: If True, converts data from DAQ voltage into
            units specified in measurement configuration file.
        fname: File name (without extension) for resulting .mat file.
            If None, uses the file name defined in measurement configuration file.
    """
    from pint import UnitRegistry
    ureg = UnitRegistry()
    ureg.load_definitions('./squid_units.txt')
    Q_ = ureg.Quantity
    meta = td_data.metadata['loop']['metadata']
    arrays = td_to_arrays(td_data, ureg=ureg, real_units=real_units)
    mdict = {}
    for name, arr in arrays.items():
        if name != 'height':
            unit = meta['channels'][name]['unit'] if real_units else 'V'
            mdict.update({name: {'array': arr.to(unit).magnitude, 'unit': unit}})
    mdict.update({'height': {'array': arrays['height'].to('V').magnitude, 'unit': 'V'}})
    mdict.update({
        'prefactors': meta['prefactors'],
        'location': td_data.location
        })
    try:
        mdict.update({'td_height': td_data.metadata['loop']['metadata']['td_height']})
    except KeyError:
        pass
    if fname is None:
        fname = meta['fname']
    fpath = td_data.location + '/'
    io.savemat(next_file_name(fpath + fname, 'mat'), mdict)
Example 3
def _load_pint_units() -> UnitRegistry:
    """Missing units found in project-haystack Added to the registry"""
    unit_ureg = UnitRegistry(on_redefinition='ignore')
    unit_ureg.load_definitions(
        os.path.join(os.path.dirname(__file__), 'haystack_units.pint'))
    unit_ureg.define(
        UnitDefinition('%', 'percent', (), ScaleConverter(1 / 100.0)))
    return unit_ureg
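A brief usage sketch of the registry built above (hedged: the contents of haystack_units.pint are not shown here, so only the percent definition is exercised):

    ureg = _load_pint_units()
    q = ureg.Quantity(12.5, 'percent')   # 'percent' is the symbol registered for '%'
    print(q.to('dimensionless'))         # 0.125 dimensionless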
Example 4
def compute_allocated_resources():
    ureg = UnitRegistry()
    ureg.load_definitions('kubernetes_units.txt')

    Q_   = ureg.Quantity
    data = {}

    # when running this computation inside a k8s cluster, use load_incluster_config() instead
    # config.load_incluster_config()
    config.load_kube_config()
    core_v1 = client.CoreV1Api()

    for node in core_v1.list_node().items:
        stats          = {}
        node_name      = node.metadata.name
        allocatable    = node.status.allocatable
        print(allocatable)
        max_pods       = int(int(allocatable["pods"]) * 1.5)
        field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
                          "spec.nodeName=" + node_name)

        stats["cpu_alloc"] = Q_(allocatable["cpu"])
        print stats["cpu_alloc"]
        stats["mem_alloc"] = Q_(allocatable["memory"])

        pods = core_v1.list_pod_for_all_namespaces(limit=max_pods,
                                                   field_selector=field_selector).items

        # compute the allocated resources
        cpureqs,cpulmts,memreqs,memlmts = [], [], [], []
        for pod in pods:
            for container in pod.spec.containers:
                res  = container.resources
                reqs = defaultdict(lambda: 0, res.requests or {})
                lmts = defaultdict(lambda: 0, res.limits or {})
                cpureqs.append(Q_(reqs["cpu"]))
                memreqs.append(Q_(reqs["memory"]))
                cpulmts.append(Q_(lmts["cpu"]))
                memlmts.append(Q_(lmts["memory"]))

        stats["cpu_req"]     = sum(cpureqs)
        print stats["cpu_req"]
        stats["cpu_lmt"]     = sum(cpulmts)
        print stats["cpu_lmt"]
        stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] * 100)
        print stats["cpu_alloc"]
        print stats["cpu_req_per"]
        stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] * 100)
        stats["mem_req"]     = sum(memreqs)
        stats["mem_lmt"]     = sum(memlmts)
        stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] * 100)
        stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] * 100)

        data[node_name] = stats

    return data
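This function only works if kubernetes_units.txt teaches pint the Kubernetes resource suffixes (Ki/Mi/Gi for memory, m for millicores). One plausible layout for that definitions file, shown as an assumption rather than the author's exact contents:

    kmemunits = 1 = [kmemunits]
    Ki = 1024 * kmemunits
    Mi = Ki ** 2
    Gi = Ki ** 3
    Ti = Ki ** 4

    kcpuunits = 1 = [kcpuunits]
    m = 1/1000 * kcpuunits
    k = 1000 * kcpuunits
    M = k ** 2
    G = k ** 3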
Example 5
def test_registry_locale():
    ureg = UnitRegistry(fmt_locale="fr_FR")
    dirname = os.path.dirname(__file__)
    ureg.load_definitions(os.path.join(dirname, "../xtranslated.txt"))

    distance = 24.0 * ureg.meter
    assert distance.format_babel(length="long") == "24.0 mètres"
    time = 8.0 * ureg.second
    assert time.format_babel(length="long") == "8.0 secondes"
    assert time.format_babel(locale="ro", length="short") == "8.0 s"
    acceleration = distance / time ** 2
    assert acceleration.format_babel(length="long") == "0.375 mètre par seconde²"
    mks = ureg.get_system("mks")
    assert mks.format_babel(locale="fr_FR") == "métrique"
Example 6
    def test_registry_locale(self):
        ureg = UnitRegistry(fmt_locale="fr_FR")
        dirname = os.path.dirname(__file__)
        ureg.load_definitions(os.path.join(dirname, "../xtranslated.txt"))

        distance = 24.0 * ureg.meter
        self.assertEqual(distance.format_babel(length="long"), "24.0 mètres")
        time = 8.0 * ureg.second
        self.assertEqual(time.format_babel(length="long"), "8.0 secondes")
        self.assertEqual(time.format_babel(locale="ro", length="short"),
                         "8.0 s")
        acceleration = distance / time**2
        self.assertEqual(acceleration.format_babel(length="long"),
                         "0.375 mètre par seconde²")
        mks = ureg.get_system("mks")
        self.assertEqual(mks.format_babel(locale="fr_FR"), "métrique")
Example 7
def scan_to_mat_file(scan_data: Any, real_units: bool = True, xy_unit: Optional[str] = None,
    fname: Optional[str] = None, interpolator: Optional[Callable] = None) -> None:
    """Export DataSet created by microscope.scan_surface to .mat file for analysis.

    Args:
        scan_data: qcodes DataSet created by Microscope.scan_plane
        real_units: If True, converts z-axis data from DAQ voltage into
            units specified in measurement configuration file.
        xy_unit: String describing quantity with dimensions of length.
            If xy_unit is not None, scanner x, y DAQ ao voltage will be converted to xy_unit
            according to scanner constants defined in microscope configuration file.
        fname: File name (without extension) for resulting .mat file.
            If None, uses the file name defined in measurement configuration file.
        interpolator: Instance of scipy.interpolate.Rbf, used to interpolate touchdown points.
            Default: None.
    """
    from pint import UnitRegistry
    ureg = UnitRegistry()
    ureg.load_definitions('./squid_units.txt')
    Q_ = ureg.Quantity
    meta = scan_data.metadata['loop']['metadata']
    arrays = scan_to_arrays(scan_data, ureg=ureg, real_units=real_units, xy_unit=xy_unit)
    mdict = {}
    for name, arr in arrays.items():
        if real_units:
            if xy_unit:
                unit = meta['channels'][name]['unit'] if name.lower() not in ['x', 'y'] else xy_unit
            else: 
                unit = meta['channels'][name]['unit'] if name.lower() not in ['x', 'y'] else 'V'
        else:
            unit = 'V'
        if meta['fast_ax'] == 'y':
            arr = arr.T
        mdict.update({name: {'array': arr.to(unit).magnitude, 'unit': unit}})
    if interpolator is not None:
        surf = interpolator(arrays['X'], arrays['Y'])
        surf = surf if meta['fast_ax'] == 'x' else surf.T
        mdict.update({'surface': {'array': surf, 'unit': 'V'}})
    mdict.update({'prefactors': meta['prefactors'], 'location': scan_data.location})
    if fname is None:
        fname = meta['fname']
    fpath = scan_data.location + '/'
    io.savemat(next_file_name(fpath + fname, 'mat'), mdict)
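Grounded in the mdict built above, the saved .mat file holds one struct per channel with 'array' and 'unit' fields, plus 'prefactors', 'location', and (when an interpolator is given) 'surface'. A hypothetical round trip:

    # scan_to_mat_file(scan_data, xy_unit='um')
    # mat = scipy.io.loadmat('<location>/<fname>.mat')   # exact struct indexing depends on scipy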
Example 8
__all__ = ('chemical_units_of_measure', 'stream_units_of_measure', 'ureg',
           'get_dimensionality', 'DisplayUnits', 'AbsoluteUnitsOfMeasure',
           'convert', 'Quantity', 'format_plot_units')

from .exceptions import DimensionError

# %% Import unit registry

from pint import UnitRegistry
from pint.quantity import to_units_container
import os

# Set pint Unit Registry
ureg = UnitRegistry()
ureg.default_format = '~P'
ureg.load_definitions(
    os.path.dirname(os.path.realpath(__file__)) + '/units_of_measure.txt')
convert = ureg.convert
Quantity = ureg.Quantity
del os, UnitRegistry

# %% Functions


def format_degrees(units):
    if units.startswith('deg'):
        units = r'^\circ ' + units[3:]
    return units


def format_units(units, isnumerator=True):
    if '^' in units:
Example 9
"""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from os.path import join, dirname, abspath, exists
from pint import UnitRegistry, DimensionalityError

ureg = UnitRegistry()
_units_file = abspath(join(dirname(__file__), 'units.txt'))
# make sure file exists
if not exists(_units_file):
    raise AssertionError(
        "Couldn't find units file at {0}".format(_units_file))
ureg.load_definitions(_units_file)
Q_ = ureg.Quantity

# %% Pint aware arrays


class uarray(np.ndarray):
    ''' Unit-aware array based on Pint

    Example
    -------

    >>> a = uarray(np.linspace(10, 100, 10), 'Td')

    '''
Example 10
import os
from pint import UnitRegistry
ureg = UnitRegistry()

dir_path = os.path.dirname(os.path.realpath(__file__))
path_to_unit_defs_file = os.path.join(dir_path, 'unit_defs.txt')
ureg.load_definitions(path_to_unit_defs_file)
Example 11
"""Implements all the reading and translation of facility specific
data streams into a unified format that can be used by the analysis
package."""
from __future__ import print_function, absolute_import  # Compatibility with python 2 and 3
import os
from pint import UnitRegistry
from .worker import Worker  # pylint: disable=unused-import
from .event_translator import EventTranslator  # pylint: disable=unused-import
from .record import Record, add_record  # pylint: disable=unused-import

ureg = UnitRegistry()
ureg.enable_contexts('spectroscopy')
ureg.default_format = '~'
ureg.load_definitions(os.path.join(os.path.dirname(__file__), 'units.txt'))
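Because the 'spectroscopy' context is enabled globally on this registry, wavelength, frequency and energy conversions work directly; a small sketch:

    # ureg('500 nm').to('THz')   # ~599.58 THz via the spectroscopy context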
Example 12
def compute_allocated_resources(spotWorkerNodeLabel, onDemandWorkerNodeLabel):
    labelSelectors = {
        "spot": spotWorkerNodeLabel,
        "on-demand": onDemandWorkerNodeLabel
    }
    ureg = UnitRegistry()
    ureg.load_definitions('kubernetes_units.txt')

    Q_ = ureg.Quantity
    data = {}

    for instanceType in labelSelectors:
        data[instanceType] = []
        for node in core_v1.list_node(
                label_selector=labelSelectors[instanceType]).items:
            stats = {}
            node_name = node.metadata.name
            allocatable = node.status.allocatable
            max_pods = int(int(allocatable["pods"]) * 1.5)
            field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
                              "spec.nodeName=" + node_name)

            stats["cpu_alloc"] = Q_(allocatable["cpu"])
            stats["mem_alloc"] = Q_(allocatable["memory"])

            pods = core_v1.list_pod_for_all_namespaces(
                limit=max_pods, field_selector=field_selector).items
            clusterOverprovisionerStats = {"cpu_req": 0, "mem_req": 0}
            # compute the allocated resources
            cpureqs, cpulmts, memreqs, memlmts = [], [], [], []
            podsStats = []
            for pod in pods:
                podCpuReqs = []
                podMemReqs = []
                for container in pod.spec.containers:
                    res = container.resources
                    reqs = defaultdict(lambda: 0, res.requests or {})
                    lmts = defaultdict(lambda: 0, res.limits or {})
                    cpureqs.append(Q_(reqs["cpu"]))
                    memreqs.append(Q_(reqs["memory"]))
                    cpulmts.append(Q_(lmts["cpu"]))
                    memlmts.append(Q_(lmts["memory"]))
                    podCpuReqs.append(Q_(reqs["cpu"]))
                    podMemReqs.append(Q_(reqs["memory"]))
                podsStats.append({
                    "cpu_req": sum(podCpuReqs),
                    "mem_req": sum(podMemReqs),
                    "spec": pod,
                    "name": pod.metadata.name
                })
                if pod.metadata.labels and pod.metadata.labels.get(
                        "app.kubernetes.io/instance"
                ) == "cluster-overprovisioner":
                    clusterOverprovisionerStats = {
                        "cpu_req":
                        clusterOverprovisionerStats["cpu_req"] +
                        sum(podCpuReqs),
                        "mem_req":
                        clusterOverprovisionerStats["mem_req"] +
                        sum(podMemReqs)
                    }
            podsStats.sort(key=lambda x: x["cpu_req"], reverse=True)

            stats["cpu_req"] = sum(cpureqs)
            stats["cpu_lmt"] = sum(cpulmts)
            stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] *
                                    100).to('dimensionless')
            stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] *
                                    100).to('dimensionless')

            stats["mem_req"] = sum(memreqs)
            stats["mem_lmt"] = sum(memlmts)
            stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] *
                                    100).to('dimensionless')
            stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] *
                                    100).to('dimensionless')
            stats["cpu_free"] = stats["cpu_alloc"] - stats[
                "cpu_req"] + clusterOverprovisionerStats["cpu_req"]
            stats["mem_free"] = stats["mem_alloc"] - stats[
                "mem_req"] + clusterOverprovisionerStats["mem_req"]
            dataEntry = {
                "name": node_name,
                "spec": node,
                "node-stats": stats,
                "pods": podsStats
            }
            data[instanceType].append(dataEntry)

        ### The effect of this algorithm should be, that we take the emptiest on demand nodes first
        ### and empty those before we empty a node which is busier, thus resulting in the highest number
        ### of 'empty' nodes that can be removed from the cluster.

        ### Sort on-demand instances by least requested CPU
        if instanceType == "on-demand":
            data[instanceType].sort(key=lambda x: x["node-stats"]["cpu_req"])
        ### Sort spot instances by most free CPU
        else:
            data[instanceType].sort(key=lambda x: x["node-stats"]["cpu_free"],
                                    reverse=True)
    return data
Example 13
## Units handling
# per the pint documentation, it's important that pint and its associated Unit
# Registry are only imported once.
from pint import UnitRegistry
# here we assign the identifier 'unit' to the UnitRegistry
unit = UnitRegistry()

#use this to enable legacy handling of offset units
# TODO fix this to handle offsets the way pint wants us to since 0.7
unit.autoconvert_offset_to_baseunit = True

# append custom unit definitions and contexts
import os
directory = os.path.dirname(__file__)
unit.load_definitions(directory + '/pint_custom_units.txt')
# activate the "chemistry" context globally
unit.enable_contexts('chem')
# set the default string formatting for pint quantities
unit.default_format = 'P~'

def testfunc(val):
    list = []
    try:
        list.append(float(val) * unit(''))
        return list
    except ValueError:
        print('Value Error')
        return None
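
# Other modules are meant to import this one registry instead of creating their
# own, so that all quantities share it; a sketch with a hypothetical package path:
#     from mypackage.units import unit
#     c = 0.1 * unit('mol/L')   # shares the registry configured above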

Example 14
import os
from django.conf import settings
from django.shortcuts import _get_queryset
from django.utils.http import urlencode
from django.urls import reverse
from pint import UnitRegistry
import qrcode

ureg = UnitRegistry()
ureg.load_definitions(
    os.path.join(settings.BASE_DIR, 'common/unit_definitions/units.txt'))
Q_ = ureg.Quantity


def query_reverse(viewname, kwargs=None, query_kwargs=None):
    """
    Custom reverse to add a query string after the url
    Example usage:
    url = my_reverse('my_test_url', kwargs={'pk': object.id}, query_kwargs={'next': reverse('home')})
    """
    url = reverse(viewname, kwargs=kwargs)

    if query_kwargs:
        # remove items with value None
        query_kwargs = {k: v for k, v in query_kwargs.items() if v is not None}

    if query_kwargs:
        return u'%s?%s' % (url, urlencode(query_kwargs))

    return url
Example 15
    :platform: Unix, Windows
    :synopsis: time management

.. moduleauthor:: Gaël PICOT <*****@*****.**>
'''
from enum import Enum
from pint import UnitRegistry
import os
import math


current_rep = os.path.abspath(os.path.split(__file__)[0])

ureg = UnitRegistry()
ureg.load_definitions(os.path.join(current_rep, 'unit_reg.txt'))


class heures(Enum):
    """ enumeration des heures de reve de dragon
    """
    vaisseau = 1
    sirene = 2
    faucon = 3
    couronne = 4
    dragon = 5
    epee = 6
    lyre = 7
    serpent = 8
    poisson_ac = 9
    araigne = 10
Example 16
from urllib.request import Request, urlopen
import pandas as pd
import json
import warnings
from pint import UnitRegistry, UndefinedUnitError, DimensionalityError
ureg = UnitRegistry()
ureg.load_definitions('./Data/food_units.txt') 
import numpy as np

#%matplotlib inline

import requests

def ndb_search(apikey, term, url = 'https://api.nal.usda.gov/ndb/search'):
    """
    Search Nutrition DataBase, using apikey and string "term" as search criterion.

    Returns a pd.DataFrame of results.
    """
    parms = (('format', 'json'),('q', term),('api_key', apikey))
    r = requests.get(url, params = parms)
    if 'list' in r.json():
        l = r.json()['list']['item']
    else: 
        return []

    return pd.DataFrame(l)
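
# Usage sketch (hypothetical API key; querying the NDB endpoint needs network access):
#     results = ndb_search(apikey='DEMO_KEY', term='quinoa')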

def ndb_report(apikey, ndbno, url = 'https://api.nal.usda.gov/ndb/V2/reports'):
    """Construct a food report for food with given ndbno.  
Example 17
"""This module deals with units conversion in the ROSS library."""
import inspect
import warnings
from functools import wraps
from pathlib import Path

from pint import Quantity, UnitRegistry

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    Quantity([])

new_units_path = Path(__file__).parent / "new_units.txt"
ureg = UnitRegistry()
ureg.load_definitions(str(new_units_path))
Q_ = ureg.Quantity

__all__ = ["Q_", "check_units"]

units = {
    "E": "N/m**2",
    "G_s": "N/m**2",
    "rho": "kg/m**3",
    "L": "meter",
    "idl": "meter",
    "idr": "meter",
    "odl": "meter",
    "odr": "meter",
    "speed": "radian/second",
    "frequency": "radian/second",
    "frequency_range": "radian/second",
Example 18
from past.builtins import basestring

import operator
import copy
from os.path import join, abspath, dirname

import numpy as np
from pint import UnitRegistry, set_application_registry, DimensionalityError, UndefinedUnitError

from ..utils import ResizableArray


# Set up pint's unit definitions
ureg = UnitRegistry()
unit_def_file = join(abspath(dirname(__file__)), '..', '_static_data','pint_atomic_units.txt')
ureg.load_definitions(unit_def_file)
ureg.default_system = 'nano'
set_application_registry(ureg)


class MdtUnit(ureg.Unit):
    """
    Pickleable version of pint's Unit class.
    """
    def __reduce__(self):
        return _get_unit, (str(self),)

    @property
    def units(self):
        return self
Example 19
class Converter(object):
    def __init__(self, definitions=None, separator='>', precision=None):
        from pint import UnitRegistry

        self.ureg = UnitRegistry()
        self.ureg.load_definitions('unit_defs.txt')
        self.load_definitions(definitions)
        self.separator = separator
        self.precision = precision

    def load_definitions(self, definitions):
        if not definitions:
            return

        if not isinstance(definitions, (list, tuple)):
            definitions = [definitions]
        for d in definitions:
            LOG.info('loading definitions from %s', d)
            self.ureg.load_definitions(d)

    def convert(self, query):
        from pint.unit import DimensionalityError
        import re

        Q_ = self.ureg.Quantity

        # step 1: split the query into an input value and output units at a
        # self.separator
        value, sep, units = query.partition(self.separator)
        value = re.sub(r'([A-Za-z]) ([A-Za-z])', r'\1_\2', value.strip())
        units = re.sub(r'(\w) (\w)', r'\1_\2', units.strip())

        LOG.debug('query: %s', query)
        LOG.debug('input: %s', value)
        LOG.debug('units: %s', units)

        in_val = Q_(value)
        out_units = Q_(units.replace(' ', '_'))

        try:
            out_val = in_val.to(out_units)
        except DimensionalityError as e:
            if str(e.units1) in SPECIAL:
                in_val2 = in_val.magnitude * Q_(SPECIAL[str(e.units1)])
                out_val = in_val2.to(out_units)
            elif str(e.units2) in SPECIAL:
                out_units2 = Q_(SPECIAL[str(e.units2)])
                out_val = in_val.to(out_units2)
            else:
                raise

        LOG.debug(u'converted {0} to {1:P}'.format(in_val, out_val))

        magnitude = out_val.magnitude
        units = out_val.units

        if self.precision:
            from decimal import Decimal
            rval = Decimal('1.' + '0' * self.precision)
            magnitude = Decimal(out_val.magnitude).quantize(rval)

        return (magnitude, u'{0} {1}'.format(magnitude, units))
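A usage sketch for this converter (hedged: it also relies on module-level LOG and SPECIAL objects that are not part of this snippet):

    # conv = Converter(precision=2)
    # conv.convert('10 km > mi')   # -> (Decimal('6.21'), '6.21 mile')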
Example 20
from pint import UnitRegistry
from os import getcwd, path
from .backend import *

if path.exists(path.join(getcwd(), "lib")):
    ruta = path.join(getcwd(), 'lib', 'engine')
else:
    ruta = path.join(getcwd(), 'engine')

ureg = UnitRegistry()
ureg.load_definitions(path.join(ruta, 'unit_definitions.txt'))
q = ureg.Quantity

material_densities = abrir_json(path.join(ruta, 'material_densities.json'))
molecular_weight = abrir_json(path.join(ruta, 'molecular_weight.json'))
recomendation = abrir_json(path.join(ruta, 'recomendation.json'))
Example 21
#!/usr/bin/env python3

import yaml
import re
import sys
import os
import pdb
from pint import UnitRegistry
from argparse import ArgumentParser

compile_pandoc = True

ureg = UnitRegistry()
ureg.load_definitions('unit_registry.txt')
# 1 tsp smoked paprika = 3.3g


def get_args():
    """
    Read the arguments and return them to main.
    """
    parser = ArgumentParser(description="Compile a menu and calculate "
                            "the required quantities.")
    parser.add_argument('menu', nargs='?', help="YAML file describing menu.")
    parser.add_argument('-o',
                        '--output',
                        default="output",
                        help="Output directory")
    parser.add_argument(
        '-v',
        '--verbose',
Example 22
import numpy as np
from math import ceil
from dataclasses import dataclass, replace

from pint import UnitRegistry
from pint.definitions import UnitDefinition
from pint.converters import ScaleConverter

from typing import Callable, Tuple, Optional

ureg = UnitRegistry()

ureg.define(UnitDefinition('percent', None, (), ScaleConverter(1 / 100)))
ureg.load_definitions('units.txt')
Q = ureg.Quantity


@dataclass
class Mortgage(object):
    loan_amount: Q = Q(500_000, 'dollars')
    home_value: Q = Q(600_000, 'dollars')
    duration: Q = Q(10, 'years')
    base_rate: Q = Q(4, 'percent') / Q(1, 'year')
    base_closing_costs: Q = Q(5_000, 'dollars')

    #: The cost as a ratio of the loan amount for each point.
    #: See the CFPB for more detail:
    #:
    #:     https://www.consumerfinance.gov/ask-cfpb/what-are-discount-points-and-lender-credits-and-how-do-they-work-en-136/
    #:
    cost_per_point: Q = Q(1, 'percent / dp')
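
    #: A worked sketch of what cost_per_point implies, assuming the custom
    #: units.txt defines 'dp' as a discount point: buying 2 points on the
    #: default $500,000 loan costs 2 dp × 1 %/dp × $500,000 = $10,000 on top
    #: of the $5,000 base closing costs, e.g.
    #:     (Q(2, 'dp') * Mortgage().cost_per_point * Mortgage().loan_amount).to('dollars')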
Example 23
#

import sys
import importlib.resources as pkg_resources
from pint import UnitRegistry

from PySide6.QtWidgets import QApplication

from ocvl.FeederGUI import PygmyFeeder


class PygmyMetricks():
    def __init__(self):
        super().__init__()


# Press the green button in the gutter to run the script.
if __name__ == '__main__':

    ureg = UnitRegistry()
    with pkg_resources.path("ocvl", "ocvl-pint-defs.txt") as pint_path:
        with open(pint_path, "r") as deffile:
            ureg.load_definitions(deffile)

    app = QApplication([])
    widget = PygmyFeeder()
    widget.resize(800, 600)
    widget.show()

    sys.exit(app.exec())
Example 24
## Imports the class Nuclide from the NuclideClass.py

#from NuclideClass import Nuclide

## Brings in the UnitRegistry from the pint module. The pint package needs to be added in the "Project Interpreter"
## in PyCharm or installed with pip. pint deals automatically with any units added.
## my_def.txt, stored in the project folder, adds the definitions for years as 'y' as well as 'yr'.

from pint import UnitRegistry
ureg = UnitRegistry()
ureg.load_definitions('my_def.txt')

## Sets these values to quantites used by pint
A_ = ureg.Quantity
B_ = ureg.Quantity
C_ = ureg.Quantity

## This is the user input section

## This script constructs the quantity for the half-life to make sure it is valid; if not, it reports an error.

common_name = input("Common Name: ")

try:
    half_life = float(input("Enter the halflife of the nuclide: "))
    hl_units = input("Half-life units i.e. s, h, d, m, y etc: ")
    full_HL = C_(half_life, hl_units)
except Exception:
    print(
        "The half-life input is not recognised, maybe you entered incorrect units, please try again."
    )
Example 25
"wraps pint in gpkit monomials"
import os
try:
    from pint import UnitRegistry, DimensionalityError

    ureg = UnitRegistry()  # pylint: disable=invalid-name
    ureg.load_definitions(os.sep.join([os.path.dirname(__file__),
                                       "usd_cpi.txt"]))
    # next line patches https://github.com/hgrecco/pint/issues/366
    ureg.define("nautical_mile = 1852 m = nmi")
    Quantity = ureg.Quantity
except ImportError:  # pint is not installed; provide dummy imports
    ureg = None  # pylint: disable=invalid-name
    Quantity = lambda a, b: None
    DimensionalityError = None

QTY_CACHE = {}
MON_CACHE = {}


def qty(unit):
    "Returns a Quantity, caching the result for future retrievals"
    if unit not in QTY_CACHE:
        QTY_CACHE[unit] = Quantity(1, unit)
    return QTY_CACHE[unit]


class GPkitUnits(object):
    "Return Monomials instead of Quantitites"

    def __call__(self, arg):
Example 26
class ComputeResource(BaseKubernetes):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.unit_registry = UnitRegistry()
        self.unit_registry.load_definitions(
            os.path.dirname(os.path.abspath(__file__)) +
            "/kubernetes_units.txt")
        self.quantity = self.unit_registry.Quantity
        self.request = BaseRequest(base_uri='http://')
        self.node_list = list()
        self.pod_key_resource = [
            'cpu_request', 'cpu_limit', 'mem_request', 'mem_limit'
        ]
        self.memory_free = float()
        self.memory_total = float()
        self._compute_allocated_resources()
        self.resource_result = ResourceResult()

    def _compute_allocated_resources(self):
        _queue = queue.Queue()
        get_all_node = partial(self.core_v1.list_node)
        threading.Thread(target=self._create_watcher(_queue=_queue,
                                                     stream=get_all_node),
                         daemon=True).start()
        threading.Thread(target=partial(self._get_node_event, _queue=_queue),
                         daemon=True).start()
        threading.Thread(target=partial(self._collect_resource),
                         daemon=True).start()

    def _create_watcher(self, _queue, stream):
        def watcher():
            for event in self.watch.stream(stream):
                _queue.put(event)

        return watcher

    def _get_node_event(self, _queue):
        while True:
            node_info = _queue.get()
            node_schedule = True
            if 'taints' in node_info['object']['spec']:
                for taint in node_info['object']['spec']['taints']:
                    if taint['effect'] == 'NoSchedule':
                        node_schedule = False
                        break
            if not node_schedule:
                continue
            resource_result_len = len(self.resource_result)
            for resource in self.resource_result:
                if resource['hostname'] == node_info['object']['metadata'][
                        'name']:
                    self._collect_container_resource(node_info, resource)
                    break
                resource_result_len -= 1
            if resource_result_len == 0:
                result = dict()
                for key in self.pod_key_resource:
                    result[key] = float()
                self.resource_result.append(result)
                self._collect_container_resource(node_info, result)

            self.node_list.append(node_info)

    def _collect_container_resource(self, node_info, result):
        node_name = node_info['object']['metadata']['name']
        result['hostname'] = node_name
        node_limit = int(
            int(node_info['object']['status']['allocatable']["pods"]) * 1.5)
        field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
                          "spec.nodeName=" + node_name)
        pods = self.core_v1.list_pod_for_all_namespaces(
            limit=node_limit, field_selector=field_selector).items
        for pod in pods:
            for container in pod.spec.containers:
                res = container.resources
                reqs = defaultdict(
                    lambda: 0, res.requests or {
                        'cpu': '0m',
                        'memory': '0Mi'
                    })
                limits = defaultdict(
                    lambda: 0, res.limits or {
                        'cpu': '0m',
                        'memory': '0Mi'
                    })
                for key in self.pod_key_resource:
                    if 'request' in key:
                        if 'cpu' in key:
                            result[key] += self.quantity(
                                reqs['cpu']).to('m').magnitude
                        else:
                            result[key] += self.quantity(
                                reqs['memory']).to('Mi').magnitude
                    else:
                        if 'cpu' in key:
                            result[key] += self.quantity(
                                limits['cpu']).to('m').magnitude
                        else:
                            result[key] += self.quantity(
                                limits['memory']).to('Mi').magnitude

    def _collect_resource(self):
        while True:
            for node_info in self.node_list:
                for resource in self.resource_result:
                    if resource['hostname'] == node_info['object']['metadata'][
                            'name']:
                        self._calculation_resource(node_info, resource)
            time.sleep(3)

    def _calculation_resource(self, node_info, result):
        node_allocatable = node_info['object']['status']['allocatable']
        result['memory_usage'] = round(
            self.quantity(
                (self.memory_total - self.memory_free)).to('Mi').magnitude, 2)
        result['total_cpu'] = \
            self.quantity(int(node_allocatable["cpu"]), 'cpu').to('m').magnitude
        result['total_memory'] = \
            round(self.quantity(node_allocatable["memory"]).to('Mi').magnitude, 2)
        for address in node_info['object']['status']['addresses']:
            if address['type'] == 'InternalIP':
                older_idle, older_total = self._get_resource(
                    '{}:9100/metrics'.format(address['address']))
                time.sleep(1)
                newer_idle, newer_total = self._get_resource(
                    '{}:9100/metrics'.format(address['address']))
                cpu_utilization = (((newer_total - newer_idle) -
                                    (older_total - older_idle)) /
                                   (newer_total - older_total))
                result['cpu_usage'] = result['total_cpu'] * round(
                    cpu_utilization, 2)

    def _get_resource(self, uri):
        metrics_data = self.request.get(uri).text.splitlines()
        cpu_idle = float()
        cpu_total = float()
        for data in metrics_data:
            if '#' not in data:
                if 'node_cpu_seconds_total' in data:
                    cpu_total += float(data.split(' ')[1].strip())
                    if 'idle' in data:
                        cpu_idle += float(data.split(' ')[1].strip())
                elif 'node_memory_MemFree_bytes' in data:
                    self.memory_free = float(
                        data.split('node_memory_MemFree_bytes')[1].strip())
                elif 'node_memory_MemTotal_bytes' in data:
                    self.memory_total = float(
                        data.split('node_memory_MemTotal_bytes')[1].strip())
        return cpu_idle, cpu_total
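
    # Worked sketch of the utilization formula in _calculation_resource: with
    # samples one second apart, total CPU seconds going 1000 -> 1004 and idle
    # going 800 -> 803 give ((1004 - 803) - (1000 - 800)) / (1004 - 1000) = 0.25,
    # i.e. 25% utilization, which is then scaled by total_cpu.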
Example 27
"wraps pint in gpkit monomials"
from __future__ import unicode_literals
import os
try:
    from pint import UnitRegistry, DimensionalityError
    ureg = UnitRegistry()  # pylint: disable=invalid-name
    ureg.load_definitions(
        os.sep.join([os.path.dirname(__file__), "usd_cpi.txt"]))
    Quantity = ureg.Quantity
except ImportError:  # pint is not installed; provide dummy imports
    ureg = None  # pylint: disable=invalid-name
    Quantity = lambda a, b: None
    DimensionalityError = None

QTY_CACHE = {}


def qty(unit):
    "Returns a Quantity, caching the result for future retrievals"
    if unit not in QTY_CACHE:
        QTY_CACHE[unit] = Quantity(1, unit)
    return QTY_CACHE[unit]


class GPkitUnits(object):
    "Return Monomials instead of Quantitites"
    division_cache = {}
    multiplication_cache = {}
    monomial_cache = {}

    def __call__(self, arg):
Example 28
"""

from pint import UnitRegistry
import logging
import logging.config
import os

# Setting up logging
__location__ = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(os.path.join(__location__, 'logging.ini'))
logger = logging.getLogger(__name__)

# Configuring units package:
ureg = UnitRegistry()
Q_ = ureg.Quantity
ureg.load_definitions(os.path.join(__location__, 'pint definitions.txt'))

# Setting units for "standard" flow
T_NTP = Q_(68, ureg.degF)  # Normal Temperature (NIST)
P_NTP = Q_(14.696, ureg.psi)  # Normal Pressure (NIST)

T_MSC = Q_(15, ureg.degC)  # Metric Standard Conditions (used by Crane TP-410)
P_MSC = Q_(101325, ureg.Pa)  # Metric Standard Conditions (used by Crane TP-410)

T_STD = Q_(60, ureg.degF)  # Standard conditions (BPVC VIII UG-129 (c))
P_STD = Q_(14.7, ureg.psi)  # Standard conditions (BPVC VIII UG-129 (c))
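
# Quick sanity check of the definitions above (a sketch): T_NTP.to(ureg.degC)
# gives 20 degC and P_MSC.to(ureg.psi) gives roughly 14.696 psi.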

from .cp_wrapper import ThermState
# Default fluids
AIR = ThermState('air', P=P_NTP, T=T_NTP)
Example 29
class Converter(object):
    def __init__(self, definitions=None, separator='>', precision=None):
        from pint import UnitRegistry

        self.ureg = UnitRegistry()
        self.ureg.load_definitions('unit_defs.txt')
        self.load_definitions(definitions)
        self.separator = separator
        self.precision = precision

    def load_definitions(self, definitions):
        if not definitions:
            return

        if not isinstance(definitions, (list, tuple)):
            definitions = [definitions]
        for d in definitions:
            LOG.info('loading definitions from %s', d)
            self.ureg.load_definitions(d)

    def convert(self, query):
        from pint.unit import DimensionalityError
        import re

        Q_ = self.ureg.Quantity

        # step 1: split the query into an input value and output units at a
        # self.separator
        value, sep, units = re.split(r'(to|>)', query, maxsplit=1)
        value = re.sub(r'([A-Za-z]) ([A-Za-z])', r'\1_\2', value.strip())
        units = re.sub(r'(\w) (\w)', r'\1_\2', units.strip())

        LOG.debug('query: %s', query)
        LOG.debug('input: %s', value)
        LOG.debug('units: %s', units)

        in_val = Q_(value)
        out_units = Q_(units.replace(' ', '_'))

        try:
            out_val = in_val.to(out_units)
        except DimensionalityError as e:
            if str(e.units1) in SPECIAL:
                in_val2 = in_val.magnitude * Q_(SPECIAL[str(e.units1)])
                out_val = in_val2.to(out_units)
            elif str(e.units2) in SPECIAL:
                out_units2 = Q_(SPECIAL[str(e.units2)])
                out_val = in_val.to(out_units2)
            else:
                raise

        LOG.debug(u'converted {0} to {1:P}'.format(in_val, out_val))

        magnitude = out_val.magnitude
        units = out_val.units

        if self.precision:
            from decimal import Decimal
            rval = Decimal('1.' + '0'*self.precision)
            magnitude = Decimal(out_val.magnitude).quantize(rval)

        return (magnitude, u'{0} {1}'.format(magnitude, units))
Example 30
    Formats Pint.Quantity
    :param quantity: quantity object
    :type quantity: Pint.Quantity
    :return: formatted unit string (eg. '5 meters')
    :rtype: String
    """

    return "{0} {1}".format(
        format_number(quantity.magnitude), format_quantity_unit(quantity.units, quantity.magnitude > 1)
    )


def format_value(unit_from, value_from):
    """
    Formats provided unit and value to single string
    :param unit_from: unit identifier (eg. 'm' for meters)
    :type unit_from: String
    :param value_from: amount (eg. 5)
    :type value_from: Number
    :return: formatted unit string (eg. '5 meters')
    :rtype: String
    """

    return format_quantity(ureg.Quantity(value_from, unit_from))


ureg = UnitRegistry()
rates = currency.get_currency_rates(config)
register_exchange_rates(rates)
ureg.load_definitions(os.path.join(os.path.dirname(__file__), "config/custom_units.txt"))
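
A short usage sketch (format_number and format_quantity_unit are defined earlier in this module; the exchange-rate helpers come from the surrounding project):

    # format_value('m', 5)   # -> '5 meters', per the docstring above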
Example 31
])

#: Chemical engineering plant cost index (defaults to 567.5 at 2017)
CE = 567.5

# %% Import base utils

import pandas as pd
import numpy as np
from pint import UnitRegistry
import os

# Set pint Unit Registry
_ureg = UnitRegistry()
_ureg.default_format = '~P'
_ureg.load_definitions(
    os.path.dirname(os.path.realpath(__file__)) + '/my_units_defs.txt')
_Q = _ureg.Quantity
_Q._repr_latex_ = _Q._repr_html_ = _Q.__str__ = _Q.__repr__ = lambda self: self.__format__(
    '')

# Set number of digits displayed
np.set_printoptions(suppress=False)
np.set_printoptions(precision=3)
pd.options.display.float_format = '{:.3g}'.format
pd.set_option('display.max_rows', 35)
pd.set_option('display.max_columns', 10)
pd.set_option('max_colwidth', 35)
del np, pd, os, UnitRegistry

# %% Initialize BioSTEAM
Example 32
## Units handling
# per the pint documentation, it's important that pint and its associated Unit
# Registry are only imported once.
from pint import UnitRegistry
# here we assign the identifier 'unit' to the UnitRegistry
unit = UnitRegistry()

#use this to enable legacy handling of offset units
# TODO fix this to handle offsets the way pint wants us to since 0.7
unit.autoconvert_offset_to_baseunit = True

# append custom unit definitions and contexts
import os
directory = os.path.dirname(__file__)
unit.load_definitions(directory + '/pint_custom_units.txt')
# activate the "chemistry" context globally
unit.enable_contexts('chem')
# set the default string formatting for pint quantities
unit.default_format = 'P~'


def testfunc(val):
    list = []
    try:
        list.append(float(val) * unit(''))
        return list
    except ValueError:
        print('Value Error')
        return None
Example 33
from pint import UnitRegistry
from utils import config_file_path

# Instantiate a common units registry
UR = UnitRegistry(autoconvert_offset_to_baseunit=True)
UR.load_definitions(config_file_path('pint.txt'))

# Specify order and name of data columns
DATA_SPEC = ('date', 'temperature_C', 'pH', 'supply_tank_L', 'nutrients_mL')

PH_CONFIG = {
    'temperature': {
        # 'value': 25 * UR.degC
        'device_id': '28-0517b11b28ff'
    },
    'adc': {
        'i2c_busn': 1,
        'i2c_addr': 0x4F,
        'v_ref': 2.5 * UR.V,
        'v_off': 1.251 * UR.V,
        'filter_samples': 256
    },
    'calibration': {
        'temperature':
        24 * UR.degC,
        'points': ({
            'ph': 4.0 * UR.pH,
            'v': 1.426 * UR.V
        }, {
            'ph': 7.0 * UR.pH,
            'v': 1.250 * UR.V
Example 34
from nidaqmx.constants import AcquisitionType

#: scanning-squid modules
from instruments.daq import DAQAnalogInputs
from plots import ScanPlot
from .microscope import Microscope
import utils

#: Pint for manipulating physical units
from pint import UnitRegistry
ureg = UnitRegistry()
#: Tell UnitRegistry instance what a Phi0 is, and that Ohm = ohm
with open('squid_units.txt', 'w') as f:
    f.write('Phi0 = 2.067833831e-15 * Wb\n')
    f.write('Ohm = ohm\n')
ureg.load_definitions('./squid_units.txt')

import logging
log = logging.getLogger(__name__)


class SusceptometerMicroscope(Microscope):
    """Scanning SQUID susceptometer microscope class.
    """
    def __init__(self,
                 config_file: str,
                 temp: str,
                 ureg: Any = ureg,
                 log_level: Any = logging.INFO,
                 log_name: str = None,
                 **kwargs) -> None:
Example 35
core_api = client.CoreV1Api()
apis_api = client.AppsV1Api()
#sdclient = SdcClient(<Your Sysdig API token>)
sysdig_metric = "net.http.request.time"
metrics = [{
    "id": sysdig_metric,
    "aggregations": {
        "time": "timeAvg",
        "group": "avg"
    }
}]

CustomSchedulerName = 'K8SCustomScheduler'

ureg = UnitRegistry()
ureg.load_definitions('kubernetes_units.txt')

pendingPodsList = []
pendingNotReadyPodsList = []
failedPodsList = []
runningPodsList = []
succeededPodsList = []
unknownPodsList = []
nodesListPerNodeLabel = {}
DEBUG_ENABLED = 0

Q_ = ureg.Quantity


def scheduler(name, node, namespace):
Example 36
Set up physical constants and unit systems
"""
import operator
import copy
from os.path import join, abspath, dirname
import numbers

import numpy as np
from pint import UnitRegistry, set_application_registry, DimensionalityError

from ..utils import ResizableArray

# Set up pint's unit definitions
ureg = UnitRegistry()
unit_def_file = join(abspath(dirname(__file__)), '../_static_data/pint_atomic_units.txt')
ureg.load_definitions(unit_def_file)
set_application_registry(ureg)


class MdtUnit(ureg.Unit):
    """
    Pickleable version of pint's Unit class.
    """
    def __reduce__(self):
        return _get_unit, (str(self),)


def _get_unit(unitname):
    """pickle helper for deserializing MdtUnit objects"""
    return getattr(ureg, unitname)
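
# Sketch of why __reduce__ matters here: pickling routes through _get_unit, so
# pickle.loads(pickle.dumps(MdtUnit('nanometer'))) re-resolves the unit against
# the shared application registry set above.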