Example #1
async def get_fkeys(company, table_name):
    async with fkey_lock:
        if company not in fkeys:

            src_fkeys = DD(list)
            tgt_fkeys = DD(list)

            src_fkey = NT('src_fkey',
                          'src_col, tgt_tbl, tgt_col, alt_src, alt_tgt, test')
            tgt_fkey = NT('tgt_fkey',
                          'src_tbl, src_col, tgt_col, is_child, test')

            sql = ("SELECT b.table_name, a.col_name, a.fkey "
                   f"FROM {company}.db_columns a, {company}.db_tables b "
                   "WHERE b.row_id = a.table_id "
                   "AND a.deleted_id = 0 "
                   "AND a.fkey IS NOT NULL")

            async with db_session.get_connection() as db_mem_conn:
                conn = db_mem_conn.db
                cur = await conn.exec_sql(sql)

                async for src_tbl, src_col, fkey in cur:
                    tgt_tbl, tgt_col, alt_src, alt_tgt, is_child, cursor = loads(
                        fkey)
                    if isinstance(tgt_tbl, str):  # normal case
                        test = None
                        src_fkeys[src_tbl].append(
                            src_fkey(src_col, tgt_tbl, tgt_col, alt_src,
                                     alt_tgt, test))
                        tgt_fkeys[tgt_tbl].append(
                            tgt_fkey(src_tbl, src_col, tgt_col, is_child,
                                     test))
                    else:
                        col_name, vals_tables = tgt_tbl
                        for val, tgt_tbl in vals_tables:
                            test = (col_name, val)
                            src_fkeys[src_tbl].append(
                                src_fkey(src_col, tgt_tbl, tgt_col, alt_src,
                                         alt_tgt, test))
                            tgt_fkeys[tgt_tbl].append(
                                tgt_fkey(src_tbl, src_col, tgt_col, is_child,
                                         test))

            fkeys[company] = src_fkeys, tgt_fkeys

    comp_fkeys = fkeys[company]
    src_fkeys = comp_fkeys[0][table_name]  # returns [] if not found
    tgt_fkeys = comp_fkeys[1][table_name]  # returns [] if not found
    return src_fkeys, tgt_fkeys
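This excerpt relies on module-level names that the listing does not show. A minimal sketch of the assumed setup (the DD/NT aliases match the imports seen in the other examples; which serializer is behind loads is a guess):

import asyncio
from collections import defaultdict as DD, namedtuple as NT
from json import loads  # assumption: fkey definitions are serialized as JSON

fkeys = {}                   # per-company cache: company -> (src_fkeys, tgt_fkeys)
fkey_lock = asyncio.Lock()   # guards first-time population of the cache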
Example #2
    def __init__(self, Prime=0, RunID=0):
        TH.Thread.__init__(self)
        with PrimeBase.THL:
            self._PR = Prime  # current prime
            self._ID = RunID  # 0 : dispatcher ---  1..p : worker threads
            # print('init thread: ', Prime, RunID, flush=True)
            #
            tn = self._MakeName(Prime, RunID)
            self.setName(tn)
            # easy identifier over all threads
            #
            # nt is the generated namedtuple class, used here as a mutable
            # record holder for per-thread bookkeeping
            nt = NT('Info', [
                'Prime', 'RunID', 'TObject', 'Alive', 'I_AmReady', 'PauseCnt',
                'WhereItIs', 'InitTime', 'StartTime', 'ReadyTime', 'FinitTime'
            ])
            nt.Prime = Prime  # current prime
            nt.RunID = RunID  # 0 : dispatcher ---  1..p : worker threads
            nt.TObject = self
            nt.Alive = self.is_alive()
            nt.I_AmReady = False
            nt.InitTime = DT.datetime.now()
            nt.StartTime = None
            nt.ReadyTime = None
            nt.FinitTime = None
            nt.PauseCnt = 0  # counter for thread sleep phases
            nt.WhereItIs = 0
            #
            self.AllThreads.setdefault(tn, nt)
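Note that the block above assigns onto the generated namedtuple class itself rather than creating an instance. A hedged alternative sketch with the same 'Info' fields would build an actual record (immutable, so later updates would go through _replace):

Info = NT('Info', [
    'Prime', 'RunID', 'TObject', 'Alive', 'I_AmReady', 'PauseCnt',
    'WhereItIs', 'InitTime', 'StartTime', 'ReadyTime', 'FinitTime'
])
info = Info(Prime=Prime, RunID=RunID, TObject=self, Alive=self.is_alive(),
            I_AmReady=False, PauseCnt=0, WhereItIs=0,
            InitTime=DT.datetime.now(), StartTime=None, ReadyTime=None,
            FinitTime=None)
self.AllThreads.setdefault(tn, info)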
Example #3
def FillNC(root_grp_ptr, scene_location):
    retGps = NT("returnGroups", "calGrp, productsGrp, navGrp, slaGrp, periodGrp")
    root_grp_ptr.createDimension('samples', 512)
    root_grp_ptr.createDimension('scan_lines', 2000)
    root_grp_ptr.createDimension('bands', 128)
    root_grp_ptr.instrument = 'HICO'
    root_grp_ptr.institution = 'NASA Goddard Space Flight Center'
    root_grp_ptr.resolution = '100m'
    root_grp_ptr.location_description = scene_location
    root_grp_ptr.license = 'http://science.nasa.gov/earth-science/earth-science-data/data-information-policy/'
    root_grp_ptr.naming_authority = 'gov.nasa.gsfc.sci.oceandata'
    root_grp_ptr.date_created = DT.strftime(DT.utcnow(), '%Y-%m-%dT%H:%M:%SZ')
    root_grp_ptr.creator_name = 'NASA/GSFC'
    root_grp_ptr.creator_email = '*****@*****.**'
    root_grp_ptr.publisher_name = 'NASA/GSFC'
    root_grp_ptr.publisher_url = 'http_oceancolor.gsfc.nasa.gov'
    root_grp_ptr.publisher_email = '*****@*****.**'
    root_grp_ptr.processing_level = 'L1B'
    nav_grp = root_grp_ptr.createGroup('navigation')
    nav_vars = list()
    nav_vars.append(nav_grp.createVariable('sensor_zenith', 'f4', ('scan_lines', 'samples',)))
    nav_vars.append(nav_grp.createVariable('solar_zenith', 'f4', ('scan_lines', 'samples',)))
    nav_vars.append(nav_grp.createVariable('sensor_azimuth', 'f4', ('scan_lines', 'samples',)))
    nav_vars.append(nav_grp.createVariable('solar_azimuth', 'f4', ('scan_lines', 'samples',)))
    nav_vars.append(nav_grp.createVariable('longitudes', 'f4', ('scan_lines', 'samples',)))
    nav_vars.append(nav_grp.createVariable('latitudes', 'f4', ('scan_lines', 'samples',)))
    for var in nav_vars:
        var.units = 'degrees'
        var.valid_min = -180
        var.valid_max = 180
        var.long_name = var.name.replace('_', ' ').rstrip('s')
    retGps.navGrp = nav_grp
    retGps.productsGrp = root_grp_ptr.createGroup('products')
    lt = retGps.productsGrp.createVariable('Lt', 'u2', ('scan_lines',
                                                        'samples', 'bands'))
    lt.scale_factor = float32([0.02])
    lt.add_offset = float32(0)
    lt.units = "W/m^2/micrometer/sr"
    # lt.valid_range = nparray([0, 16384], dtype='u2')
    lt.long_name = "HICO Top of Atmosphere"
    lt.wavelength_units = "nanometers"
    # lt.createVariable('fwhm', 'f4', ('bands',))
    lt.fwhm = npones((128,), dtype='f4') * -1
    # wv = lt.createVariable('wavelengths', 'f4', ('bands',))
    lt.wavelengths = npones((128,), dtype='f4')
    lt.wavelength_units = "nanometers"
    retGps.slaGrp = root_grp_ptr.createGroup('scan_line_attributes')
    retGps.slaGrp.createVariable('scan_quality_flags', 'u1', ('scan_lines',
                                                              'samples'))
    # Create metadata group and sub-groups
    meta_grp = root_grp_ptr.createGroup('metadata')
    pl_info_grp = meta_grp.createGroup("FGDC/Identification_Information/Platform_and_Instrument_Identification")
    pl_info_grp.Instrument_Short_Name = "hico"
    prc_lvl_grp = meta_grp.createGroup("FGDC/Identification_Information/Processing_Level")
    prc_lvl_grp.Processing_Level_Identifier = "Level-1B"
    retGps.periodGrp = meta_grp.createGroup("FGDC/Identification_Information/Time_Period_of_Content")
    # fill HICO group
    retGps.calGrp = meta_grp.createGroup("HICO/Calibration")
    return retGps
Example #4
        :class:`Edge`'s single input node as the key and the flow as the value.
        """
        return self._inputs

    @property
    def outputs(self):
        """ dict:
        Dictionary mapping output :class:`Nodes <Node>` :obj:`n` to
        :class:`Edges` from :obj:`self` into :obj:`n`.
        If :obj:`self` is an :class:`Edge`, returns a dict containing the
        :class:`Edge`'s single output node as the key and the flow as the value.
        """
        return self._outputs


EdgeLabel = NT("EdgeLabel", ['input', 'output'])


class Edge(Node):
    """ :class:`Bus`es/:class:`Component`s are always connected by an :class:`Edge`.

    :class:`Edge`s connect a single non-:class:`Edge` Node with another. They
    are directed and have a (sequence of) value(s) attached to them so they can
    be used to represent a flow from a source/an input to a target/an output.

    Parameters
    ----------
    input, output: :class:`Bus` or :class:`Component`, optional
    flow, values: object, optional
        The (list of) object(s) representing the values flowing from this
        edge's input into its output. Note that these two names are aliases of
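EdgeLabel above is a plain namedtuple keyed by an edge's two endpoints. A minimal usage sketch (the endpoint values here are placeholder strings, not real Bus/Component objects):

label = EdgeLabel(input="bus_0", output="component_0")
label.input   # 'bus_0'
label.output  # 'component_0'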
Example #5
# Primes = [2, 3, 5]
# XJumps = bytes([4, 2])
# Primes = [2, 3]
XJumps = bytes([2])
Primes = [2]

Strikes = set()
XFrame = OD()
ZDatas = OD()
curPrime = 1
# thisPool = MP.pool.Pool()
# MAXPROCS = 5
MAX2SHOW = 24
MAXPRIME = 29  # where should we stop - lists/sets grow very fast

Xtuple = NT('Xtuple',
            ['prime', 'step', 'start', 'finit', 'frame', 'strikes', 'jumpers'])


def profile(fnc):
    """
    decorator for profiling
    """
    def DoIt(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        retval = fnc(*args, **kwargs)
        pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
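As a hedged illustration of the Xtuple record defined above, the step/start/finit values below are placeholders; only the field names come from the snippet:

x = Xtuple(prime=curPrime, step=2, start=1, finit=MAXPRIME,
           frame=XFrame, strikes=Strikes, jumpers=XJumps)
x.prime, x.jumpers  # fields are read back by name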
Example #6
import csv
import requests
from types import SimpleNamespace
from collections import namedtuple as NT

# Local imports
from tbl import util
from tbl import validation as V

# NamedTuples provide simple record-type structures for giving
# names to information. I don't need full classes here (yet),
# but I don't want to use plain lists or tuples, because down that road
# lies madness. There will ultimately be quite a few, and I'll probably
# move them into their own namespace.
# http://bit.ly/2PVUSha
Column = NT("Column", ["name", "type"])


class tbl:
    def __init__(self, url=None, has_header=True):
        # A SimpleNamespace is really an empty object that
        # lets me set, modify, and delete attributes.
        # http://bit.ly/3aHq2Rx
        self.fields = SimpleNamespace()
        # Initialize fields
        self.fields.columns = list()
        self.fields.status = V.OK()

        # Handle keywords
        if url:
            self.fields.status = V._check_from_sheet(url, has_header)
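Following the comment above, a short sketch of how Column records might be collected; the header names and types are made up for illustration:

columns = [Column(name="age", type=int), Column(name="city", type=str)]
columns[0].name   # 'age'
columns[1].type   # <class 'str'>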
Example #7
    result = C.interp(hclc, cs, desired)
    assert (result)


# A 200-level will pass if a student
# has a 100-level course.
def test_hclc3():
    hclc = C.HasCourseLevelConstraint(200, 100)
    cs = [S.Course(level=200), S.Course(level=100)]
    desired = S.Course(level=200)
    result = C.interp(hclc, cs, desired)
    assert (result)


# Do I have a sequence of required courses?
ST = NT('ST', ['target', 'sequence', 'cs', 'desired', 'expected'])
seqdata = [
    ST(a_100, [], [], a_100, True),
    ST(a_200, [a_100], [], a_200, False),
    ST(a_200, [a_100], [a_100], a_200, True),
    ST(a_300, [a_100, a_200], [a_100], a_300, False),
    ST(a_300, [a_100, a_200], [a_100, a_200], a_300, True),
    ST(a_200,
       [S.Course(level=100, rubric="CS"),
        S.Course(level=200, rubric="LIT")], [a_100, a_200], a_200, False),
]


@pytest.mark.parametrize("st", seqdata)
def test_seq(st):
    seqc = C.SequenceConstraint(st.target, st.sequence)
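The listing cuts test_seq off here; a hedged guess at its continuation, mirroring the assert pattern of the tests above and the ST.expected field, might be:

    # hypothetical continuation, not part of the original snippet
    result = C.interp(seqc, st.cs, st.desired)
    assert result == st.expected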
Example #8
            await menu.setval('module_row_id', module_row_id)
            await menu.setval('ledger_row_id', ledger_row_id)
            await menu.save()


#-----------------------------------------------------------------------------

# adm_periods - dictionary object for each company
# key is company, value is a list of adm_period objects - one for each period for that company
# period row_ids are numbered from 0, not 1
# therefore any period can be retrieved from the list using its period_no as an index
# i.e. if period_no is 42, the adm_period object is in adm_periods[company][42]
# [TODO] set up callbacks to update list if adm_periods is changed

adm_period = NT(
    'adm_period',
    'period_no, year_no, year_per_id, year_per_no, opening_date, closing_date')

adm_periods = {}
adm_per_lock = asyncio.Lock()


async def get_adm_periods(company):
    async with adm_per_lock:
        if company not in adm_periods:
            adm_per_list = []
            adm_per_obj = await db.objects.get_db_object(
                cache_context, company, 'adm_periods')
            await adm_per_obj.getfld('year_no')  # to set up virtual field
            await adm_per_obj.getfld('year_per_id')  # ditto
            await adm_per_obj.getfld('year_per_no')  # ditto
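Per the comments above, once the cache has been populated (e.g. by awaiting get_adm_periods) any period can be fetched by its period_no; a minimal sketch, with company standing in for an actual company code:

adm_per_list = adm_periods[company]   # list of adm_period namedtuples for this company
period = adm_per_list[42]             # period_no doubles as the list index
period.opening_date, period.closing_date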
Example #9
import pstats
import io

MAXNUM = 1_000_000
NEWLIN = 100
NCORES = 0  # 0 = non-parallel execution
MCORES = max(1, MP.cpu_count() - 2)  # maximum number of wanted processes
MCORES = 60  # maximum number of wanted processes

#
JPfile = 'start_jumpers.csv'
PPfile = 'CheckedPrimes.csv'
startP = [2, 3, 5]
startJ = [6, 4, 2, 4, 2, 4, 6, 2]
#
pInfo = NT('T', ['P', 'Q', 'R'])
Aprime = DQ([])
Xprime = DQ([])
Ajumps = bytearray([])
DTYPE = 'int8'  # for larger MAXNUM check this dtype: values are gaps between consecutive primes
Pjumps = NP.array([], dtype=DTYPE)


def profile(fnc):
    """
    decorator for profiling
    """
    def DoIt(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        retval = fnc(*args, **kwargs)
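Like Example #5, this snippet depends on aliases the listing truncates away; a sketch of the assumed imports, inferred from how the names are used:

# assumed aliases, not shown by the listing
import cProfile
import multiprocessing as MP
import numpy as NP
from collections import deque as DQ
from collections import namedtuple as NT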
Example #10
from collections import namedtuple as NT

nt = NT("name", "field")
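For completeness, instantiating this minimal namedtuple and reading the field back works as follows:

record = nt(field='value')
record.field                     # 'value'
record._replace(field='other')   # returns a new tuple; instances are immutable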
Example #11
from grako.util import (
    compress_seq,
    indent,
    re,
    safe_name,
)
from grako.objectmodel import Node
from grako.objectmodel import BASE_CLASS_TOKEN
from grako.exceptions import CodegenError
from grako.rendering import Renderer
from grako.codegen.cgbase import ModelRenderer, CodeGenerator

NODE_NAME_PATTERN = r'(?!\d)\w+(' + BASE_CLASS_TOKEN + r'(?!\d)\w+)*'

_TypeSpec = NT('TypeSpec', ['class_name', 'base'])


def codegen(model):
    return ObjectModelCodeGenerator().render(model)


def _get_node_class_name(rule):
    if not rule.params:
        return None

    typespec = rule.params[0]
    if not re.match(NODE_NAME_PATTERN, typespec):
        return None
    if not typespec[0].isupper():
        return None
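A brief sketch of the _TypeSpec record itself; the class and base names here are illustrative, not taken from grako:

spec = _TypeSpec(class_name='AddExpr', base='Node')
spec.class_name, spec.base   # ('AddExpr', 'Node')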