Example #1
def _init_timer(time_budgets):
    timer = Timer()
    for process in PROCESSES:
        timer.add_process(process, time_budgets[process],
                          PROCESSES_MODE[process])
        LOGGER.debug(f"init time budget of {process}: {time_budgets[process]} "
                     f"mode: {PROCESSES_MODE[process]}")
    return timer
Example #2
def __init__(self):
    self.window = tkinter.Tk()
    self.canvas = tkinter.Canvas(self.window, width=800, height=600)
    self.canvas.pack()
    self.window.bind("<Down>", self.scrolldown)
    self.window.bind("<Up>", self.scrollup)
    self.window.bind("<Button-1>", self.handle_click)
    self.window.bind("<Button-3>", self.go_back)
    self.history = []
    self.scrolly = 0
    self.maxh = 0
    self.timer = Timer()
Example #3
File: layout.py Project: keram88/ember
def __init__(self, parent, node):
    self.parent = parent
    self.mt = px(node.style.get("margin-top", "0px"))
    self.mb = px(node.style.get("margin-bottom", "0px"))
    self.x = parent.content_left()
    self.h = None
    self.node = node
    self.timer = Timer()
Example #4
def batch_read(datapath_list, **kwargs):
    print('loading data files...')
    datalist = []
    timeit = Timer()
    for datapath in datapath_list:
        print(os.path.split(datapath)[1])
        importdf = read_data(datapath, **kwargs)
        datalist.append(importdf)

    masterdata = pd.concat(datalist, sort=True)
    masterdata.sort_index(inplace=True)

    if any(masterdata.index.duplicated()):  # need to collapse duplicate rows
        masterdata = masterdata.pivot_table(index=masterdata.index,
                                            dropna=False)  # this seems to work

    timeit.split('elapsed for import')
    return masterdata
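
Example #4 above and examples #11 through #13 below call Timer() and split() without showing the class itself. For orientation, here is a minimal sketch of a split-style stopwatch consistent with those call sites; the message argument and report keyword are assumptions inferred from the calls, not the project's actual implementation.

import time

class Timer:
    """Minimal split-style stopwatch (sketch, not the original class)."""

    def __init__(self):
        self._last = time.perf_counter()

    def split(self, message='', report=True):
        # Measure time since construction or since the previous split.
        now = time.perf_counter()
        elapsed = now - self._last
        self._last = now
        if report:
            print('{}: {:.3f}s'.format(message, elapsed))
        return elapsed
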
Example #5
def _init_timer(PREDICT_TIME_BUDGET):
    timer = Timer()
    timer.add_process('initialize.sh', INIT_TIME_BUDGET, timing.CUM)
    LOGGER.debug(f"init time budget of initialize.sh: {INIT_TIME_BUDGET} "
                 f"mode: {timing.CUM}")
    timer.add_process('enrollment.sh', ENROLL_TIME_BUDGET, timing.CUM)
    LOGGER.debug(f"init time budget of enrollment.sh: {ENROLL_TIME_BUDGET} "
                 f"mode: {timing.CUM}")
    timer.add_process('predict.sh', PREDICT_TIME_BUDGET, timing.CUM)
    LOGGER.debug(f"init time budget of predict.sh: {PREDICT_TIME_BUDGET} "
                 f"mode: {timing.CUM}")
    return timer
Example #6
def _predict(args):
    result = {}
    try:
        timer = Timer.from_file(join(args.temp_dir, 'timer.yaml'))
        LOGGER.info("===== Load test data")
        dataset = Dataset(args.dataset_dir)
        args.time_budget = dataset.get_metadata().get("time_budget")
        path.append(args.model_dir)
        LOGGER.info('==== Load user model')
        umodel = init_usermodel(dataset)
        with timer.time_limit('load'):
            umodel.load(args.temp_dir, timer.get_all_remain())

        LOGGER.info('==== start predicting')
        idx = args.idx
        y_preds = []
        while not dataset.is_end(idx):
            history = dataset.get_history(idx)
            pred_record = dataset.get_next_pred(idx)
            with timer.time_limit('predict', verbose=False):
                y_pred, next_step = umodel.predict(
                    history, pred_record, timer.get_all_remain())
            y_preds.extend(y_pred)
            idx += 1
            if next_step == 'update':
                result['is_end'] = False
                break
        else:
            result['is_end'] = True

        # Write predictions to output_dir
        _write_predict(idx, args.output_dir, y_preds)
        result = {
            **result,
            'idx': idx,
            'status': 'success',
            'next_step': next_step,
        }

        with timer.time_limit('save'):
            umodel.save(args.temp_dir, timer.get_all_remain())
        timer.save(join(args.temp_dir, 'timer.yaml'))

    except TimeoutException as ex:
        LOGGER.error(ex, exc_info=True)
        result['status'] = 'timeout'
    except Exception as ex:
        LOGGER.error(ex, exc_info=True)
        result['status'] = 'failed'

    return result
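
Example #6 relies on timer.time_limit(...) raising TimeoutException when a step exceeds its budget. Below is a minimal sketch of one common way to build such a guard with SIGALRM (POSIX-only); the real Timer here also tracks named per-process budgets, which this sketch omits.

import signal
from contextlib import contextmanager

class TimeoutException(Exception):
    pass

@contextmanager
def time_limit(seconds):
    # Raise TimeoutException if the with-block runs longer than `seconds`.
    def _handler(signum, frame):
        raise TimeoutException('timed out after {}s'.format(seconds))
    old_handler = signal.signal(signal.SIGALRM, _handler)
    signal.alarm(int(seconds))
    try:
        yield
    finally:
        signal.alarm(0)  # cancel the pending alarm
        signal.signal(signal.SIGALRM, old_handler)
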
Example #7
                                        epoch_avg=epoch_avg,
                                        verbose=r_verbose)
                            elif "-rn" in commands:
                                # run neural with epoch_avg=true
                                for name in psr_names:
                                    rfi = NN_Mitigator(name,
                                                       *dirs,
                                                       epoch_avg=epoch_avg,
                                                       verbose=r_verbose)

                rfi.mitigation_setup()

            if t_index > r_index:
                timer = Timer(name,
                              jump_flags="",
                              epochs=1,
                              subbands=subbands,
                              verbose=t_verbose)
                timer.time()
                pass

        elif r_index < c_index:
            # run rfi first, then cal
            if t_index < r_index:
                print("Timing should go last")
                pass

            if (("-r" in commands) or ("-rs" in commands) or
                ("-rn" in commands) or ("-rb" in commands)) and (counter == 1):
                if epoch_avg is None:
                    epoch_avg = False
Example #8
    if datatype in SIGNED_TYPES: A = A - 2**(bits - 1)

    # return corrected array

    return A


if __name__ == '__main__':

    high = int(2)
    datatype = uint8

    a = array(random.randint(0, high, size=(1000000, 1000)), dtype=datatype)
    print(a)

    with Timer(callback="Time to save tiny gzip: {time:.3f}s") as t:

        x = pack_int(a, datatype=datatype, low=0, high=high)

        with gzip.open('tiny_gzip.gzip', 'wb') as f:

            f.write(x)

    print(os.path.getsize('tiny_gzip.gzip'))

    with Timer(callback="Time to save gzip only: {time:.3f}s") as t:

        with gzip.open('gzip_only.gzip', 'wb') as f:

            f.write(a)
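
Example #8 uses Timer as a context manager that reports through a callback format string. Here is a minimal sketch consistent with that call site; filling {time:.3f} via str.format is an assumption based on the callback strings above.

import time

class Timer:
    """Minimal context-manager stopwatch (sketch, not the original class)."""

    def __init__(self, callback=None):
        self.callback = callback

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.elapsed = time.perf_counter() - self._start
        if self.callback is not None:
            print(self.callback.format(time=self.elapsed))
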
Example #9
class Browser:
    def __init__(self):
        self.window = tkinter.Tk()
        self.canvas = tkinter.Canvas(self.window, width=800, height=600)
        self.canvas.pack()
        self.window.bind("<Down>", self.scrolldown)
        self.window.bind("<Up>", self.scrollup)
        self.window.bind("<Button-1>", self.handle_click)
        self.window.bind("<Button-3>", self.go_back)
        self.history = []
        self.scrolly = 0
        self.maxh = 0
        self.timer = Timer()

    def go_back(self, e):
        if len(self.history) < 2:
            return
        current, s_type1, post = self.history.pop()
        # browse puts this back
        last, s_type2, post2 = self.history.pop()
        if s_type1 is not None:
            if s_type1 == 'post':
                result = input("Resending POST data. Confirm: ")
                if result.lower() != "yes":
                    self.history.append((last, s_type2, post2))
                    self.history.append((current, s_type1, post))
                    return
                else:
                    print("OK!!!")
                    self.send_post(current, post)
                    return
            else:
                self.browse(current, 'get', post)
        self.browse(last, s_type2, post2)

    def browse(self, url, s_type=None, post=None):
        try:
            host, port, path, fragment = parse_url(url)
            self.timer.start("Downloading")
            headers, body = request('GET', host, port, path)
            self.timer.stop()
        except AssertionError:
            return
        self.headers = headers
        self.body = body
        self.url = url
        self.history.append((url, s_type, post))
        self.scrolly = 0
        self.parse()

    def parse(self):
        self.timer.start("HTML")
        self.text = lex(self.body)
        self.timer.stop()
        self.nodes = parse(self.text)
        self.timer.start("Parse CSS")
        self.rules = parse_css(DEFAULT_STYLE)
        self.timer.stop()
        self.rules.sort(key=lambda x: x[0].score())
        self.timer.start("JS")
        self.js = dukpy.JSInterpreter()
        self.js_handles = dict()

        # Registration
        self.js.export_function("log", print)
        self.js.export_function("querySelectorAll", self.js_querySelectorAll)

        # Run runtime
        self.js.evaljs(DEFAULT_JS)

        for script in find_scripts(self.nodes, []):
            lhost, lport, lpath, lfragment = parse_url(
                relative_url(script, self.history[-1]))
            header, body = request('GET', lhost, lport, lpath)
            self.js.evaljs(body)
        self.timer.stop()
        self.relayout()

    def js_querySelectorAll(self, sel):
        selector, _ = css_selector(sel + "{", 0)
        elts = find_selected(self.nodes, selector, [])
        out = []
        for elt in elts:
            if not elt.handle:
                handle = len(self.js_handles) + 1
                elt.handle = handle
                self.js_handles[handle] = elt
            out.append(elt.handle)  # append the element's own handle, not a stale loop variable
        return out

    def js_getAttribute(self, handle, attr):
        elt = self.js_handles[handle]
        return elt.attributes.get(attr, None)

    def relayout(self):
        self.timer.start("Style")
        style(self.nodes, self.rules)
        self.timer.stop()

        self.page = Page()
        self.layout = BlockLayout(self.page, self.nodes)
        self.timer.start("Layout1")
        self.layout.layout1()
        self.timer.stop()
        self.timer.start("Layout2")
        self.layout.layout2(0)
        self.timer.stop()
        self.maxh = self.layout.height()
        self.timer.start("Display list")
        self.display_list = self.layout.display_list()
        self.timer.stop()
        self.render()

    def render(self):
        #self.timer.start("Render")
        self.canvas.delete("all")
        for cmd in self.display_list:
            if cmd.y2 - self.scrolly < -20: continue
            if cmd.y2 - self.scrolly > 600 + 20: continue
            cmd.draw(self.scrolly, self.canvas)
        #self.timer.stop()

    def scrolldown(self, e):
        self.scrolly = min(self.scrolly + SCROLL_STEP, 13 + self.maxh - 600)
        self.render()

    def scrollup(self, e):
        self.scrolly = max(self.scrolly - SCROLL_STEP, 0)
        self.render()

    def handle_click(self, e):
        x, y = e.x, e.y + self.scrolly
        elt = find_element(x, y, self.layout)
        while elt and not \
              (isinstance(elt, ElementNode) and ((elt.tag == "a" and "href" in elt.attributes)
                                                 or elt.tag in ("input", "textarea", "button"))):
            elt = elt.parent
        if not elt:
            pass
        elif elt.tag == 'a':
            # Follow link!
            url = relative_url(elt.attributes["href"], self.history[-1][0])
            self.browse(url)
        # lab x
        elif elt.tag in ("input", "textarea"):
            if is_checkbox(elt):  # EX 1
                if is_checked(elt):
                    del elt.attributes["checked"]
                else:
                    elt.attributes["checked"] = ""
            else:
                edit_input(elt)
            self.relayout()
        elif elt.tag == 'button':
            self.submit_form(elt)
        else:
            pass

    def submit_form(self, elt):
        while elt and elt.tag != 'form':
            elt = elt.parent
        if not elt: return

        # EX 2
        method = elt.attributes.get('method', 'get').lower()
        if method not in ('get', 'post'):
            # "Sane" default
            method = 'get'

        inputs = []
        find_inputs(elt, inputs)
        params = dict()
        for input in inputs:
            # EX 1
            if input.tag == 'input':
                if is_checkbox(input):
                    if is_checked(input):
                        params[input.attributes['name']] = ''
                else:
                    params[input.attributes['name']] = input.attributes.get(
                        'value', '')
            else:
                params[input.attributes[
                    'name']] = input.children[0].text if input.children else ""
        url = relative_url(elt.attributes['action'], self.history[-1][0])
        # EX 2
        if method == 'get':
            host, port, path, fragment = parse_url(url)
            get = self.format_post(params)
            nurl = "http://" + host + ":" + str(port) + path + "?" + get + (
                fragment if fragment is not None else "")
            self.browse(nurl, 'get')

        else:
            body = self.format_post(params)
            self.send_post(url, body)

    def format_post(self, params):
        body = ""
        for param, value in params.items():
            body += "&" + param + "="
            body += value.replace(" ", "%20")
        body = body[1:]
        return body

    def send_post(self, url, body):
        host, port, path, fragment = parse_url(url)
        self.headers, self.body = request('POST', host, port, path, body)
        self.history.append((url, 'post', body))
        self.parse()
Example #10
        shared.isClient = False
        shared.isNetworked = True
        logging.info('Running server')
    elif (arg.lower() == 'client'):
        from network.client import NetworkHandler
        shared.isClient = True
        shared.isNetworked = True
        logging.info('Running client')
    elif (len(arg) > 3 and arg.lower()[:3] == 'ip='):
        shared.host = arg.lower()[3:]
        logging.info('IP: ' + str(shared.host))
    elif (len(arg) > 5 and arg.lower()[:5] == 'port='):
        shared.port = int(arg.lower()[5:])
        logging.info('Port: ' + str(shared.port))

timer = Timer()
map = Map(96, 48)
renderer = Renderer()
inputHandler = InputHandler()
inputHandler.start()
if (shared.isNetworked):
    networkHandler = NetworkHandler()
    networkHandler.start()
while shared.running:
    gameLogic()
    gameRender()
    gameInput()
    timer.startSection('SLEEP')
    timer.sync(10)
if (shared.isNetworked):
    networkHandler.stop()
Example #11
def query_sqlite(db,
                 start=None,
                 end=None,
                 tags=None,
                 condition=None,
                 addquery=None,
                 tz='US/Mountain',
                 verbose=False):
    """
    Query data from a SQLite database.
    
    Parameters
    ----------
    db : str
        Path to the database.

    Optional Parameters
    -------------------
    start : str
        Pandas-parseable date-time string; default will query from start of database if None.
    end : str
        Pandas-parseable date-time string; default will query to the end of the database if None.
    tags : list
        List of tags to import; default is None and imports all tags.
    condition : list of strings
        Additional SQL language to specify certain constraints, given as
        a list of conditionals.
    addquery : str
        Additional query language to specify conditions on what data to
        return; default=None.
    tz : str
        The time zone used for the start/end values.
    verbose : bool
        Report additional information during execution.

    Returns
    -------
    Pandas dataframe with query results.
    """
    # convert datetimes to unix for query, or default to limits
    if start is None:
        start = '(SELECT MIN("date_time") from "values")'
    else:
        start = int(pd.Timestamp(start, tz=tz).value / 1e9)
    if end is None:
        end = '(SELECT MAX("date_time") from "values")'
    else:
        end = int(pd.Timestamp(end, tz=tz).value / 1e9)

    if tags is None:
        cols = '*'
    else:
        cols = '"date_time",'
        for t in tags:
            cols += '"{:}",'.format(t)
        cols = cols[:-1]  # delete trailing comma

    query = 'SELECT {cols} FROM "values" WHERE "date_time" BETWEEN {start} AND {end}'\
            .format(cols=cols, start=start, end=end)
    if condition is not None:
        warn("'condition' will be replaced with 'addquery' in the future",
             category=PendingDeprecationWarning)
        for c in condition:
            query += ' AND ' + c
    if addquery is not None:
        query += addquery

    if verbose:
        print(query)

    timeit = Timer()
    engine = create_engine('sqlite:///' + db)
    data = pd.read_sql(query,
                       engine,
                       index_col='date_time',
                       parse_dates=['date_time'])
    # convert back to requested time zone and strip tz-awareness
    data.index = data.index.tz_localize('utc') \
                .tz_convert(tz) \
                .tz_localize(None)

    timeit.split('data loaded', report=verbose)

    return data
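
A hedged usage sketch for query_sqlite; the database path and tag names below are hypothetical.

df = query_sqlite('plant_data.db',
                  start='2020-01-01',
                  end='2020-02-01',
                  tags=['TI-101', 'FI-202'],
                  tz='US/Mountain',
                  verbose=True)
print(df.head())
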
Example #12
def ignition_query(start_time,
                   end_time,
                   taglist,
                   login,
                   addquery=None,
                   verbose=False):
    """
    Query the IBRF Pilot Plant SCADA (Ignition) SQL server 
    for the desired time range and tags. Note, query times are local
    and resulting date-times are local (America/Denver).
    
    Parameters
    ----------
    start_time : str
        Date-time string, parseable by pandas.
    end_time : str
        Date-time string, parseable by pandas.
    taglist : list
        A list of all desired tags. Must be in same format as
        found in SQL tables created by Ignition (consult your own documentation).
    login : list or str
        Server login info: [server, database, user, passwd]. Script will
        automatically find this in user-provided file inside the module
        directory if a string with the name of this file is passed instead.
        The file format is the same items in the above list in ASCII
        document with line break between each item.

    Optional Parameters
    -------------------
    addquery : str
        Additional query language to specify conditions on what data to
        return; default=None.
    verbose : bool
        If True, prints progress & statistics during execution; default=False.

    Returns
    -------
    Pandas DataFrame containing the requested data.
    """

    timeit = Timer()

    # =============================================================================
    # Setup
    # =============================================================================
    # SQL server login
    if type(login) is str:
        # find local login info
        with open(os.path.join(ppath, login), 'r') as f:
            logintext = f.read()
        server, database, user, password = logintext.split('\n')
    else:
        # login given directly as [server, database, user, passwd]
        server, database, user, password = login

    # join tag paths copied from Ignition tag browser
    assert type(taglist) is list
    tags = r"('" + r"', '".join(taglist) + "')"

    # SQL times in UNIX format (ms)
    # must convert given local time to UTC, since database timestamps are in UTC
    epoch_start_time = pd.Timestamp(start_time,
                                    tz='America/Denver').value / 1e9 * 1000
    epoch_end_time = pd.Timestamp(end_time,
                                  tz='America/Denver').value / 1e9 * 1000

    # =============================================================================
    # Query to determine what database partitions to look in for specified range
    # =============================================================================
    # select partitions at the start, end, and everything in-between
    query1 = "SELECT pname FROM sqlth_partitions WHERE \
            ({start} BETWEEN start_time AND end_time) \
            OR ({end} BETWEEN start_time AND end_time) \
            OR (({start} < start_time) AND ({end} > end_time))"\
            .format(start=str(epoch_start_time), end=str(epoch_end_time))
    if verbose:
        print(query1)
    # http://pymssql.org/en/stable/pymssql_examples.html#using-the-with-statement-context-managers
    with pymssql.connect(server, user, password, database) as conn:
        with conn.cursor() as cursor:
            cursor.execute(query1)
            partitions = pd.DataFrame(cursor.fetchall(), columns=['pname'])
    partitions = partitions['pname'].tolist()

    if verbose:
        print(partitions)
    timeit.split('database partitions queried for locations', report=verbose)
    # exit early if empty result
    if len(partitions) == 0:
        print(
            '*** ignition_query alert: No data exists for the given date-time range ***'
        )
        return

    # =============================================================================
    # Query the partition tables for the tags and range of interest
    # =============================================================================
    query2 = ""
    for i, partition in enumerate(partitions):
        query2 += "SELECT t_stamp, tagpath, floatvalue, intvalue \
                   FROM {partition} JOIN sqlth_te ON tagid = id \
                   WHERE tagpath IN {tag} AND (t_stamp >= {start}) AND (t_stamp <= {end})"\
                  .format(partition=partition, tag=tags,
                          start=str(epoch_start_time), end=str(epoch_end_time))

        if addquery is not None:
            query2 += addquery
        if i < (len(partitions) - 1):
            query2 += " UNION "
        else:  # finish query
            query2 += " ORDER BY t_stamp"
    if verbose:
        print(query2)
    with pymssql.connect(server, user, password, database) as conn:
        with conn.cursor() as cursor:
            cursor.execute(query2)
            data = pd.DataFrame(
                cursor.fetchall(),
                columns=['t_stamp', 'tagpath', 'floatvalue', 'intvalue'])

    timeit.split('data queried', report=verbose)

    # =============================================================================
    # Clean up and reformat the returned data
    # =============================================================================
    # exit if no data was returned
    if data.shape[0] * data.shape[1] == 0:
        print(
            '*** ignition_query alert: No results were returned for the given date-time range and tags ***'
        )
        print(
            'Values are only recorded when they change outside their deadbands. Try querying a larger date-time range.'
        )
        return

    # Create a 'value' column holding either floatvalue or intvalue: one of the
    # two is always NaN, so fill NaNs with 0 and sum the pair.
    datana = data.fillna(0)
    data['value'] = datana['floatvalue'] + datana['intvalue']

    # Drop the floatvalue and intvalue columns now that 'value' is the only one needed
    data = data.drop(['floatvalue', 'intvalue'], axis=1)

    # Note that if querying the same data from ignition the times may be 1 second off
    # since it appears ignition converts to an int instead of rounding to the nearest second.
    # We will be rounding to the nearest second for more accurate data.
    # Time is also converted back from ms to s.
    data['t_stamp'] = pd.to_datetime(round(data['t_stamp'].astype(float) /
                                           1000),
                                     unit='s',
                                     utc=True)

    data.set_index('t_stamp', drop=True, inplace=True)
    data.index = data.index.tz_convert('America/Denver')
    data.index = data.index.tz_localize(None)
    data['value'] = data['value'].astype(float)

    # Reshape the dataframe to a timestamp index with one column per tagpath,
    # filling in the values with 'value' (which now includes the int values).
    # Data is assigned to the nearest second (rounded earlier).
    data = data.pivot_table(index=data.index,
                            columns='tagpath',
                            values='value')

    # forward-fill NaNs with the last observation, then back-fill the beginning of the data if needed
    data.fillna(method='ffill', inplace=True)
    data.fillna(method='bfill', inplace=True)

    # check for missing tags (nothing recorded in the queried time span)
    try:
        requested = set(taglist)
        result = set(data.columns)
        missing = requested - result
        if len(missing) > 0:
            print(
                'WARNING: the following tags are missing from the SQL import')
            for i in missing:
                print(i)
            print('This is possibly caused by a date range too narrow to catch')
            print('a recorded value, which happens when a value is not changing.')
    except KeyError:
        pass

    timeit.split('post-processing complete', report=verbose)

    return data
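
A hedged usage sketch for ignition_query; the tag paths and login file name below are hypothetical, with the login file holding the four lines described in the docstring.

df = ignition_query('2020-06-01 00:00',
                    '2020-06-02 00:00',
                    ['Plant/Line1/TI-101', 'Plant/Line1/FI-202'],
                    login='sql_login.txt',
                    verbose=True)
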
Example #13
def dbupdate_sqlite(db,
                    datadir,
                    tz='US/Mountain',
                    dropcols=None,
                    selectcols=None,
                    depnames={}):
    """
    Update a sqlite database file with new data or create new if none exists.
        
    Parameters
    ----------
    db : str
        Database file path. 
    datadir : str
        Path of the directory housing the files to be imported. Caution:
        all files in this directory will be scanned, so only put compatible
        files in it.

    Optional Parameters
    -------------------
    tz : str
        The time zone used for the date/time values in data files.
    dropcols : list
        A list of columns to ignore in the source data files.
    selectcols : list
        A list of columns to use; default is all, besides dropcols.
    depnames : dict
        A dictionary used for updating/renaming the column names prior
        to database insert.
    """
    datapath_list = np.asarray(glob.glob(os.path.join(datadir, '*')))
    # check for new data
    # data will only be imported if not in the database already
    # 'import_history' table contains imported files and their date/time ranges

    # import previous file import history and existing column names (if exists)
    if os.path.isfile(db):
        history = set()
        with sqlite3.connect(db) as conn:
            c = conn.cursor()
            c.execute('SELECT "import_history"."file" FROM "import_history"')
            history = set(np.asarray(c.fetchall()).flatten())
            c.execute('PRAGMA table_info("values")')
            exist_cols = {i[1] for i in c.fetchall()}
        # delete entries from datapath_list (globbed) if also exist in history
        # print(datapath_list)
        newmask = np.ones(datapath_list.shape, dtype=bool)
        for i, f in enumerate(datapath_list):
            if os.path.split(f)[1] in history:
                newmask[i] = False
        datapath_list = datapath_list[newmask]
        # print(datapath_list)
    else:
        exist_cols = set()
    # Import New Data
    if len(datapath_list) == 0:
        raise Exception('*** no new files were found to import ***')
    timeit = Timer()
    # import the data
    masterdata = batch_read(datapath_list,
                            dropcols=dropcols,
                            selectcols=selectcols,
                            depricated_names=depnames,
                            tz=tz)

    # make a list of imported files to add to the db history table
    filelist = []
    for file in datapath_list:
        filelist.append(os.path.split(file)[1])
    filedata = pd.DataFrame(filelist, columns=['file'])

    timeit.split(report=False)

    # Export to SQL
    # check for new columns and add them if needed
    new_cols = list(set(masterdata.columns.values) - exist_cols)

    if os.path.isfile(db):
        with sqlite3.connect(db) as conn:
            c = conn.cursor()
            for n in new_cols:
                if 'int' in str(masterdata[n].dtype):
                    dtp = 'INT'
                elif 'float' in str(masterdata[n].dtype):
                    dtp = 'FLOAT'
                elif 'object' in str(masterdata[n].dtype):
                    # pandas stores strings with dtype 'object'
                    dtp = 'VARCHAR(255)'
                else:
                    dtp = ''
                c.execute('ALTER TABLE "values" ADD "{0}" {1}'.format(n, dtp))

    # change index to unix time integers (for speed when querying later)
    masterdata.index = (masterdata.index.values.astype(int) / 1e9).astype(int)

    #TODO add functionality for using MySQL
    #engine = create_engine('mysql+pymysql://root:root@localhost:8889/'+switch)
    engine = create_engine('sqlite:///' + db)
    masterdata.to_sql("values",
                      engine,
                      if_exists='append',
                      index=True,
                      index_label='date_time')
    filedata.to_sql("import_history", engine, if_exists='append', index=False)

    timeit.split('elapsed for SQL insert')
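
A hedged usage sketch for dbupdate_sqlite; the paths and rename mapping below are hypothetical.

dbupdate_sqlite('plant_data.db',
                'new_exports',
                tz='US/Mountain',
                dropcols=['comment'],
                depnames={'old tag name': 'new tag name'})
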
Example #14
from timing import Timer, Argument

mpiProcs = Argument("mpirun -np %d", extent=[1, 2, 4, 8], prepend=True, processes=True, runOnly="-machinefile $HOSTFILE -genv I_MPI_FABRICS shm:ofa")
maxThreads = Argument("-V parallel.maxthreads %d", extent=[1, 4, 8], threads=True)

timer = Timer("isces",
              mpiProcs,
              maxThreads,
              Argument("-V parallel.transform.threads %d", extent=[1, 4], upperBound=maxThreads),
              Argument("-V grid.z.points %d", value=256) * mpiProcs,
              setupCommandRoot="isces_init",
              directory="../../run",
              commandArgs=["-D2"],
              uniques=[Argument("-V input.file input_%03d_%%02i")])

results = timer.calculateTimes(torque=True, iterations=8, hours=4)
Example #15
def _init_timer(time_budgets):
    timer = Timer()
    timer.add_process('train_predict', time_budgets, timing.RESET)
    LOGGER.debug(f"init time budget of train_predict: {time_budgets} "
                 f"mode: {timing.RESET}")
    return timer
Example #16
from telebot import apihelper
from peewee import DoesNotExist
from functools import wraps
import logging
import pickle

import telebot
import config
from mwt import MWT
from config import db
from users import User, Player, Challenge, Role
from timing import Timer

default_duration = 12 * 60

timer = Timer(name='Round')
timer.set_duration(default_duration)

saveTimer = Timer(name='saver')
saveTimer.set_duration(1 * 60)

currentRound = 0
FILENAME = 'time.dat'

logger = logging.getLogger('bot')
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger.setLevel(logging.DEBUG)

# To store chosen groups. Group: recipient
transfers = {}
speedtags = [
    'LHR weigh-belt speed', 'LHR pug-mill speed', 'LHR cross-feeder speed',
    'LHR PSF speed'
]

# times when cablevey was started
idx = np.nonzero(
    np.diff(np.array(rawprocess['LHR cablevey load'] > 0.5, dtype=int)) ==
    1)[0]
cablevey_startup = rawprocess.index[idx]

# make new df for filter tags and then smooth
print('smoothing raw process data...')
# smooth filter
t = Timer()
proc_smooth = df_smooth(rawprocess[tags], smoothingWindowSize)
t.split('smoothing complete')
"""
Check speed variations across runs. This may be a problem with applying
phasing, since the pug mill and maybe other equipment were messed with 
in the middle of the runs. The speed settings on the pug mill are 
different for three runs and two are constant, so a run-specific phasing
delay may be applied (see below).
"""
if False:
    plt.close('all')
    fig, axs = plt.subplots(len(speedtags))
    fig.set_size_inches(12, 4)
    fig.set_tight_layout(True)
    for run in runs: