Example #1
0
 def _transaction(self, cmd, value=None):
     # normal transaction: send command, receive response
     if Channel.Perf: ts = _timer()
     self._sendCmd(cmd, value)
     r = _receive_dictio(self.sock, self.hostPort)
     if Channel.Perf: print('transaction time: %.5f' % (_timer() - ts))
     # _printd(f'reply from channel {self.name}: {r}')
     return r
Example #2
0
 def _run(self, args):
     with self._std_err_out() as (stderr, stdout):
         start = _timer()
         subp  = subprocess.Popen \
             ( args
             , env     = dict (sos.environ)
             , stderr  = stderr
             , stdout  = stdout
             )
         subp.wait()
         self.time = _timer() - start
     return self.resulting_error, self.resulting_output
Example #3
0
 def _run (self, args) :
     with self._std_err_out () as (stderr, stdout) :
         start = _timer ()
         subp  = subprocess.Popen \
             ( args
             , env     = dict (sos.environ)
             , stderr  = stderr
             , stdout  = stdout
             )
         subp.wait ()
         self.time = _timer () - start
     return self.resulting_error, self.resulting_output
Example #4
0
    def __process_task  ( self , task , chunks , **kwargs ) :
        """Helper internal method to process the task with chunks of data 
        """
            
        from timeit import default_timer as _timer
        from itertools import repeat, count   ## needed for jobs_args below
        start = _timer()

        ## initialize the task
        task.initialize_local ()
        
        ## mergers for statistics 
        merged_stat    = StatMerger ()
        merged_stat_pp = StatMerger ()

        ## start index for jobs
        index = 0 

        ## total number of jobs 
        njobs = sum  ( len ( c ) for c in chunks ) 
        from ostap.utils.progress_bar import ProgressBar
        with ProgressBar ( max_value = njobs , silent = self.silent ) as bar :

            while chunks :

                chunk = chunks.pop ( 0 ) 
                
                jobs_args = zip ( repeat ( task ) , count ( index ) , chunk )

                for jobid , result , stat in self.iexecute ( task_executor    ,
                                                             jobs_args        ,
                                                             progress = False ) :

                    ## merge statistics 
                    merged_stat += stat

                    ## merge/collect results
                    task.merge_results ( result , jobid )

                    bar += 1 

                index           += len ( chunk )
                
                pp_stat = self.get_pp_stat() 
                if pp_stat : merged_stat_pp  += pp_stat 

        ## finalize the task 
        task.finalize () 
        self.print_statistics ( merged_stat_pp , merged_stat , _timer() - start )
        ## 
        return task.results ()
Example #5
0
    def __process(self, task, chunks, **kwargs):
        """Helper internal method to process the task with chunks of data 
        """

        from timeit import default_timer as _timer
        start = _timer()

        if isinstance(task, Task):
            kwargs.pop('merger', None)
            return self.__process_task(task, chunks, **kwargs)

        ## mergers for statistics
        merged_stat = StatMerger()
        merged_stat_pp = StatMerger()
        merger = kwargs.pop('merger', TaskMerger())

        njobs = sum(len(c) for c in chunks)
        from ostap.utils.progress_bar import ProgressBar
        with ProgressBar(max_value=njobs, silent=self.silent) as bar:

            while chunks:

                chunk = chunks.pop()

                from itertools import repeat, count
                jobs_args = zip(repeat(task), count(), chunk)

                self.pool.restart(True)
                jobs = self.pool.uimap(func_executor, jobs_args)
                del jobs_args

                for result, stat in jobs:
                    bar += 1
                    merged_stat += stat
                    merger += result

                    del result
                    del stat

                merged_stat_pp += self.get_pp_stat()
                self.pool.close()
                self.pool.join()

        ## finalize task
        task.finalize()
        self.print_statistics(merged_stat_pp, merged_stat, _timer() - start)
        ##
        return merger.results
Example #6
0
def _main (cao) :
    cao_path       = list (cao.path or [])
    summary        = new_summary ()
    py_executables = [sys.executable] + list (cao.Extra_Interpreters)
    py_options     = sos.python_options ()
    py_vs          = "[Python %s]" % ", ".join \
        (sorted (py_version (pyx) for pyx in py_executables))
    if cao.RExclude :
        x_pat      = Regexp (cao.RExclude)
        exclude    = x_pat.search
    elif cao.exclude :
        exclude    = lambda a : fnmatch (a, cao.exclude)
    else :
        exclude    = lambda a : False
    run_dir = run_app_tests_dir_transitive \
        if cao.transitive else run_app_tests_dir
    start = _timer ()
    for a in cao.argv :
        runner = run_dir if sos.path.isdir (a) else run_app_tests_mod
        runner (a, exclude, py_executables, py_options, summary)
    if cao.summary :
        et = " in %7.5fs" % (_timer () - start, )
        if summary.failed :
            fmt = "%(argv)s fails %(f)s of %(t)s app-tests in %(cases)s test-cases%(et)s %(py_vs)s"
        else :
            fmt = "%(argv)s passes all of %(t)s app-tests in %(cases)s test-cases%(et)s %(py_vs)s"
        print ("=" * 79, file = sys.stderr)
        print \
            ( fmt % TFL.Caller.Scope
                ( argv   = " ".join (cao.argv)
                , cases  = summary.cases
                , et     = et
                , f      = summary.failed
                , t      = summary.total
                )
            , file = sys.stderr
            )
        print \
            ( "    %s"
            % ("\n    ".join ("%-68s : %s" % f for f in summary.failures))
            , file = sys.stderr
            )
        if summary.excluded :
            print \
                ("    %s excluded" % (", ".join (summary.excluded), )
                , file = sys.stderr
                )
Example #7
0
def _main (cao) :
    cao_path       = list (cao.path or [])
    summary        = new_summary ()
    py_executables = [sys.executable] + list (cao.Extra_Interpreters)
    py_options     = sos.python_options ()
    py_vs          = "[Python %s]" % ", ".join \
        (sorted (py_version (pyx) for pyx in py_executables))
    if cao.RExclude :
        x_pat      = Regexp (cao.RExclude)
        exclude    = x_pat.search
    elif cao.exclude :
        exclude    = lambda a : fnmatch (a, cao.exclude)
    else :
        exclude    = lambda a : False
    run_dir = run_app_tests_dir_transitive \
        if cao.transitive else run_app_tests_dir
    start = _timer ()
    for a in cao.argv :
        runner = run_dir if sos.path.isdir (a) else run_app_tests_mod
        runner (a, exclude, py_executables, py_options, summary)
    if cao.summary :
        et = " in %7.5fs" % (_timer () - start, )
        if summary.failed :
            fmt = "%(argv)s fails %(f)s of %(t)s app-tests in %(cases)s test-cases%(et)s %(py_vs)s"
        else :
            fmt = "%(argv)s passes all of %(t)s app-tests in %(cases)s test-cases%(et)s %(py_vs)s"
        print ("=" * 79, file = sys.stderr)
        print \
            ( fmt % TFL.Caller.Scope
                ( argv   = " ".join (cao.argv)
                , cases  = summary.cases
                , et     = et
                , f      = summary.failed
                , t      = summary.total
                )
            , file = sys.stderr
            )
        print \
            ( "    %s"
            % ("\n    ".join ("%-68s : %s" % f for f in summary.failures))
            , file = sys.stderr
            )
        if summary.excluded :
            print \
                ("    %s excluded" % (", ".join (summary.excluded), )
                , file = sys.stderr
                )
Example #8
0
    def __process_task(self, task, chunks, **kwargs):
        """Helper internal method to process the task with chunks of data 
        """
        assert isinstance(task, Task), 'Invalid task type  %s' % type(task)

        from timeit import default_timer as _timer
        start = _timer()

        ## initialize the task
        task.initialize_local()

        ## mergers for statistics
        merged_stat = StatMerger()
        merged_stat_pp = StatMerger()

        njobs = sum(len(c) for c in chunks)
        from ostap.utils.progress_bar import ProgressBar
        with ProgressBar(max_value=njobs, silent=self.silent) as bar:

            while chunks:

                chunk = chunks.pop()

                from itertools import repeat, count
                jobs_args = zip(repeat(task), count(), chunk)

                self.pool.restart(True)
                jobs = self.pool.uimap(task_executor, jobs_args)
                del jobs_args

                for result, stat in jobs:
                    bar += 1
                    merged_stat += stat
                    task.merge_results(result)

                    del result
                    del stat

                merged_stat_pp += self.get_pp_stat()
                self.pool.close()
                self.pool.join()

        task.finalize()
        self.print_statistics(merged_stat_pp, merged_stat, _timer() - start)
        ##
        return task.results()
Example #9
0
    def __exit__(self, *_):
        self.stop = _timer()
        self.delta = self.stop - self.start
        try:
            message = self.format % (self.name, self.delta)
        except TypeError:
            message = 'Timing %-18s %s' % (self.name, self.delta)

        self.logger(message)
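
Examples #9 and #18 look like the `__exit__` and `__enter__` halves of a logging timer context manager. A minimal self-contained sketch of how they might fit together (the class name, constructor defaults, and `print` as the default logger are assumptions, not the original source):

from timeit import default_timer as _timer

class Timer:
    # Sketch of a logging timer context manager; names and defaults assumed.
    def __init__(self, name='block', fmt='Timing %-18s %.6fs',
                 logger=print, start_message=None):
        self.name = name
        self.format = fmt
        self.logger = logger
        self.start_message = start_message

    def __enter__(self):
        self.start = _timer()
        if self.start_message:
            self.logger(self.start_message)
        return self

    def __exit__(self, *_):
        self.stop = _timer()
        self.delta = self.stop - self.start
        try:
            message = self.format % (self.name, self.delta)
        except TypeError:
            # fall back when the format string cannot accept (name, delta)
            message = 'Timing %-18s %s' % (self.name, self.delta)
        self.logger(message)

with Timer('warm-up loop'):
    sum(range(1_000_000))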
Example #10
0
        def _timeit(*args, **kwargs):
            # Temporarily turn off garbage collection during the timing.
            # Makes independent timings more comparable.
            # If it was originally enabled, switch it back on afterwards.
            gcold = gc.isenabled()
            gc.disable()

            try:
                # Outer loop - the number of repeats.
                trials = []
                for _ in _repeat(repeat):
                    # Inner loop - the number of calls within each repeat.
                    total = 0
                    for _ in _repeat(number):
                        start = _timer()
                        result = func(*args, **kwargs)
                        end = _timer()
                        total += end - start
                    trials.append(total)

                # We want the *average time* from the *best* trial.
                # For more on this methodology, see the docs for
                # Python's `timeit` module.
                #
                # "In a typical case, the lowest value gives a lower bound
                # for how fast your machine can run the given code snippet;
                # higher values in the result vector are typically not
                # caused by variability in Python’s speed, but by other
                # processes interfering with your timing accuracy."
                best = min(trials) / number
                print("Best of {} trials with {} function"
                      " calls per trial:".format(repeat, number))
                print(
                    "Function `{}` ran in average"
                    " of {:0.3f} seconds.".format(func.__name__, best),
                    end="\n\n",
                    file=file,
                )
            finally:
                if gcold:
                    gc.enable()
            # Result is returned *only once*
            return result
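
The `_timeit` closure above presumably lives inside a decorator factory. A self-contained sketch of that pattern, following the best-of-repeats methodology described in the comments (the factory name `timeit`, its defaults, and the use of `range` in place of the unshown `_repeat` helper are assumptions):

import gc
import sys
from timeit import default_timer as _timer

def timeit(repeat=3, number=1000, file=sys.stdout):
    # Hypothetical factory wrapping a closure like the _timeit shown above.
    def decorator(func):
        def _timeit(*args, **kwargs):
            gcold = gc.isenabled()
            gc.disable()                      # keep independent timings comparable
            try:
                trials = []
                for _ in range(repeat):       # outer loop: repeats
                    total = 0.0
                    for _ in range(number):   # inner loop: calls per repeat
                        start = _timer()
                        result = func(*args, **kwargs)
                        total += _timer() - start
                    trials.append(total)
                best = min(trials) / number   # average time from the best trial
                print('Function `{}` best average: {:0.6f}s'.format(
                    func.__name__, best), file=file)
            finally:
                if gcold:
                    gc.enable()
            return result                     # result of the last call only
        return _timeit
    return decorator

@timeit(repeat=3, number=100)
def work(n=10_000):
    return sum(i * i for i in range(n))

work()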
Example #11
0
  def __exit__(self, exc_type, exc_val, exc_tb):
    if (Timelimit.interrupted_flag):
      print(
        'The time limit of', self.seconds, 'seconds was reached. '
        'The results may be incomplete or inaccurate.',
        file=sys.stderr)
      if __debug__:
        print('INFO:', _timer() - Timelimit.interrupted_flag,
          'seconds between interruption and program termination.',
          file=sys.stderr)

    assert Timelimit.interrupted_flag is not None or not self.seconds
    Timelimit.interrupted_flag = None
Example #12
0
def time_block (fmt = "Execution time: %s", out = None, cb = None) :
    """Context manager measuring the execution time for a block.

       After finishing the block, `cb` will be called with the arguments
       `start`, `finish`, and `delta`, if specified.

       Otherwise, `time_block` will use `fmt` to write the execution time to
       sys.stdout.
    """
    start  = _timer ()
    yield
    finish = _timer ()
    delta  = finish - start
    if cb is not None :
        cb (start, finish, delta)
    else :
        from _TFL.pyk import pyk
        try :
            msg = fmt % (delta, )
        except (TypeError, ValueError) as exc :
            msg = "%s: %s" % (fmt, delta)
        pyk.fprint (msg, file = out)
Example #13
0
def time_block(fmt="Execution time: %s", out=None, cb=None):
    """Context manager measuring the execution time for a block.

       After finishing the block, `cb` will be called with the arguments
       `start`, `finish`, and `delta`, if specified.

       Otherwise, `time_block` will use `fmt` to write the execution time to
       sys.stdout.
    """
    start = _timer()
    yield
    finish = _timer()
    delta = finish - start
    if cb is not None:
        cb(start, finish, delta)
    else:
        from _TFL.pyk import pyk
        try:
            msg = fmt % (delta, )
        except (TypeError, ValueError) as exc:
            msg = "%s: %s" % (fmt, delta)
        pyk.fprint(msg, file=out)
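
A brief usage sketch for `time_block`. Its bare `yield` means it must be wrapped with `contextlib.contextmanager` (the TFL source presumably does this); the explicit wrapping, callback, and workload below are hypothetical:

from contextlib import contextmanager

time_block = contextmanager(time_block)    # only needed if not already decorated

def report(start, finish, delta):          # hypothetical callback
    print('block ran in %.5fs' % delta)

with time_block(cb=report):                # cb takes precedence over fmt
    sum(range(1_000_000))                  # stand-in workload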
Example #14
0
    def run_pipeline(self):
        # Uses a special logger for logging profiling information.
        logger = logging.getLogger("numba.pipeline.profiler")
        ast = self.ast
        talpha = _timer() # for profiling complete pipeline
        for method_name in self.order:
            ts = _timer() # for profiling individual stage
            if __debug__ and logger.getEffectiveLevel() < logging.DEBUG:
                stage_tuple = (method_name, utils.ast2tree(ast))
                logger.debug(pprint.pformat(stage_tuple))

            self._current_pipeline_stage = method_name
            ast = getattr(self, method_name)(ast)

            te = _timer() #  for profiling individual stage
            logger.info("%X pipeline stage %30s:\t%.3fms",
                        id(self), method_name, (te - ts) * 1000)

        tomega = _timer() # for profiling complete pipeline
        logger.info("%X pipeline entire:\t\t\t\t\t%.3fms",
                    id(self), (tomega - talpha) * 1000)

        return self.func_signature, self.symtab, ast
Example #15
0
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.interrupted_flag:
            print(
                'The time limit of {} seconds was reached. The results may be '
                'incomplete or inaccurate.'.format(self.seconds),
                file=sys.stderr)
            if _timer:
                print('INFO:',
                      _timer() - self.interrupted_flag,
                      'seconds between interruption and program termination.',
                      file=sys.stderr)

        assert self.interrupted_flag is not None or not self.seconds
        self.interrupted_flag = None
Example #16
0
    def __exit__(self, exc_type, exc_val, exc_tb):
        if (Timelimit.interrupted_flag):
            print('The time limit of',
                  self.seconds, 'seconds was reached. '
                  'The results may be incomplete or inaccurate.',
                  file=sys.stderr)
            if __debug__:
                print('INFO:',
                      _timer() - Timelimit.interrupted_flag,
                      'seconds between interruption and program termination.',
                      file=sys.stderr)

        assert Timelimit.interrupted_flag is not None or not self.seconds
        Timelimit.interrupted_flag = None
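
Examples #11, #15, #16, #22, #23, and #25 all revolve around a SIGALRM-driven time limit. A minimal sketch of how the handler and `__exit__` might be wired together (the `__enter__` and alarm setup are assumptions, not the original implementation; SIGALRM is Unix-only):

import signal
import sys
from timeit import default_timer as _timer

class Timelimit:
    # Sketch of a SIGALRM-based time limit context manager; layout assumed.
    interrupted_flag = None

    def __init__(self, seconds=0):
        self.seconds = seconds

    @classmethod
    def __timeout_handler(cls, signum, frame):
        if signum == signal.SIGALRM:
            # remember when the alarm fired so __exit__ can report the lag
            cls.interrupted_flag = _timer() if __debug__ else True

    def __enter__(self):
        if self.seconds:
            signal.signal(signal.SIGALRM, Timelimit.__timeout_handler)
            signal.alarm(self.seconds)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        signal.alarm(0)                      # cancel a pending alarm, if any
        if Timelimit.interrupted_flag:
            print('The time limit of', self.seconds, 'seconds was reached. '
                  'The results may be incomplete or inaccurate.',
                  file=sys.stderr)
        Timelimit.interrupted_flag = None

with Timelimit(5):                           # hypothetical 5-second budget
    pass                                     # long-running work goes here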
Example #17
0
    def __process_func ( self , task , chunks  , **kwargs ) :
        """Helper internal method for parallel processiing of
        the plain function with chunks of data
        """
        from ostap.utils.cidict import cidict
        my_args = cidict( kwargs )
        
        from timeit import default_timer as _timer
        from itertools import repeat, count   ## needed for jobs_args below
        start = _timer()
        
        init      = my_args.pop ( 'init'      , None )
        merger    = my_args.pop ( 'merger'    , None )
        collector = my_args.pop ( 'collector' , None )
        
        ## mergers for statistics & results
        if   not merger and not collector :
            logger.warning ( "Neither ``merger'' nor ``collector'' are specified for merging!")
        elif     merger and     collector :
            logger.warning ( "Both    ``merger'' and ``collector'' are specified for merging!")
            
        ## mergers for statistics 
        merged_stat    = StatMerger ()
        merged_stat_pp = StatMerger ()

        ## start index for the jobs 
        index = 0

        ## initialize the results 
        results = init

        from ostap.utils.progress_bar import ProgressBar
        ## total number of jobs  
        njobs = sum  ( len ( c ) for c in chunks )
        with ProgressBar ( max_value = njobs , silent = self.silent ) as bar :
            
            while chunks :

                chunk = chunks.pop ( 0 ) 
                
                jobs_args = zip ( repeat ( task ) , count ( index ) , chunk )

                ## call for the actual jobs handling method 
                for jobid , result , stat in self.iexecute ( func_executor    ,
                                                             jobs_args        ,
                                                             progress = False ) :
                    
                    merged_stat += stat
                    
                    ## merge results if merger or collector are provided 
                    if   merger    : results = merger    ( results , result ) 
                    elif collector : results = collector ( results , result , jobid )
                    
                    bar += 1 

                index           += len ( chunk )
                
                pp_stat = self.get_pp_stat() 
                if pp_stat : merged_stat_pp  += pp_stat 

        ## print statistics 
        self.print_statistics ( merged_stat_pp , merged_stat , _timer() - start )
        ##
        return results 
Example #18
0
 def __enter__(self):
     self.start = _timer()
     if self.start_message:
         self.logger(self.start_message)
     return self
Example #19
0
def _main (cmd) :
    cmd_path   = list (cmd.path or [])
    replacer   = Re_Replacer (r"\.py[co]", ".py")
    a          = cmd.argv [0]
    et         = ""
    one_arg_p  = len (cmd.argv) == 1 and not sos.path.isdir (a)
    if one_arg_p and not cmd.Extra_Interpreters :
        f              = Filename (a)
        m              = f.base
        py_version     = " [py %s]" % \
            ".".join (str (v) for v in sys.version_info [:3])
        sys.path [0:0] = cmd_path
        mod_path       = f.directory if f.directory else "./"
        if sos.path.exists \
               (Filename ("__init__.py", default_dir = mod_path).name) :
            sys.path [0:0] = [sos.path.join (mod_path, "..")]
        sys.path [0:0] = [mod_path]
        flags = doctest.NORMALIZE_WHITESPACE
        if not cmd.nodiff :
            flags |= doctest.REPORT_NDIFF
        try :
            logging.disable (logging.WARNING)
            start  = _timer ()
            module = __import__ (m)
            module.expect_except = TFL.CAO.expect_except
            cases  = len (getattr (module, "__test__", ())) or 1
            f, t   = doctest.testmod \
                ( module
                , verbose     = cmd.verbose
                , optionflags = flags
                )
            exec_time = _timer () - start
        except KeyboardInterrupt :
            raise
        except Exception as exc :
            exec_time = _timer () - start
            if cmd.timing :
                et = " in %7.5fs" % (exec_time, )
            msg = format_x % (replacer (a), py_version, exc, et)
            print (msg, file = sys.stderr)
            raise
        else :
            format = format_f if f else format_s
            if cmd.timing :
                et = " in %7.5fs" % (exec_time, )
            print (replacer (format % TFL.Caller.Scope ()), file = sys.stderr)
    else :
        py_executables = [sys.executable] + list (cmd.Extra_Interpreters)
        py_version     = ""
        head_pieces    = sos.python_options () + \
            [ __file__
            , "-path %r" % (",".join (cmd_path), ) if cmd_path else ""
            ]
        for opt in ("nodiff", "timing", "verbose") :
            if getattr (cmd, opt) :
                head_pieces.append ("-" + opt)
        head = " ".join (hp for hp in head_pieces if hp)
        if cmd.summary :
            run_cmd = run_command_with_summary
        else :
            run_cmd = run_command
        if cmd.RExclude :
            x_pat   = Regexp (cmd.RExclude)
            exclude = x_pat.search
        elif cmd.exclude :
            exclude = lambda a : fnmatch.fnmatch (a, cmd.exclude)
        else :
            exclude = lambda a : False
        def run_mod (a) :
            if exclude (a) :
                summary.excluded.append (a)
                print ("%s excluded" % (a, ))
            else :
                summary.modules += 1
                for pyx in py_executables :
                    run_cmd ("%s %s %s" % (pyx, head, a))
        def run_mods (d) :
            for f in sorted (sos.listdir_exts (d, ".py")) :
                if has_doctest (f) :
                    run_mod (f)
        if cmd.transitive :
            from _TFL.subdirs import subdirs
            def run_dir (d) :
                run_mods (d)
                for s in subdirs (d) :
                    run_dir (s)
        else :
            run_dir = run_mods
        start = _timer ()
        for a in cmd.argv :
            if sos.path.isdir (a) :
                run_dir (a)
            else :
                if has_doctest (a) :
                    run_mod (a)
        if cmd.summary :
            format = format_f if summary.failed else format_s
            if cmd.timing :
                et = " in %7.5fs" % (_timer () - start, )
            print ("=" * 79, file = sys.stderr)
            print \
                ( format % TFL.Caller.Scope
                    ( f      = summary.failed
                    , module = TFL.Record (__file__ = " ".join (cmd.argv))
                    , t      = summary.total
                    , cases  = summary.cases
                    , et     = et
                    )
                , "[%s/%s modules fail]" %
                    (len (summary.failures), summary.modules)
                , file = sys.stderr
                )
            print \
                ( "    %s"
                % ("\n    ".join ("%-68s : %s" % f for f in summary.failures))
                , file = sys.stderr
                )
            if summary.excluded :
                print \
                    ("    %s excluded" % (", ".join (summary.excluded), )
                    , file = sys.stderr
                    )
Example #20
0
def cluster_filelist_mtimes(filelist):
    """
    Perform a statistical clustering of the timestamps (`mtime` values) of a
    list of files to find "relatively" large gaps in acquisition time. The
    definition of `relatively` depends on the context of the entire list of
    files. For example, if many files are simultaneously acquired,
    the "inter-file" time spacing between these will be very small (near zero),
    meaning even fairly short gaps between files may be important.
    Conversely, if files are saved every 30 seconds or so, the tolerance for
    a "large gap" will need to be correspondingly larger.

    The approach this method uses is to detect minima in the
    `Kernel Density Estimation`_ (KDE) of the file modification times. To
    determine the optimal bandwidth parameter to use in KDE, a `grid search`_
    over possible appropriate bandwidths is performed, using `Leave One Out`_
    cross-validation. This approach allows the method to determine the
    important gaps in file acquisition times with sensitivity controlled by
    the distribution of the data itself, rather than a pre-supposed optimum.
    The KDE minima approach was suggested `here`_.

    .. _Kernel Density Estimation: https://scikit-learn.org/stable/modules/density.html#kernel-density
    .. _grid search: https://scikit-learn.org/stable/modules/grid_search.html#grid-search
    .. _Leave One Out: https://scikit-learn.org/stable/modules/cross_validation.html#leave-one-out-loo
    .. _here: https://stackoverflow.com/a/35151947/1435788


    Parameters
    ----------
    filelist : list
        The files (as a list) whose timestamps will be interrogated to find
        "relatively" large gaps in acquisition time (as a means to find the
        breaks between discrete Acquisition Activities)

    Returns
    -------
    aa_boundaries : list
        A list of the `mtime` values that represent boundaries between
        discrete Acquisition Activities
    """
    _logger.info('Starting clustering of file mtimes')
    start_timer = _timer()
    mtimes = sorted([_os.path.getmtime(f) for f in filelist])

    # remove duplicate file mtimes (since they cause errors below):
    mtimes = sorted(list(set(mtimes)))
    m_array = _np.array(mtimes).reshape(-1, 1)

    # mtime_diff is a discrete differentiation to find the time gap between
    # sequential files
    mtime_diff = [j - i for i, j in zip(mtimes[:-1], mtimes[1:])]

    # Bandwidth to use is uncertain, so do a grid search over possible values
    # from smallest to largest sequential mtime difference (logarithmically
    # biased towards smaller values). we do cross-validation using the Leave
    # One Out strategy and using the total log-likelihood from the KDE as
    # the score to maximize (goodness of fit)
    bandwidths = _np.logspace(_math.log(min(mtime_diff)),
                              _math.log(max(mtime_diff)),
                              35,
                              base=_math.e)
    _logger.info('KDE bandwidth grid search')
    grid = _GridSearchCV(_KernelDensity(kernel='gaussian'),
                         {'bandwidth': bandwidths},
                         cv=_LeaveOneOut(),
                         n_jobs=-1)
    grid.fit(m_array)
    bw = grid.best_params_['bandwidth']
    _logger.info(f'Using bandwidth of {bw:.3f} seconds for KDE')

    # Calculate AcquisitionActivity boundaries by "clustering" the timestamps
    # using KDE using KDTree nearest neighbor estimates, and the previously
    # identified "optimal" bandwidth
    kde = _KernelDensity(kernel='gaussian', bandwidth=bw)
    kde = kde.fit(m_array)
    s = _np.linspace(m_array.min(), m_array.max(), num=len(mtimes) * 10)
    e = kde.score_samples(s.reshape(-1, 1))

    mins = _argrelextrema(e, _np.less)[0]  # the minima indices
    aa_boundaries = [s[m] for m in mins]  # the minima mtime values
    end_timer = _timer()
    _logger.info(f'Detected {len(aa_boundaries) + 1} activities in '
                 f'{end_timer - start_timer:.2f} seconds')

    return aa_boundaries
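
A brief usage sketch for `cluster_filelist_mtimes` (the directory and file extension are hypothetical placeholders):

import glob
files = sorted(glob.glob('/data/microscopy_session/*.dm3'))  # hypothetical session files
aa_boundaries = cluster_filelist_mtimes(files)               # mtimes that split the session into activities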
Example #21
0
def _recvUdp(sock, socketSize):
    """Receive the chopped UDP data"""
    port = sock.getsockname()[1]
    #print(f'>_recvUdp {port} locked: {recvLock.locked()}')
    #with recvLock:
    global retransmitInProgress
    chunks = {sock: {}}
    tryMore = 5  # Max number of allowed lost packets
    ts = _timer()
    ignoreEOD = 3

    def ask_retransmit(offsetSize):
        global retransmitInProgress
        retransmitInProgress = tuple(offsetSize)
        cmd = {'cmd': ('retransmit', offsetSize)}
        _printi(f'Asking to retransmit port {port}: {cmd}')
        sock.sendto(ubjson.dumpb(cmd), addr)

    while tryMore:
        try:
            buf, addr = sock.recvfrom(socketSize)
        #else:#except Exception as e:
        except socket.timeout as e:
            msg = f'Timeout in recvfrom port {port}'
            _printi(msg)
            raise
            # Do not return; raise the exception, otherwise the pypet will not recover
            #_printw(msg)
            #return [],0
            #return ('WARNING: '+msg).encode(), 0
            #buf = None
        if buf is None:
            raise RuntimeError(msg)
        size = len(buf) - PrefixLength
        offset = int.from_bytes(buf[:PrefixLength], 'big')  # python3

        #DNP_printi(f'chunk received at port {port}: {offset,size}')

        if size > 0:
            chunks[sock][offset, size] = buf[PrefixLength:]
        if offset > 0 and not retransmitInProgress:
            # expect more chunks to come
            continue

        # check if chunk is EOD, i.e. offset,size = 0,0
        if size == 0:
            ignoreEOD -= 1
            if ignoreEOD >= 0:
                #print(f'premature EOD{ignoreEOD} received from, ignore it')
                continue
            else:
                msg = f'Looks like first chunk is missing at port {port}'
                _printw(msg)
                #This is hard to recover. Give up
                return [], 0
                #return ('WARNING: '+msg).encode(), addr
                # sortedKeys = sorted(chunks[sock])
                # offsetSize = [0,sortedKeys[0][0]]
                # allAssembled = False
                # ask_retransmit(offsetSize)
                # break
        else:
            #print('First chunk received')
            pass

        if retransmitInProgress is not None:
            if (offset, size) in chunks[sock]:
                _printi(f'retransmission received {offset,size}')
            else:
                _printw(
                    f'server failed to retransmit chunk {retransmitInProgress}'
                )
                tryMore = 1
            retransmitInProgress = None

        # last chunk has been received, offset==0, size!=0
        # check for lost chunks
        sortedKeys = sorted(chunks[sock])
        prev = [0, 0]
        allAssembled = True
        for offset, size in sortedKeys:
            #print('check offset,size:'+str((offset,size)))
            last = prev[0] + prev[1]
            if last != offset:
                l = offset - last
                if l > 65536:
                    msg = f'Lost too many bytes at port {port}: {last,l}, data discarded'
                    _printw(msg)
                    #raise RuntimeError(msg)
                    return [], 0
                    #return 'WARNING: '+msg, addr
                ask_retransmit((last, l))
                allAssembled = False
                break
            prev = [offset, size]

        if allAssembled:
            break
        #print(f'tryMore: {tryMore}')
        tryMore -= 1
    ts1 = _timer()

    if not allAssembled:
        msg = 'Partial assembly of %i frames' % len(chunks[sock])
        #raise BufferError(msg)
        _printw(msg)
        return [], 0
        #return ('WARNING: '+msg).encode(), addr

    data = bytearray()
    sortedKeys = sorted(chunks[sock])
    for offset, size in sortedKeys:
        # _printd('assembled offset,size '+str((offset,size)))
        data += chunks[sock][(offset, size)]
    tf = _timer()
    # if len(data) > 500000:
    # _printd('received %i bytes in %.3fs, assembled in %.6fs'\
    # %(len(data),ts1-ts,tf-ts1))
    #_printi('assembled %i bytes'%len(data))
    return data, addr
Example #22
0
 def __timeout_handler(signum, frame):
     if signum == signal.SIGALRM:
         Timelimit.interrupted_flag = _timer() if __debug__ else True
Example #23
0
 def __timeout_handler(cls, signum, frame):
     if signum == signal.SIGALRM:
         if _timer:
             cls.interrupted_flag = _timer()
         else:
             cls.interrupted_flag = True
Example #24
0
def build_acq_activities(instrument, dt_from, dt_to, sample_id,
                         generate_previews):
    """
    Build an XML string representation of each AcquisitionActivity for a
    single microscopy session. This includes setup parameters and metadata
    associated with each dataset obtained during a microscopy session. Unique
    AcquisitionActivities are delimited via clustering of file collection
    time to detect "long" breaks during a session.

    Parameters
    ----------
    instrument : :py:class:`~nexusLIMS.instruments.Instrument`
        One of the NexusLIMS instruments contained in the
        :py:attr:`~nexusLIMS.instruments.instrument_db` database.
        Controls what instrument calendar is used to get events.
    dt_from : datetime.datetime
        The starting timestamp that will be used to determine which files go
        in this record
    dt_to : datetime.datetime
        The ending timestamp used to determine the last point in time for
        which files should be associated with this record
    sample_id : str
        An identifier for the sample from which data was collected
    generate_previews : bool
        Whether or not to create the preview thumbnail images

    Returns
    -------
    acq_activities : str
        A string representing the XML output for each AcquisitionActivity
        associated with a given reservation/experiment on a microscope.

    activities : :obj:`list` of :obj:`~nexusLIMS.schemas.activity.AcquisitionActivity`:
        The list of :py:class:`~nexusLIMS.schemas.activity.AcquisitionActivity`
        objects generated for the record
    """
    _logging.getLogger('hyperspy.io_plugins.digital_micrograph').setLevel(
        _logging.WARNING)

    start_timer = _timer()
    path = _os.path.abspath(
        _os.path.join(_os.environ['mmfnexus_path'], instrument.filestore_path))
    # find the files to be included
    files = get_files(path, dt_from, dt_to)

    # remove all files but those supported by nexusLIMS.extractors
    files = [
        f for f in files if _os.path.splitext(f)[1].strip('.') in _ext.keys()
    ]

    end_timer = _timer()
    _logger.info(f'Found {len(files)} files in'
                 f' {end_timer - start_timer:.2f} seconds')

    # return a string indicating no files found if none were found
    if len(files) == 0:
        raise FileNotFoundError('No files found in this time range')

    # get the timestamp boundaries of acquisition activities
    aa_bounds = cluster_filelist_mtimes(files)

    # add the last file's modification time to the boundaries list to make
    # the loop below easier to process
    aa_bounds.append(_os.path.getmtime(files[-1]))

    activities = [None] * len(aa_bounds)

    i = 0
    aa_idx = 0
    while i < len(files):
        f = files[i]
        mtime = _os.path.getmtime(f)

        # check this file's mtime, if it is less than this iteration's value
        # in the AA bounds, then it belongs to this iteration's AA
        # if not, then we should move to the next activity
        if mtime <= aa_bounds[aa_idx]:
            # if current activity index is None, we need to start a new AA:
            if activities[aa_idx] is None:
                start_time = _datetime.fromtimestamp(mtime)
                activities[aa_idx] = _AcqAc(start=start_time)

            # add this file to the AA
            _logger.info(
                f'Adding file {i}/{len(files)} '
                f'{f.replace(_os.environ["mmfnexus_path"], "").strip("/")} '
                f'to activity {aa_idx}')
            activities[aa_idx].add_file(f, generate_previews)
            # assume this file is the last one in the activity (this will be
            # true on the last iteration where mtime is <= to the
            # aa_bounds value)
            activities[aa_idx].end = _datetime.fromtimestamp(mtime)
            i += 1
        else:
            # this file's mtime is after the boundary and is thus part of the
            # next activity, so increment AA counter and reprocess file (do
            # not increment i)
            aa_idx += 1

    acq_activities_str = ''
    _logger.info('Finished detecting activities')
    for i, a in enumerate(activities):
        # aa_logger = _logging.getLogger('nexusLIMS.schemas.activity')
        # aa_logger.setLevel(_logging.ERROR)
        _logger.info(f'Activity {i}: storing setup parameters')
        a.store_setup_params()
        _logger.info(f'Activity {i}: storing unique metadata values')
        a.store_unique_metadata()

        acq_activities_str += a.as_xml(i,
                                       sample_id,
                                       indent_level=1,
                                       print_xml=False)

    return acq_activities_str, activities
Example #25
0
 def __timeout_handler(signum, frame):
   if signum == signal.SIGALRM:
     Timelimit.interrupted_flag = _timer() if __debug__ else True
Example #26
0
            if self.verbose:
                print('Number of iterations to converge: {}'.format(ct_iter))
                print('Finished detrending spectra {}/{}'.format(
                    ct + 1, sig_n_to_detrend))

        return baseline_output


if __name__ == '__main__':  # pragma: no cover

    x = _np.linspace(-100, 100, 1000)
    y = 10 * _np.exp(-(x**2 / (2 * 20**2)))

    rng = _np.arange(200, 800)
    asym_vec = 0 * x + 1e-7
    fix_rng = _np.arange(600)

    Y = _np.dot(_np.ones((200, 1)), y[None, :])

    als = AlsCvxopt(use_prev=False)
    tmr = _timer()
    y_als = als.calculate(Y)
    tmr -= _timer()
    print('Time with cold start: {:1.3f} sec'.format(-tmr))

    als = AlsCvxopt(use_prev=True)
    tmr = _timer()
    y_als = als.calculate(Y)
    tmr -= _timer()
    print('Time with warm start: {:1.3f} sec'.format(-tmr))