Example #1
def higher_request(start_time, bbox, db, level=0):
    """ Try to insert all photos in this region into db by potentially making
    recursing call, eventually to lower_request when the region accounts for
    less than 4000 photos. """
    if level > 20:
        logging.warn("Going too deep with {}.".format(bbox))
        return 0

    _, total = make_request(start_time, bbox, 1, need_answer=True,
                            max_tries=10)
    if level == 0:
        logging.info('Aiming for {} photos'.format(total))
    if total > 4000:
        photos = 0
        start = clock()
        quads = split_bbox(bbox)
        for q in quads:
            photos += higher_request(start_time, q, db, level+1)
        logging.info('Finish {}: {} photos in {}s'.format(bbox, photos,
                                                          clock()-start))
        return photos
    if total > 5:
        return lower_request(start_time, bbox, db, total/PER_PAGE + 1)
    logging.warn('Cannot get any photos in {}.'.format(bbox))
    return 0
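Note: split_bbox and lower_request are defined elsewhere in daureg/illalla and are not shown in this excerpt. Purely as an illustration of the quadrant recursion described in the docstring, a split_bbox could look like the sketch below; the actual bounding-box layout used by the project may differ.

def split_bbox(bbox):
    # Illustrative sketch only: assumes bbox = (min_lon, min_lat, max_lon, max_lat)
    # and cuts the region into four equal quadrants for the recursive calls above.
    min_lon, min_lat, max_lon, max_lat = bbox
    mid_lon = (min_lon + max_lon) / 2.0
    mid_lat = (min_lat + max_lat) / 2.0
    return [(min_lon, min_lat, mid_lon, mid_lat),
            (mid_lon, min_lat, max_lon, mid_lat),
            (min_lon, mid_lat, mid_lon, max_lat),
            (mid_lon, mid_lat, max_lon, max_lat)]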
Example #2
def S1():
    var('x,y,z')
    f = (x+y+z+1)**7
    t1 = clock()
    g = expand(f*(f+1))
    t2 = clock()
    return t2 - t1
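The S*/R* snippets in this listing are SymPy benchmark excerpts that rely on module-level imports not shown here. A minimal self-contained version of S1, assuming only SymPy plus the same clock alias used in Examples #10 and #13, looks like this:

from timeit import default_timer as clock
from sympy import var, expand

def S1():
    # Time the expansion of f*(f+1) for f = (x + y + z + 1)**7.
    x, y, z = var('x,y,z')
    f = (x + y + z + 1)**7
    t1 = clock()
    g = expand(f*(f + 1))
    t2 = clock()
    return t2 - t1

print('S1: %.3f s' % S1())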
Example #3
File: ServeNN.py Project: daureg/illalla
def perform_search(from_city, to_city, region, metric):
    start = clock()
    for res, _, progress in nb.best_match(from_city, to_city, region, 900,
                                          progressive=True,
                                          metric=metric):
        # print(progress)
        try:
            distance, r_vids, center, radius = res
        except (ValueError, TypeError):
            import json
            desc = {"type": "Feature", "properties":
                    {"nb_venues": len(res),
                     "venues": res,
                     "origin": from_city},
                    "geometry": region}
            with open('scratch.json', 'a') as out:
                out.write(json.dumps(desc, sort_keys=True, indent=2,
                                     separators=(',', ': '))+'\n')
            return
        if len(center) == 2:
            center = c.euclidean_to_geo(to_city, center)
        relevant = {'dst': distance, 'radius': radius, 'center': center,
                    'nb_venues': len(r_vids)}
        SEARCH_STATUS.update(dict(seen=False, progress=progress, res=relevant))
    print("done search in {:.3f}".format(clock() - start))
    SEARCH_STATUS.update(dict(seen=False, progress=1.0, done=True,
                              res=relevant))
Example #4
def S3a():
    var('x,y,z')
    f = expand((x**y + y**z + z**x)**500)
    t1 = clock()
    g = f.diff(x)
    t2 = clock()
    return t2 - t1
Example #5
File: torture.py Project: fperez/sympy
def testit(line):
    print line
    t1 = clock()
    exec line
    t2 = clock()
    elapsed = t2-t1
    print "Time:", elapsed, "for", line, "(OK)"
Example #6
File: HeadTail.py Project: robj137/Fun
def CreateGraph(textFile, nLetters):
  pickleFileName = 'wordList'+str(nLetters)+'.pkl'
  try:
    wordList = pickle.load(open(pickleFileName,'r'))
    return wordList
  except:
    start = clock()
    inFile = open(textFile, 'r')
    wordList = {}
    for word in inFile:
      word = word.rstrip().lower()
      if len(word) == nLetters:
        wordList[word] = [] #good, ignores duplicates
    maimed = {}
    for word in wordList:
      for altWord in GetMaimedWords(word):
        if altWord not in maimed:
          maimed[altWord] = []
        maimed[altWord].append(word)
    print 'Time taken = %.5f' %(clock()-start)
    for word in wordList:
      for maimWord in GetMaimedWords(word):
        for altWord in maimed[maimWord]:
          if altWord not in wordList[word] and altWord != word:
            wordList[word].append(altWord)
    print 'Created pre-processed graph in %.5f seconds' % (clock()-start)
    pickle.dump(wordList, open(pickleFileName, 'w'))
    return wordList
Example #7
 def _compute_rpi(self):
     E = self.Etrain
     N = self.N
     G = {u: set() for u in range(N)}
     for v, u in E:
         G[u].add(v)
     cst = np.log(self.beta/(1-self.beta))
     incoming = [[] if len(G[i]) == 0 else sorted(G[i]) for i in range(N)]
     wincoming = [np.array([self.lambda_1 * E[(j, i)] for j in ins])
                  for i, ins in enumerate(incoming)]
     pi = self.beta*np.ones(N)
     sstart = clock()
     nb_iter, eps = 0, 1
     while eps > self.tol and nb_iter < self.n_iters:
         next_pi = np.zeros_like(pi)
         for i in range(N):
             if not incoming[i]:
                 continue
             pij = pi[incoming[i]]
             ratio = pij/(1 + np.exp(-cst - wincoming[i]))
             opp_prod = np.exp(np.log(1-pij).sum())
             next_pi[i] = (ratio.sum() + self.beta*opp_prod)/(pij.sum() + opp_prod)
         eps = (np.abs(next_pi - pi)/pi).mean()
         pi = next_pi.copy()
         nb_iter += 1
     self.time_taken += clock() - sstart
     self.rpi = pi
Example #8
def R7():
    var('x')
    f = x**24 + 34*x**12 + 45*x**3 + 9*x**18 + 34*x**10 + 32*x**21
    t1 = clock()
    a = [f.subs({x: random()}) for _ in xrange(10**4)]
    t2 = clock()
    return t2 - t1
Example #9
def S2():
    var("x y z")
    e = (x**sin(x) + y**cos(y) + z**(x + y))**100
    t1 = clock()
    f = e.expand()
    t2 = clock()
    return t2 - t1
Example #10
def timing(f, *args, **kwargs):
    """
    Returns time elapsed for evaluating ``f()``. Optionally arguments
    may be passed to time the execution of ``f(*args, **kwargs)``.

    If the first call is very quick, ``f`` is called
    repeatedly and the best time is returned.
    """
    once = kwargs.get('once')
    if 'once' in kwargs:
        del kwargs['once']
    if args or kwargs:
        if len(args) == 1 and not kwargs:
            arg = args[0]
            g = lambda: f(arg)
        else:
            g = lambda: f(*args, **kwargs)
    else:
        g = f
    from timeit import default_timer as clock
    t1=clock(); v=g(); t2=clock(); t=t2-t1
    if t > 0.05 or once:
        return t
    for i in range(3):
        t1=clock();
        # Evaluate multiple times because the timer function
        # has a significant overhead
        g();g();g();g();g();g();g();g();g();g()
        t2=clock()
        t=min(t,(t2-t1)/10)
    return t
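A quick usage sketch for the timing() helper above (not part of the original module); slow_sum is just a hypothetical stand-in workload:

def slow_sum(n):
    return sum(range(n))

t = timing(slow_sum, 10**6)                  # times f(arg), repeating if the call is very fast
print("slow_sum(10**6): %.6f s" % t)

t_once = timing(slow_sum, 10**6, once=True)  # force a single timed call
print("single run:      %.6f s" % t_once)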
Example #11
def pairs(name, data, labels=None):
    """ Generate something similar to R `pairs` """
    nvariables = data.shape[1]
    mpl.rcParams["figure.figsize"] = 3.5 * nvariables, 3.5 * nvariables
    if labels is None:
        labels = ["var {}".format(i) for i in range(nvariables)]
    fig = plt.figure()
    s = clock()
    for i in range(nvariables):
        for j in range(i, nvariables):
            nsub = i * nvariables + j + 1
            ax = fig.add_subplot(nvariables, nvariables, nsub)
            ax.tick_params(left="off", bottom="off", right="off", top="off", labelbottom="off", labelleft="off")
            ax.spines["top"].set_visible(False)
            ax.spines["left"].set_linewidth(0.5)
            ax.spines["bottom"].set_linewidth(0.5)
            ax.spines["right"].set_visible(False)
            if i == j:
                ppl.hist(ax, data[:, i], grid="y")
                ax.set_title(labels[i], fontsize=10)
            else:
                ax.set_xlim([data[:, i].min(), data[:, i].max()])
                ax.set_ylim([data[:, j].min(), data[:, j].max()])
                ax.scatter(data[:, i], data[:, j], marker=".", color="k", s=4)
            ax.tick_params(labelbottom="off", labelleft="off")
    print(clock() - s)
    s = clock()
    plt.savefig(name + "_corr.png", dpi=96, transparent=False, frameon=False, bbox_inches="tight", pad_inches=0.1)
    print(clock() - s)
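The pairs() helper above depends on module-level names that this excerpt does not show (mpl and plt from matplotlib, ppl from prettyplotlib, and clock). Assuming those imports exist, a usage sketch could be:

import numpy as np

data = np.random.randn(200, 3)               # 200 observations of 3 variables
pairs("demo", data, labels=["x", "y", "z"])  # writes demo_corr.png to the working directory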
Example #12
def main():
   strings = ["key_" + str(i) for i in xrange(1000000)]

   t = clock()
   d = dict.fromkeys(strings, 0)
   print round(clock() - t, 2)
   if len(d) < 200: print d
Example #13
File: __init__.py Project: AALEKH/sympy
def doctests():
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass
    import sys
    from timeit import default_timer as clock
    filter = []
    for i, arg in enumerate(sys.argv):
        if '__init__.py' in arg:
            filter = [sn for sn in sys.argv[i+1:] if not sn.startswith("-")]
            break
    import doctest
    globs = globals().copy()
    for obj in globs: #sorted(globs.keys()):
        if filter:
            if not sum([pat in obj for pat in filter]):
                continue
        sys.stdout.write(str(obj) + " ")
        sys.stdout.flush()
        t1 = clock()
        doctest.run_docstring_examples(globs[obj], {}, verbose=("-v" in sys.argv))
        t2 = clock()
        print(round(t2-t1, 3))
Example #14
def R3():
    var('x,y,z')
    f = x+y+z
    t1 = clock()
    a = [bool(f==f) for _ in range(10)]
    t2 = clock()
    return t2 - t1
Example #15
def S3a():
    var("x y z")
    e = (x**y + y**z + z**x)**500
    e = e.expand()
    t1 = clock()
    f = e.diff(x)
    t2 = clock()
    return t2 - t1
Example #16
def S1():
    var("x y z")
    e = (x+y+z+1)**7
    f = e*(e+1)
    t1 = clock()
    f = f.expand()
    t2 = clock()
    return t2 - t1
Example #17
def run_benchmark(n):
    var("x y z w")
    e = (x + y + z + w)**n
    f = e * (e + w)
    t1 = clock()
    g = f.expand()
    t2 = clock()
    print("%s ms" % (1000 * (t2 - t1)))
Example #18
def testit(line):
    if filt in line:
        print(line)
        t1 = clock()
        exec_(line, globals(), locals())
        t2 = clock()
        elapsed = t2-t1
        print("Time:", elapsed, "for", line, "(OK)")
Example #19
def run_benchmark(n):
    x, y = symbols("x y")
    e = (1 + sqrt(3) * x + sqrt(5) * y) ** n
    f = e * (e + sqrt(7))
    t1 = clock()
    f = expand(f)
    t2 = clock()
    print("%s ms" % (1000 * (t2 - t1)))
Example #20
def spherical_averaging(rad, wt, tau, N_atoms_uc, atom_list, csection, eig_list,
                       temperature, eief = True, efixed = 14.7,thetas=None,phis=None,):
    """
    N_atoms_uc - number of atoms in unit cell
    csection - analytic cross-section expression
    kap - kappa vector
    tau - tau
    eig_list - list of eigenvalues
    wt - omega
    temperature - temperature
    eief - True => E_initial = efixed, False => E_final = efixed
    efixed - fixed energy; either E_final or E_initial, subject to eief
    """
    
    args=(rad, N_atoms_uc, atom_list, csection, tau, eig_list, wt, temperature, eief, efixed)

    theta_test=np.pi/2.0
    phi_test = np.pi/4.0

    thetas = np.linspace(0,np.pi,25) 
    phis = np.linspace(0,2*np.pi,25)
    cs = []
    partial_res=[]
    
    # Form Factor
    ff = 0
    for i in range(N_atoms_uc):
        el = elements[atom_list[i].atomicNum]
        val = atom_list[i].valence
        if val != None:
            Mq = el.magnetic_ff[val].M_Q(rad)
        else:
            Mq = el.magnetic_ff[0].M_Q(rad)
        ff = Mq
    
    start1 = clock()
    for t in thetas:
        temp_cs = []
        for p in phis:
            val = single_cross_section_calc(t,p,*args)*ff*np.sin(t)*rad**2
            temp_cs.append(val)
        cs.append(temp_cs)
    end1 = clock()
    calc_time = end1-start1

    start2 = clock()
    for i in range(len(thetas)):
        single_res = simps(cs[i], phis)  # integrate over phi for each theta slice
        partial_res.append(single_res)
    total_res = simps(partial_res,thetas)

    end2 = clock()
    inte_time = end2-start2
    print 'result', total_res
    print 'calc time', calc_time
    print 'inte time', inte_time
    return total_res
Example #21
def inputhook_wx3():
    """Run the wx event loop by processing pending events only.

    This is like inputhook_wx1, but it keeps processing pending events
    until stdin is ready.  After processing all pending events, a call to
    time.sleep is inserted.  This is needed, otherwise, CPU usage is at 100%.
    This sleep time should be tuned though for best performance.
    """
    # We need to protect against a user pressing Control-C when IPython is
    # idle and this is running. We trap KeyboardInterrupt and pass.
    try:
        app = wx.GetApp()
        if app is not None:
            assert wx.Thread_IsMain()

            # The import of wx on Linux sets the handler for signal.SIGINT
            # to 0.  This is a bug in wx or gtk.  We fix by just setting it
            # back to the Python default.
            if not callable(signal.getsignal(signal.SIGINT)):
                signal.signal(signal.SIGINT, signal.default_int_handler)

            evtloop = wx.EventLoop()
            ea = wx.EventLoopActivator(evtloop)
            t = clock()
            while not stdin_ready():
                while evtloop.Pending():
                    t = clock()
                    evtloop.Dispatch()
                app.ProcessIdle()
                # We need to sleep at this point to keep the idle CPU load
                # low.  However, if we sleep too long, GUI response is poor.  As
                # a compromise, we watch how often GUI events are being processed
                # and switch between a short and long sleep time.  Here are some
                # stats useful in helping to tune this.
                # time    CPU load
                # 0.001   13%
                # 0.005   3%
                # 0.01    1.5%
                # 0.05    0.5%
                used_time = clock() - t
                if used_time > 5*60.0:
                    # print 'Sleep for 5 s'  # dbg
                    time.sleep(5.0)
                elif used_time > 10.0:
                    # print 'Sleep for 1 s'  # dbg
                    time.sleep(1.0)
                elif used_time > 0.1:
                    # Few GUI events coming in, so we can sleep longer
                    # print 'Sleep for 0.05 s'  # dbg
                    time.sleep(0.05)
                else:
                    # Many GUI events coming in, so sleep only very little
                    time.sleep(0.001)
            del ea
    except KeyboardInterrupt:
        pass
    return 0
Example #22
def R2():
    def hermite(n,y):
        if n == 1: return 2*y
        if n == 0: return 1
        return expand(2*y*hermite(n-1,y) - 2*(n-1)*hermite(n-2,y))
    t1 = clock()
    hermite(15,var('y'))
    t2 = clock()
    return t2 - t1
Example #23
def run_cross_section(interactionfile, spinfile):
    start = clock()

    # Generate Inputs
    atom_list, jnums, jmats,N_atoms_uc=readFiles(interactionfile,spinfile)
    
    atom_list=atom_list[:N_atoms_uc]
    N_atoms = len(atom_list)

    kx = sp.Symbol('kx', real = True)
    ky = sp.Symbol('ky', real = True)
    kz = sp.Symbol('kz', real = True)
    k = spm.Matrix([kx,ky,kz])
    
    (b,bd) = generate_b_bd_operators(atom_list)
    (a,ad) = generate_a_ad_operators(atom_list, k, b, bd)
    (Sp,Sm) = generate_Sp_Sm_operators(atom_list, a, ad)
    (Sa,Sb,Sn) = generate_Sa_Sb_Sn_operators(atom_list, Sp, Sm)
    (Sx,Sy,Sz) = generate_Sx_Sy_Sz_operators(atom_list, Sa, Sb, Sn)
    print ''
    
    #Ham = generate_Hamiltonian(N_atoms, atom_list, b, bd)
    ops = generate_possible_combinations(atom_list, [Sx,Sy,Sz])
    ops = holstein(atom_list, ops)
    ops = apply_commutation(atom_list, ops)
    ops = replace_bdb(atom_list, ops)

    ops = reduce_options(atom_list, ops)
    list_print(ops)
    
    print "prelims complete. generating cross-section","\n"

    aa = bb = cc = np.array([2.0*np.pi], 'Float64')
    alpha = beta = gamma = np.array([np.pi/2.0], 'Float64')
    vect1 = np.array([[1,0,0]])
    vect2 = np.array([[0,0,1]])
    lattice = Lattice(aa, bb, cc, alpha, beta, gamma, Orientation(vect1, vect2))
    
    tau_list = []
    for i in range(1):
        tau_list.append(np.array([0,0,0], 'Float64'))

    h_list = np.linspace(0.1,6.3,50)
    k_list = np.zeros(h_list.shape)
    l_list = np.zeros(h_list.shape)
    
    w_list = np.linspace(-10,10,50)

    (N_atoms_uc,csection,kaprange,
     tau_list,eig_list,kapvect,wtlist,fflist) = generate_cross_section(interactionfile, spinfile, lattice, ops, 
                                                                tau_list, h_list, k_list, l_list, w_list)
    print csection

    end = clock()
    print "\nFinished %i atoms in %.2f seconds" %(N_atoms,end-start)
    return N_atoms_uc,csection,kaprange,tau_list,eig_list,kapvect,wtlist,fflist
Example #24
File: HeadTail.py Project: robj137/Fun
def main(head = 'head', tail = 'tail'):
  head = head.lower()
  tail = tail.lower()
  wordList = CreateGraph('/usr/share/dict/words', len(head))
  start = clock()
  
  theSum, path = dijkstra(wordList, head,tail)
  print 'The path is', path
  print 'with length', len(path.split('-'))-1
  print 'Time taken = %.5f' %(clock()-start)
Example #25
File: treestar.py Project: daureg/magnet
def full_treestar(G, E, k):
    root = max(G.items(), key=lambda x: len(x[1]))[0]
    start = clock()
    (gold, pred), m = treestar(G, E, k, root)
    end = clock() - start
    C = confusion_matrix(gold, pred)
    fp, tn = C[0, 1], C[0, 0]
    return [accuracy_score(gold, pred),
            f1_score(gold, pred, average='weighted', pos_label=None),
            matthews_corrcoef(gold, pred), fp/(fp+tn), end, 1-len(pred)/m]
Example #26
    def spinwait(self, time_to_wait=0.05):
        now = clock()
        job_index = 0
        slot_freed = False
        for tuple in self.jobs:
            if tuple != None:
                (p, command, tester, start_time, f, slots) = tuple
                # Look for completed tests
                if p.poll() != None:
                    if not self.checkOutputReady(tester):
                        # Here we'll use the files_not_ready set to start a new timer
                        # for waiting for files so that we can do something else while we wait
                        if tester not in self.files_not_ready:
                            self.jobs[job_index] = (p, command, tester, clock(), f, slots)
                            self.files_not_ready.add(tester)
                        # We've been waiting for a while, let's read what we've got
                        # and send it back to the tester.
                        elif now > (start_time + self.default_reporting_time):
                            # Report
                            self.returnToTestHarness(job_index)
                            self.reported_timer = now
                            slot_freed = True

                    # Output is ready
                    else:
                        # Report
                        self.returnToTestHarness(job_index)
                        self.reported_timer = now
                        slot_freed = True

                # Timeouts
                elif now > (start_time + float(tester.specs['max_time'])):
                    # Report
                    self.returnToTestHarness(job_index)
                    self.reported_timer = now
                    slot_freed = True

                # Has the TestHarness done nothing for a while?
                elif now > (self.reported_timer + self.default_reporting_time):
                    # Has the current test been previously reported?
                    if tester not in self.reported_jobs:
                        seconds_to_report = self.default_reporting_time
                        if tester.specs.isValid('min_reported_time'):
                            seconds_to_report = float(tester.specs['min_reported_time'])

                        if now >= self.reported_timer + seconds_to_report:
                            tester.setStatus('RUNNING...', tester.bucket_pending)
                            self.harness.handleTestStatus(tester)
                            self.reported_jobs.add(tester)
                            self.reported_timer = now

            job_index += 1

        if not slot_freed:
            sleep(time_to_wait)
Example #27
    def statusWorker(self, job_container):
        """ Method the status_pool calls when an available thread becomes ready """
        # Wrap entire statusWorker thread inside a try/exception to catch thread errors
        try:
            # There exists the possibility that jobs are still trying to empty out of the
            # status_pool (like this job), and we wish to no longer print any statuses.
            if self.error_state:
                return

            tester = job_container.getTester()

            # If the job is still running for a long period of time and we have not reported
            # this same job already, report it now.
            if tester.isPending():
                if clock() - self.last_reported >= float(tester.getMinReportTime()) and job_container not in self.jobs_reported:
                    # Inform the TestHarness of a long running test (RUNNING...)
                    self.harness.handleTestStatus(job_container)

                    # ...And then set the finished caveat now that the running status has printed
                    tester.addCaveats('FINISHED')

                    # Add this job to the reported container so it does not happen again
                    self.jobs_reported.add(job_container)

                # Job is 'Pending', but is under the threshold to be reported (return now so
                # last_reported time does not get updated). This will ensure that if nothing
                # has happened between 'now' and another occurrence of our thread timer event
                # we do report it.
                else:
                    return

            else:
                # All other statuses are sent unmolested
                self.harness.handleTestStatus(job_container)

            # Record current reported time only if it is an activity the user will see
            if not tester.isSilent() or not tester.isDeleted():
                self.last_reported = clock()

            # Do job_queue_count decrement last. If zero, the thread pools may close prematurely,
            # preventing the last job status from printing (see waitFinish method).
            if tester.isFinished():
                with self.job_queue_lock:
                    self.job_queue_count -= 1

        except Exception:
            self.error_state = True
            print('Test: %s, statusWorker Exception: %s' % (tester.getTestName(), traceback.format_exc()))

        except KeyboardInterrupt:
            # Just set the error state. The base class will catch the KeyboardInterrupt, and will
            # print the appropriate message.
            self.error_state = True
            pass
Example #28
def R5():
    def blowup(L,n):
        for i in range(n):
            L.append( (L[i] + L[i+1]) * L[i+2] )
    (x, y, z)=var('x,y,z')
    L = [x, y, z]
    blowup(L, 8)
    t1 = clock()
    L=uniq(L)
    t2 = clock()
    return t2 - t1
Example #29
def test_distarray(dist, context):
    global draw_coord
    global julia
    local_draw_coord = local(draw_coord)
    vect_julia = vectorize(julia)
    darr = make_empty_da(resolution, dist, context)
    darr = local_draw_coord(darr, re_ax, im_ax, resolution)
    start = clock()
    darr = vect_julia(darr, c, z_max, n_max)
    stop = clock()
    return stop - start
Example #30
    def jobStatus(self, job, Jobs, j_lock):
        """
        Instruct the TestHarness to print the status of job. This is a serial
        threaded operation, so as to prevent clobbering of text being printed
        to stdout.
        """
        if self.status_pool._state:
            return

        # Its possible, the queue is just trying to empty
        try:
            job_was_running = False
            # Check if we should print due to inactivity
            with j_lock:
                if job.isRunning():
                    if job in self.jobs_reported:
                        return

                    # we have not yet been inactive long enough to report
                    elif clock() - self.last_reported_time < self.min_report_time:
                        return

                    job_was_running = True
                    job.addCaveats('FINISHED')

                    with self.activity_lock:
                        self.jobs_reported.add(job)

            # Immediately following the Job lock, print the status
            self.harness.handleJobStatus(job)

            # We just reported 'something', restart the clock
            self.last_reported_time = clock()

            # Do last, to prevent premature thread pool closures
            with j_lock:
                if job.isFinished() and not job_was_running:
                    tester = job.getTester()
                    if tester.isFail():
                        self.__failures += 1

                    if self.maxFailures():
                        self.killRemaining()
                    else:
                        with self.job_count_lock:
                            self.job_count -= 1

        except Exception:
            print('statusWorker Exception: %s' % (traceback.format_exc()))
            self.killRemaining()

        except KeyboardInterrupt:
            self.killRemaining(keyboard=True)
Example #31
def inputhook_glut():
    """Run the pyglet event loop by processing pending events only.

    This keeps processing pending events until stdin is ready.  After
    processing all pending events, a call to time.sleep is inserted.  This is
    needed, otherwise, CPU usage is at 100%.  This sleep time should be tuned
    though for best performance.
    """
    # We need to protect against a user pressing Control-C when IPython is
    # idle and this is running. We trap KeyboardInterrupt and pass.

    signal.signal(signal.SIGINT, glut_int_handler)

    try:
        t = clock()

        # Make sure the default window is set after a window has been closed
        if glut.glutGetWindow() == 0:
            glut.glutSetWindow( 1 )
            glutMainLoopEvent()
            return 0

        while not stdin_ready():
            glutMainLoopEvent()
            # We need to sleep at this point to keep the idle CPU load
            # low.  However, if we sleep too long, GUI response is poor.  As
            # a compromise, we watch how often GUI events are being processed
            # and switch between a short and long sleep time.  Here are some
            # stats useful in helping to tune this.
            # time    CPU load
            # 0.001   13%
            # 0.005   3%
            # 0.01    1.5%
            # 0.05    0.5%
            used_time = clock() - t
            if used_time > 5*60.0:
                # print 'Sleep for 5 s'  # dbg
                time.sleep(5.0)
            elif used_time > 10.0:
                # print 'Sleep for 1 s'  # dbg
                time.sleep(1.0)
            elif used_time > 0.1:
                # Few GUI events coming in, so we can sleep longer
                # print 'Sleep for 0.05 s'  # dbg
                time.sleep(0.05)
            else:
                # Many GUI events coming in, so sleep only very little
                time.sleep(0.001)
    except KeyboardInterrupt:
        pass
    return 0
Example #32
 def __add__(self, increment):
     if self.sof is None:
         self.sof = clock()
         elapsed = 0
     else:
         elapsed = clock() - self.sof
     increment = self._get_progress(increment)
     if 100 > self.progress + increment:
         self.progress += increment
         self.remaining = 100 * elapsed / self.progress - elapsed
     else:
         self.progress = 100
         self.remaining = 0
     return self
Example #33
    def inputhook(context):
        """Run the event loop to process window events

        This keeps processing pending events until stdin is ready.  After
        processing all pending events, a call to time.sleep is inserted.  This is
        needed, otherwise, CPU usage is at 100%.  This sleep time should be tuned
        though for best performance.
        """

        try:
            t = clock()

            # Make sure the default window is set after a window has been closed

            while not context.input_is_ready():

                outfile.write(datetime.datetime.now().__str__() + "\n")
                outfile.flush()

                _mechanica.pollEvents()

                continue

                # We need to sleep at this point to keep the idle CPU load
                # low.  However, if we sleep too long, GUI response is poor.  As
                # a compromise, we watch how often GUI events are being processed
                # and switch between a short and long sleep time.  Here are some
                # stats useful in helping to tune this.
                # time    CPU load
                # 0.001   13%
                # 0.005   3%
                # 0.01    1.5%
                # 0.05    0.5%
                used_time = clock() - t
                if used_time > 10.0:
                    # print 'Sleep for 1 s'  # dbg
                    time.sleep(1.0)
                elif used_time > 0.1:
                    # Few GUI events coming in, so we can sleep longer
                    # print 'Sleep for 0.05 s'  # dbg
                    time.sleep(0.05)
                else:
                    # Many GUI events coming in, so sleep only very little
                    time.sleep(0.001)

        except KeyboardInterrupt:
            pass

        outfile.write("user input ready, returning, " +
                      datetime.datetime.now().__str__())
Example #34
def lower_request(start_time, bbox, db, num_pages):
    failed_page = []
    total = 0
    hstart = clock()
    for page in range(1, num_pages + 1):
        start = clock()
        res, _ = make_request(start_time, bbox, page)
        if res is None:
            failed_page.append(page)
        else:
            took = ' ({:.4f}s)'.format(clock() - start)
            logging.info('Get result for page {}{}'.format(page, took))
            saved = save_to_mongo(res, db)
            took = ' ({:.4f}s)'.format(clock() - start)
            page_desc = 'page {}, {} photos {}'.format(page, saved, took)
            logging.info('successfully insert ' + page_desc)
            total += saved
            sleep(1)
    for page in failed_page:
        start = clock()
        res, _ = make_request(start_time, bbox, page, need_answer=True)
        if res is None:
            took = ' ({:.4f}s)'.format(clock() - start)
            logging.warn('Failed to get page {}{}'.format(page, took))
        else:
            saved = save_to_mongo(res, photos)
            took = ' ({:.4f}s)'.format(clock() - start)
            page_desc = 'page {}, {} photos {}'.format(page, saved, took)
            logging.info('Finally get ' + page_desc)
            total += saved
            sleep(1)
    logging.info('Finish {}: {} photos in {}s'.format(bbox, total,
                                                      clock() - hstart))
    return total
Example #35
def R8():
    def right(f,a,b,n):
       Deltax = (b-a)/n
       c = a
       est = 0
       for i in range(n):
           c += Deltax
           est += f.subs({x: c})
       return est*Deltax
    var('x')
    t1 = clock()
    right(x**2, 0, 5, 10**4)
    t2 = clock()
    return t2 - t1
Example #36
def inputhook_wx3(context):
    """Run the wx event loop by processing pending events only.

    This is like inputhook_wx1, but it keeps processing pending events
    until stdin is ready.  After processing all pending events, a call to
    time.sleep is inserted.  This is needed, otherwise, CPU usage is at 100%.
    This sleep time should be tuned though for best performance.
    """
    app = wx.GetApp()
    if app is not None:
        assert wx.Thread_IsMain()

        # The import of wx on Linux sets the handler for signal.SIGINT
        # to 0.  This is a bug in wx or gtk.  We fix by just setting it
        # back to the Python default.
        if not callable(signal.getsignal(signal.SIGINT)):
            signal.signal(signal.SIGINT, signal.default_int_handler)

        evtloop = wx.EventLoop()
        ea = wx.EventLoopActivator(evtloop)
        t = clock()
        while not context.input_is_ready():
            while evtloop.Pending():
                t = clock()
                evtloop.Dispatch()
            app.ProcessIdle()
            # We need to sleep at this point to keep the idle CPU load
            # low.  However, if we sleep too long, GUI response is poor.  As
            # a compromise, we watch how often GUI events are being processed
            # and switch between a short and long sleep time.  Here are some
            # stats useful in helping to tune this.
            # time    CPU load
            # 0.001   13%
            # 0.005   3%
            # 0.01    1.5%
            # 0.05    0.5%
            used_time = clock() - t
            if used_time > 10.0:
                # print 'Sleep for 1 s'  # dbg
                time.sleep(1.0)
            elif used_time > 0.1:
                # Few GUI events coming in, so we can sleep longer
                # print 'Sleep for 0.05 s'  # dbg
                time.sleep(0.05)
            else:
                # Many GUI events coming in, so sleep only very little
                time.sleep(0.001)
        del ea
    return 0
Example #37
def setup_connection() :
    start = clock()
    #-----------------------------------
    if (debug) :
        time_print("Setup connection.")
    #-----------------------------------
    global sock
    try :
        thr_listen = threading.Thread(target=listen_to_others)
        thr_listen.start()
        thr_connect = threading.Thread(target=connect_to_others)
        thr_connect.start()
        thr_listen.join()
        thr_connect.join()
        stop = clock()
        if (debug) :
            print("Setup connection :","{0:.3f}".format(stop-start))
        for x in sock :
            x.settimeout(None)
        for i in range(n_node) :
            if (i!=node_id) :
                try :
                    sock[i].send("OK")
                except :
                    raise Exception("Connection error. Cannot send validate message to %s."%i2ip[i])
        for i in range(n_node) :
            if (i!=node_id) :
                try :
                    msg = sock[i].recv(2)
                except :
                    time_print(traceback.format_exc())
                    raise Exception("Validate recv: socket[%i] with %s."%(i,i2ip[i]))
                if (msg!="OK") :
                    raise Exception("Connection error. Message is not \"OK\".")
#        for x in sock :
#            x.settimeout(None)
        sock[node_id].close()
        sock[node_id] = None
        #-----------------------------------
        if (debug) :
            time_print("Setup succeed.")
        #-----------------------------------
    except :
        #-----------------------------------
        if (debug) :
            time_print("Setup failed.")
        #-----------------------------------
        time_print(traceback.format_exc())
Example #38
def R5():
    def blowup(L, n):
        for i in range(n):
            L.append((L[i] + L[i + 1]) * L[i + 2])

    def uniq(x):
        v = list(set(x))
        return v

    var('x y z')
    L = [x, y, z]
    blowup(L, 8)
    t1 = clock()
    L = uniq(L)
    t2 = clock()
    return t2 - t1
Example #39
    def __init__(self, harness, max_processes=1, average_load=64.0):
        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        ## List of currently running jobs as (Popen instance, command, test, time when expires) tuples
        # None means no job is running in this slot
        self.jobs = [None] * max_processes

        # Requested average load level to stay below
        self.average_load = average_load

        # queue for jobs needing a prereq
        self.queue = deque()

        # Jobs that have been finished
        self.finished_jobs = set()

        # List of skipped jobs to resolve prereq issues for tests that never run
        self.skipped_jobs = set()

        # Jobs we are reporting as taking longer than 10% of MAX_TIME
        self.reported_jobs = set()

        # Reporting timer which resets whenever data is printed to the screen.
        self.reported_timer = clock()
Example #40
 def update(self, current_size):
     """
     current_size ... the current size of the downloading file
     """
     time_delta = clock() - self._t1
     f1 = self._bar_length * current_size / self._total_size
     f2 = self._bar_length - f1
     percent = 100. * current_size / self._total_size
     if time_delta == 0:
         rate_eta_str = ""
     else:
         rate = 1. * current_size / time_delta  # in bytes / second
         eta = (self._total_size - current_size) / rate  # in seconds
         rate_eta_str = "%.3fMB/s ETA " % (rate / 1024.**2)
         if eta < 70:
             rate_eta_str += "%ds" % (int(eta))
         else:
             minutes = int(eta / 60)
             seconds = eta - minutes * 60
             rate_eta_str += "%dmin %ds" % (minutes, seconds)
     msg = "\r[" + "="*f1 + " "*f2 + "] %4.1f%% (%.1fMB of %.1fMB) %s  " % \
             (percent, current_size / 1024.**2, self._total_size / 1024.**2,
                     rate_eta_str)
     sys.stdout.write(msg)
     sys.stdout.flush()
Example #41
 def start(self):
     self.write_center("test process starts")
     executable = sys.executable
     v = tuple(sys.version_info)
     python_version = "%s.%s.%s-%s-%s" % v
     self.write("executable:   %s  (%s)\n\n" % (executable, python_version))
     self._t_start = clock()
Example #42
File: Job.py Project: vanwdani/moose
    def run(self):
        """
        A blocking method to handle the exit status of the process object while keeping track of the
        time the process was active. When the process exits, read the output and close the file.
        """

        # Do not execute app, but allow processResults to commence
        if not self.__tester.shouldExecute():
            return

        if self.options.pedantic_checks:
            # Before the job does anything, get the times files below it were last modified
            self.fileChecker.get_all_files(self, self.fileChecker.getOriginalTimes())
            time.sleep(1)

        self.__tester.prepare(self.options)

        self.__start_time = clock()
        self.timer.reset()
        self.__tester.run(self.timer, self.options)
        self.__start_time = self.timer.starts[0]
        self.__end_time = self.timer.ends[-1]
        self.__joined_out = self.__tester.joined_out

        if self.options.pedantic_checks:
            # Check if the files we checked on earlier were modified.
            self.fileChecker.get_all_files(self, self.fileChecker.getNewTimes())
            self.modifiedFiles = self.fileChecker.check_changes(self.fileChecker.getOriginalTimes(), self.fileChecker.getNewTimes())
Example #43
 def __init__(self, initial_waiting_time):
     """`initial_waiting_time` is in minutes."""
     #print 'twitter_helper.py/Failures/__init__'
     self.total_failures = 0
     self.last_failure = clock()
     self.initial_waiting_time = float(initial_waiting_time) * 60.0
     self.reset()
Example #44
 def __init__(self, total_size, bar_length=25):
     """
     total_size ... the size in bytes of the file to be downloaded
     """
     self._total_size = total_size
     self._bar_length = bar_length
     self._t1 = clock()
Example #45
    def __init__(self, tester, job_dag, options):
        self.options = options
        self.__tester = tester
        self.__job_dag = job_dag
        self.timer = Timer()
        self.__outfile = None
        self.__start_time = clock()
        self.__end_time = None
        self.__previous_time = None
        self.__joined_out = ''
        self.report_timer = None
        self.__slots = None
        self.__meta_data = {}

        # Enumerate available job statuses
        self.status = StatusSystem.JobStatus()

        self.hold = self.status.hold
        self.queued = self.status.queued
        self.running = self.status.running
        self.skip = self.status.skip
        self.crash = self.status.crash
        self.error = self.status.error
        self.finished = self.status.finished

        # Initialize the Tester statuses
        self.__tester.initStatusSystem(self.options)
Example #46
  def returnToTestHarness(self, job_index):
    (p, command, tester, time, f, slots) = self.jobs[job_index]

    log( 'Command %d done:    %s' % (job_index, command) )
    did_pass = True

    output = 'Working Directory: ' + tester.specs['test_dir'] + '\nRunning command: ' + command + '\n'
    output += self.readOutput(f)
    if p.poll() == None: # process has not completed, it timed out
      output += '\n' + "#"*80 + '\nProcess terminated by test harness. Max time exceeded (' + str(tester.specs['max_time']) + ' seconds)\n' + "#"*80 + '\n'
      f.close()
      if platform.system() == "Windows":
        p.terminate()
      else:
        pgid = os.getpgid(p.pid)
        os.killpg(pgid, SIGTERM)

      if not self.harness.testOutputAndFinish(tester, RunParallel.TIMEOUT, output, time, clock()):
        did_pass = False
    else:
      f.close()

      if tester in self.reported_jobs:
        tester.specs.addParam('caveats', ['FINISHED'], "")

      if not self.harness.testOutputAndFinish(tester, p.returncode, output, time, clock()):
        did_pass = False

    if did_pass:
      self.finished_jobs.add(tester.specs['test_name'])
    else:
      self.skipped_jobs.add(tester.specs['test_name'])

    self.jobs[job_index] = None
    self.slots_in_use = self.slots_in_use - slots
Example #47
  def run(self, tester, command, recurse=True):
    # First see if any of the queued jobs can be run but only if recursion is allowed on this run
    if recurse:
      self.startReadyJobs()

    # Now make sure that this job doesn't have an unsatisfied prereq
    if tester.specs['prereq'] != None and len(set(tester.specs['prereq']) - self.finished_jobs):
      self.queue.append([tester, command, os.getcwd()])
      return

    # Make sure we are complying with the requested load average
    self.satisfyLoad()

    # Wait for a job to finish if the jobs queue is full
    while self.jobs.count(None) == 0:
      self.spinwait()

    # Pre-run preparation
    tester.prepare()

    job_index = self.jobs.index(None) # find an empty slot
    log( 'Command %d started: %s' % (job_index, command) )

    # It seems that using PIPE doesn't work very well when launching multiple jobs.
    # It deadlocks rather easily.  Instead we will use temporary files
    # to hold the output as it is produced
    try:
      f = TemporaryFile()
      p = Popen([command],stdout=f,stderr=f,close_fds=False, shell=True)
    except:
      print "Error in launching a new task"
      raise

    self.jobs[job_index] = (p, command, tester, clock(), f)
Example #48
def run_benchmark(n):
    a0 = symbols("a0")
    a1 = symbols("a1")
    e = a0 + a1
    f = 0
    for i in range(2, n):
        s = symbols("a%s" % i)
        e = e + sin(s)
        f = f + sin(s)
    f = -f
    t1 = clock()
    e = expand(e**2)
    e = e.xreplace({a0: f})
    e = expand(e)
    t2 = clock()
    print("%s ms" % (1000 * (t2 - t1)))
Example #49
def inputhook_pyglet():
    """Run the pyglet event loop by processing pending events only.

    This keeps processing pending events until stdin is ready.  After
    processing all pending events, a call to time.sleep is inserted.  This is
    needed, otherwise, CPU usage is at 100%.  This sleep time should be tuned
    though for best performance.
    """
    # We need to protect against a user pressing Control-C when IPython is
    # idle and this is running. We trap KeyboardInterrupt and pass.
    try:
        t = clock()
        while not stdin_ready():
            pyglet.clock.tick()
            for window in pyglet.app.windows:
                window.switch_to()
                window.dispatch_events()
                window.dispatch_event('on_draw')
                flip(window)

            # We need to sleep at this point to keep the idle CPU load
            # low.  However, if we sleep too long, GUI response is poor.  As
            # a compromise, we watch how often GUI events are being processed
            # and switch between a short and long sleep time.  Here are some
            # stats useful in helping to tune this.
            # time    CPU load
            # 0.001   13%
            # 0.005   3%
            # 0.01    1.5%
            # 0.05    0.5%
            used_time = clock() - t
            if used_time > 5 * 60.0:
                # print 'Sleep for 5 s'  # dbg
                time.sleep(5.0)
            elif used_time > 10.0:
                # print 'Sleep for 1 s'  # dbg
                time.sleep(1.0)
            elif used_time > 0.1:
                # Few GUI events coming in, so we can sleep longer
                # print 'Sleep for 0.05 s'  # dbg
                time.sleep(0.05)
            else:
                # Many GUI events coming in, so sleep only very little
                time.sleep(0.001)
    except KeyboardInterrupt:
        pass
    return 0
Example #50
    def returnToTestHarness(self, job_index):
        (p, command, tester, time, f, slots) = self.jobs[job_index]

        log('Command %d done:    %s' % (job_index, command))
        did_pass = True

        output = 'Working Directory: ' + tester.specs[
            'test_dir'] + '\nRunning command: ' + command + '\n'

        if tester.specs.isValid('redirect_output') and tester.specs[
                'redirect_output'] and tester.getProcs(self.options) > 1:
            # If the tester enabled redirect_stdout and is using more than one processor
            output += self.getOutputFromFiles(tester)
        else:
            # Handle the case where the tester did not inherit from RunApp (like analyzejacobian)
            output += self.readOutput(f)

        if p.poll() == None:  # process has not completed, it timed out
            output += '\n' + "#" * 80 + '\nProcess terminated by test harness. Max time exceeded (' + str(
                tester.specs['max_time']) + ' seconds)\n' + "#" * 80 + '\n'
            f.close()
            if platform.system() == "Windows":
                p.terminate()
            else:
                pgid = os.getpgid(p.pid)
                os.killpg(pgid, SIGTERM)

            if not self.harness.testOutputAndFinish(
                    tester, RunParallel.TIMEOUT, output, time, clock()):
                did_pass = False
        else:
            f.close()

            if tester in self.reported_jobs:
                tester.specs.addParam('caveats', ['FINISHED'], "")

            if not self.harness.testOutputAndFinish(tester, p.returncode,
                                                    output, time, clock()):
                did_pass = False

        if did_pass:
            self.finished_jobs.add(tester.specs['test_name'])
        else:
            self.skipped_jobs.add(tester.specs['test_name'])

        self.jobs[job_index] = None
        self.slots_in_use = self.slots_in_use - slots
Example #51
def run():
    try:
        f = open('patterns.txt', 'r')
        start = clock()
        states = calculateStates()

        for line in f.read().splitlines():
            print(solve(toPattern(line), states))

        print('Runtime (s) =',
              clock() - start)  # I consistently get 0.024~0.032
        f.close()  # seconds on Intel Core i5-7200U.

    except FileNotFoundError:
        print('Where\'s your patterns.txt, sahib?')
    except:
        print('Oops. Something went sideways.')
Example #52
    def __init__(self, harness, params):
        MooseObject.__init__(self, harness, params)

        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # The Scheduler class can be initialized with no "max_processes" argument and it'll default
        # to a soft limit. If however a max_processes is passed we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed the number of slots.
        if params['max_processes'] == None:
            self.available_slots = 1
            self.soft_limit = True
        else:
            self.available_slots = params['max_processes'] # hard limit
            self.soft_limit = False

        # Requested average load level to stay below
        self.average_load = params['average_load']

        # The time the status queue reported no activity to the TestHarness
        self.last_reported = clock()

        # A set containing jobs that have been reported
        self.jobs_reported = set([])

        # Initialize run_pool based on available slots
        self.run_pool = ThreadPool(processes=self.available_slots)

        # Initialize status_pool to only use 1 process (to prevent status messages from getting clobbered)
        self.status_pool = ThreadPool(processes=1)

        # Slot Lock when processing resource allocations
        self.slot_lock = threading.Lock()

        # DAG Lock when processing the DAG
        self.dag_lock = threading.Lock()

        # Queue Lock when processing thread pool states
        self.queue_lock = threading.Lock()

        # Job Count Lock when processing job_queue_count
        self.job_queue_lock = threading.Lock()

        # Workers in use (single job might request multiple slots)
        self.slots_in_use = 0

        # Jobs waiting to finish (includes actively running jobs)
        self.job_queue_count = 0

        # Set containing our Job containers. We use this in the event of a KeyboardInterrupt to
        # iterate over and kill any subprocesses
        self.tester_datas = set([])

        # Allow threads to set an exception state
        self.error_state = False
Example #53
    def cleanup(self):
        # Print the results table again if a bunch of output was spewed to the screen between
        # tests as they were running
        if (self.options.verbose or
            (self.num_failed != 0
             and not self.options.quiet)) and not self.options.dry_run:
            print('\n\nFinal Test Results:\n' + ('-' * (util.TERM_COLS - 1)))
            for (tester_data, result, timing) in sorted(self.test_table,
                                                        key=lambda x: x[1],
                                                        reverse=True):
                print(util.formatResult(tester_data, result, self.options))

        time = clock() - self.start_time

        print('-' * (util.TERM_COLS - 1))

        # Mask off TestHarness error codes to report parser errors
        fatal_error = ''
        if self.error_code & Parser.getErrorCodeMask():
            fatal_error += ', <r>FATAL PARSER ERROR</r>'
        if self.error_code & ~Parser.getErrorCodeMask():
            fatal_error += ', <r>FATAL TEST HARNESS ERROR</r>'

        # Print a different footer when performing a dry run
        if self.options.dry_run:
            print('Processed %d tests in %.1f seconds' %
                  (self.num_passed + self.num_skipped, time))
            summary = '<b>%d would run</b>'
            summary += ', <b>%d would be skipped</b>'
            summary += fatal_error
            print(util.colorText( summary % (self.num_passed, self.num_skipped),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        else:
            print('Ran %d tests in %.1f seconds' %
                  (self.num_passed + self.num_failed, time))

            if self.num_passed:
                summary = '<g>%d passed</g>'
            else:
                summary = '<b>%d passed</b>'
            summary += ', <b>%d skipped</b>'
            if self.num_failed:
                summary += ', <r>%d FAILED</r>'
            else:
                summary += ', <b>%d failed</b>'
            summary += fatal_error

            print(util.colorText( summary % (self.num_passed, self.num_skipped, self.num_failed),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        if self.file:
            self.file.close()

        # Close the failed_tests file
        if self.writeFailedTest != None:
            self.writeFailedTest.close()
Example #54
 def start(self):
     t = clock()
     t_start = t
     for i, skip in self.wait(fps=self.fps):
         t_new = clock()
         self.screengrab.get_from_drawable(
             gtk.gdk.get_default_root_window(),
             gtk.gdk.colormap_get_system(), self.x, self.y, 0, 0,
             self.width, self.height)
         img = self.screengrab.get_pixels()
         frame_header = (len(img), skip)
         dump(frame_header, self._datafile)
         self._datafile.write(img)
         i += 1
         print "time: %.3f, frame: %04d, current fps: %6.3f, skip: %d, " \
                 "lag: %.6f" % ( t_new-t_start, i, 1/(t_new-t), skip,
                         t_new-t_start - float(i)/self.fps)
         t = t_new
Example #55
 def start(self):
     self.write_center("test process starts")
     executable = sys.executable
     v = tuple(sys.version_info)
     python_version = "%s.%s.%s-%s-%s" % v
     self.write("executable:   %s  (%s)\n" % (executable, python_version))
     from sympy.polys.domains import GROUND_TYPES
     self.write("ground types: %s\n\n" % GROUND_TYPES)
     self._t_start = clock()
Example #56
def timing(f, *args, **kwargs):
    """
    Returns time elapsed for evaluating ``f()``. Optionally arguments
    may be passed to time the execution of ``f(*args, **kwargs)``.

    If the first call is very quick, ``f`` is called
    repeatedly and the best time is returned.
    """
    once = kwargs.get("once")
    if "once" in kwargs:
        del kwargs["once"]
    if args or kwargs:
        if len(args) == 1 and not kwargs:
            arg = args[0]
            g = lambda: f(arg)
        else:
            g = lambda: f(*args, **kwargs)
    else:
        g = f
    from timeit import default_timer as clock

    t1 = clock()
    v = g()
    t2 = clock()
    t = t2 - t1
    if t > 0.05 or once:
        return t
    for i in range(3):
        t1 = clock()
        # Evaluate multiple times because the timer function
        # has a significant overhead
        g()
        g()
        g()
        g()
        g()
        g()
        g()
        g()
        g()
        g()
        t2 = clock()
        t = min(t, (t2 - t1) / 10)
    return t
Example #57
    def statusWorker(self, job_container):
        """ Method the status_pool calls when an available thread becomes ready """
        # Wrap entire statusWorker thread inside a try/exception to catch thread errors
        try:
            tester = job_container.getTester()

            # If the job is still running for a long period of time and we have not reported
            # this same job already, report it now.
            if tester.isPending():
                if clock() - self.last_reported >= float(
                        tester.getMinReportTime(
                        )) and job_container not in self.jobs_reported:
                    # Inform the TestHarness of a long running test (RUNNING...)
                    self.harness.handleTestStatus(job_container)

                    # ...And then set the finished caveat now that the running status has printed
                    tester.specs.addParam('caveats', ['FINISHED'], "")

                    # Add this job to the reported container so it does not happen again
                    self.jobs_reported.add(job_container)

                # Job is 'Pending', but is under the threshold to be reported (return now so
                # last_reported time does not get updated). This will ensure that if nothing
                # has happened between 'now' and another occurrence of our thread timer event
                # we do report it.
                else:
                    return

            else:
                # All other statuses are sent unmolested
                self.harness.handleTestStatus(job_container)

            # Decrement the job queue count now that this job has finished
            if tester.isFinished():
                with self.slot_lock:
                    self.job_queue_count -= 1

            # Record current reported time only if it is an activity the user will see
            if not tester.isSilent() or not tester.isDeleted():
                self.last_reported = clock()

        except Exception as e:
            print('statusWorker Exception: %s' % (e))
            self.killRemaining()
Example #58
File: par.py Project: hudec/machlearn
def main_sync(delka):
    t1 = clock()

    mapa = [0, 2, 1, 3]
    print('abeceda', ABCD, 'mapa', mapa)
    predpisy = generuj_predpisy(ABCD, mapa)
    print('pocet predpisu', len(predpisy))

    vysledky = [najdi_bez_3_mocniny_sync(predpis, delka) for predpis in predpisy]

    for posloupnost, predpis in vysledky:
        if posloupnost != None:
            print(posloupnost)
            print(predpis)

    t2 = clock()
    print('Doba zpracovani (s)', t2 - t1)

    print_prof_data()
Example #59
def doctests(filter=[]):
    import sys
    from timeit import default_timer as clock
    for i, arg in enumerate(sys.argv):
        if '__init__.py' in arg:
            filter = [sn for sn in sys.argv[i+1:] if not sn.startswith("-")]
            break
    import doctest
    globs = globals().copy()
    for obj in globs: #sorted(globs.keys()):
        if filter:
            if not sum([pat in obj for pat in filter]):
                continue
        sys.stdout.write(str(obj) + " ")
        sys.stdout.flush()
        t1 = clock()
        doctest.run_docstring_examples(globs[obj], {}, verbose=("-v" in sys.argv))
        t2 = clock()
        print(round(t2-t1, 3))
Example #60
 def __init__(self, tester, tester_dag, options):
     self.options = options
     self.__tester = tester
     self.timer = Timer()
     self.__dag = tester_dag
     self.__dag_clone = None
     self.__outfile = None
     self.__start_time = clock()
     self.__end_time = None
     self.__std_out = ''
     self.report_timer = None