def test_cross_first_vs_resample_first():
    c.upsample_factor = 100
    c.subsignal_length_max = 2**19
    c.time_domain_padding = 100
    sig_lengths = [int(x) for x in np.linspace(12000, 16000, 20)]  # np.int was removed in NumPy 1.24; use the builtin
    x_first_times = []
    resample_first_times = []
    for sig_len in sig_lengths:
        c.subsignal_length_max = sig_len
        x_first_times.append(
            timeit.timeit(
                stmt = "c.do_time_domain_cross_correlation_cross_first()",
                setup = "from __main__ import c",
                number = 1))
        resample_first_times.append(
            timeit.timeit(
                stmt = "c.do_time_domain_cross_correlation_resample_first()",
                setup = "from __main__ import c",
                number = 1))
        print("{l} -> {t}".format(l = sig_len, t = resample_first_times[-1]))
    fig = plt.figure()
    ax1 = fig.gca()
    ax1.plot(sig_lengths, x_first_times, 'b', label='x first')
    ax1.plot(sig_lengths, resample_first_times, 'r',  label="resample first")
    #ax2 = ax1.twinx()
    #ax2.plot(sig_lengths, resample_first_times, 'r',  label="resample first")
    ax1.legend()
    #ax2.legend()
    plt.show()
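# A note on the setup="from __main__ import c" pattern above: since Python 3.5,
# timeit.timeit accepts a globals= argument that avoids the import string
# entirely. A minimal sketch (the Correlator class is a hypothetical stand-in
# for the benchmarked object `c`):
import timeit

class Correlator:
    """Hypothetical stand-in for the benchmarked object `c` above."""
    def do_time_domain_cross_correlation_cross_first(self):
        sum(range(1000))  # dummy workload

c = Correlator()

# globals=globals() lets the statement see `c` directly, with no setup string.
print(timeit.timeit("c.do_time_domain_cross_correlation_cross_first()",
                    globals=globals(), number=1))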
Example #2
def benchmark_fd_exact_grad_bar_g():
    print(timeit.timeit('bench_fd_grad()',
                        setup='from dccsupport import bench_fd_grad',
                        number=1000))
    print(timeit.timeit('bench_exact_grad()',
                        setup='from dccsupport import bench_exact_grad',
                        number=1000))
Example #3
def test_group_list(app, db):  # access db and ui via app and db fixtures
    print(timeit(lambda: app.group.get_gr_list(), number=1))  # by default timeit runs the callable a million times, hence number=1

    def clean(group):
        return Group(id=group.id, name=group.name.strip())  # remove extra spaces to match db and ui data
    print(timeit(lambda: list(map(clean, db.get_group_list())), number=1000))  # materialize the map: a bare map object is lazy and would time nothing
    assert False # sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
Example #4
    def get_speedup_factor(self, baseline_fcn, optimized_fcn, num_tests):
        baseline_performance = timeit.timeit(baseline_fcn, number=num_tests)
        optimized_performance = timeit.timeit(optimized_fcn, number=num_tests)
        _message = "Performance test too fast, susceptible to overhead"
        T.assert_gt(baseline_performance, 0.005, _message)
        T.assert_gt(optimized_performance, 0.005, _message)
        return baseline_performance / optimized_performance
Example #5
def _fast_crc(count=50):
    """
    On certain platforms/builds zlib.adler32 is substantially
    faster than zlib.crc32, but it is not consistent across
    Windows/Linux/OSX.

    This function runs a quick check (2ms on my machines) to
    determine the fastest hashing function available in zlib.

    Parameters
    ------------
    count: int, number of repetitions to do on the speed trial

    Returns
    ----------
    crc32: function, either zlib.adler32 or zlib.crc32
    """
    import timeit

    setup = 'import numpy, zlib;'
    setup += 'd = numpy.random.random((500,3));'

    crc32 = timeit.timeit(setup=setup,
                          stmt='zlib.crc32(d)',
                          number=count)
    adler32 = timeit.timeit(setup=setup,
                            stmt='zlib.adler32(d)',
                            number=count)
    if adler32 < crc32:
        return zlib.adler32
    else:
        return zlib.crc32
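# Usage sketch for the _fast_crc above (assumes `import zlib` at module level,
# as the returned functions imply):
import zlib

crc32 = _fast_crc(count=50)
print(crc32.__name__, crc32(b"example payload"))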
def main():
    number = 10000
    precision = 4

    def report(stmt, setup):
        """Time stmt and print a per-loop figure with sensible units."""
        best = timeit.timeit(setup=setup, stmt=stmt, number=number)
        usec = best * 1e6 / number
        if usec < 1000:
            print("%s: %.*g usec per loop" % (stmt, precision, usec))
        elif usec < 1e6:
            print("%s: %.*g msec per loop" % (stmt, precision, usec / 1000))
        else:
            print("%s: %.*g sec per loop" % (stmt, precision, usec / 1e6))

    report("parse_json()", "from __main__ import parse_json")
    report("parse_pb()", "from __main__ import parse_pb")
Example #7
    def run(self):
        """Perform the oct2py speed analysis.

        Uses timeit to test the raw execution of an Octave command,
        then tests progressively larger array passing.

        """
        print('oct2py speed test')
        print('*' * 20)
        time.sleep(1)

        print('Raw speed: ')
        avg = timeit.timeit(self.raw_speed, number=200) / 200
        print('    {0:0.01f} usec per loop'.format(avg * 1e6))
        sides = [1, 10, 100, 1000]
        runs = [200, 200, 100, 10]
        for (side, nruns) in zip(sides, runs):
            self.array = np.reshape(np.arange(side ** 2), (-1))
            print('Put {0}x{1}: '.format(side, side))
            avg = timeit.timeit(self.large_array_put, number=nruns) / nruns
            print('    {0:0.01f} msec'.format(avg * 1e3))

            print('Get {0}x{1}: '.format(side, side))
            avg = timeit.timeit(self.large_array_get, number=nruns) / nruns
            print('    {0:0.01f} msec'.format(avg * 1e3))

        self.octave.close()
        print('*' * 20)
        print('Test complete!')
Example #8
def aunique_benchmark(lib, prof):
    def _build_tree():
        vfs.libtree(lib)

    # Measure path generation performance with %aunique{} included.
    lib.path_formats = [
        (library.PF_KEY_DEFAULT,
         Template('$albumartist/$album%aunique{}/$track $title')),
    ]
    if prof:
        cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
                        'paths.withaunique.prof')
    else:
        interval = timeit.timeit(_build_tree, number=1)
        print('With %aunique:', interval)

    # And with %aunique replaced with a "cheap" no-op function.
    lib.path_formats = [
        (library.PF_KEY_DEFAULT,
         Template('$albumartist/$album%lower{}/$track $title')),
    ]
    if prof:
        cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
                        'paths.withoutaunique.prof')
    else:
        interval = timeit.timeit(_build_tree, number=1)
        print('Without %aunique:', interval)
Example #9
def main():


    monitorDevice = deviceMonitor.MonitorDevices('monitorDevice')

    print('\n')
    print("Start conn test")
    # The timed statement must actually call the method; the bare attribute
    # reference 'MonitorDevices.deviceConnectivity' measures nothing useful.
    setup = "from deviceMonitor import MonitorDevices; m = MonitorDevices('monitorDevice')"
    deviceConn = timeit.timeit('m.deviceConnectivity()', setup=setup, number=1000)
    print('Time to execute dev conn: %s ' % deviceConn)
    print(monitorDevice.deviceConnectivity())

    print('\n')

    print("Start devRunState test")
    deviceRunning = timeit.timeit('m.deviceRunning()', setup=setup, number=1000)
    print('Time to execute dev run state: %s ' % deviceRunning)
    print(monitorDevice.deviceRunning())

    print('\n')

    devicePollInterval = timeit.timeit('m.devicePollInterval()', setup=setup, number=1000)
    print('Time to execute dev poll interval: %s ' % devicePollInterval)
    print(monitorDevice.devicePollInterval())
Example #10
def compare_convolve_speeds(x, h):
    from scipy.signal import fftconvolve
    import timeit
    
    def test_scipy():
        return fftconvolve(x, h, mode='valid')

    def test_fft():
        return multichannel_fftconvolve(x, h, mode='valid')

    def test_overlap_add():
        return multichannel_overlap_add_fftconvolve(x, h, mode='valid')

    def test_my_convolve():
        return multichannel_convolve(x, h, mode='valid')

    print "SciPY", timeit.timeit(
        lambda: test_scipy(), 
        number=max(1, int(100000/x.shape[0])))
    print "Naive FFT", timeit.timeit(
        lambda: test_fft(), 
        number=max(1, int(100000/x.shape[0])))
    print "Overlap Add", timeit.timeit(
        lambda: test_overlap_add(), 
        number=max(1, int(100000/x.shape[0])))
    print "Mine", timeit.timeit(
        lambda: test_my_convolve(), 
        number=max(1, int(100000/x.shape[0])))

    return
Example #11
def main():
    vector = Vector(.333, .555, .2831).normalize()
    origin = Vector(138.2, 22.459, 12)
    maxDistance = 1000

    def cast_py():
        return numpy.array(list(_cast(origin, vector, maxDistance, 1)))
    def cast_np():
        return _cast_np(origin, vector, maxDistance, 1)

    res_py = cast_py()
    res_np = cast_np()

    print("res_py", res_py.shape)
    print("res_np", res_np.shape)
    s = min(len(res_py), len(res_np))
    passed = (res_py[:s] == res_np[:s]).all()
    print("Passed" if passed else "Failed")

    from timeit import timeit

    t_py = timeit(cast_py, number=1)
    t_np = timeit(cast_np, number=1)

    print("Time cast_py", t_py)
    print("Time cast_np", t_np)
Example #12
def main():

    # calculate least squares beta coefficient using ls()
    beta_old = ls(variables['body'], variables['brain'])

    # calculate least squares beta coefficient using curve_fit()
    # reshape to go from (65, 1) to (65,)
    beta_new, vcov = curve_fit(linear, abrain, abody)

    print('Week 5 model: bo = %.4f * br + %.4f' %
          (beta_old[1][0], beta_old[0][0]))

    print('SciPy model: bo = %.4f * br + %.4f\n' %
          (beta_new[0], beta_new[1]))

    imports = 'from __main__ import curve_fit, linear, abrain, abody, ' +  \
                'brain, body, ls, gaussian'
    times = 1000
    scipy_implementation = timeit('curve_fit(linear, abrain, abody)', setup=imports,
                      number=times)
    my_implementation = timeit('ls(body, brain)', setup=imports,
                   number=times)
    gaussian_implementation = timeit('curve_fit(gaussian, abrain, abody)', setup=imports,
                                    number=times)

    for result in ('my_implementation', 'scipy_implementation', 'gaussian_implementation'):
        print('Avg execution time for %s (%i reps):\n%s' %
              (result, times, eval(result)))

    print('\nGaussian:\nscaling var = %.4f\nmu = %.4f\nsigma = %.4f' %
          tuple(curve_fit(gaussian, abrain, abody)[0]))
Example #13
    def test_speed_of_reading_fcs_files(self):
        """Test the speed of loading an FCS file."""
        fname = file_formats["mq fcs 3.1"]
        number = 1000

        print()

        time = timeit.timeit(
            lambda: parse_fcs(fname, meta_data_only=True, output_format="DataFrame", reformat_meta=False), number=number
        )
        print("Loading fcs file {0} times with meta_data only without reformatting of meta takes {1} per loop".format(
            number, time / number
        ))

        time = timeit.timeit(
            lambda: parse_fcs(fname, meta_data_only=True, output_format="DataFrame", reformat_meta=True), number=number
        )
        print("Loading fcs file {0} times with meta_data only with reformatting of meta takes {1} per loop".format(
            number, time / number
        ))

        time = timeit.timeit(
            lambda: parse_fcs(fname, meta_data_only=False, output_format="DataFrame", reformat_meta=False),
            number=number,
        )
        print("Loading fcs file {0} times both meta and data but without reformatting of meta takes {1} per loop".format(
            number, time / number
        ))
def test_compare_addition():
    num_ops = 10000
    op_range = range(0, num_ops)
    random.seed('test')

    data = map(lambda _: (random.randint(0, 1000), random.randint(0, 30)), range(0, 1000))
    intervals = map(lambda v: Interval(v[0], v[0] + v[1]), data)
    interval_list = list(intervals)
    pairs_map = map(lambda _:(random.choice(interval_list), random.choice(interval_list)), op_range)
    pairs = list(pairs_map)
    trips_map = map(lambda _:(random.choice(interval_list), random.choice(interval_list), random.choice(interval_list)),
                    op_range)
    trips = list(trips_map)

    op_1 = lambda: [combine_intervals(pair[0], pair[1]) for pair in pairs]
    op_2 = lambda: [get_minimum_bounding_interval(pair[0], pair[1]) for pair in pairs]
    op_3 = lambda: [pair[0] + pair[1] for pair in pairs]

    op_3x_1 = lambda: [combine_intervals(combine_intervals(trip[0], trip[1]), trip[2]) for trip in trips]
    op_3x_2 = lambda: [combine_three_intervals(trip[0], trip[1], trip[2]) for trip in trips]

    repetitions = 20

    op_1_result = timeit.timeit(op_1, number=repetitions)
    op_2_result = timeit.timeit(op_2, number=repetitions)
    op_3_result = timeit.timeit(op_3, number=repetitions)
    op_3x_1_result = timeit.timeit(op_3x_1, number=repetitions)
    op_3x_2_result = timeit.timeit(op_3x_2, number=repetitions)

    assert op_3x_2_result < op_3x_1_result
    assert op_1_result < op_2_result
    assert op_3_result > op_1_result
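# Timing assertions like the ones above can be flaky on a loaded machine. A
# common noise-reduction pattern (a sketch, not part of the original test) is
# to take the minimum over several timeit.repeat runs:
import timeit

def stable_time(fn, number=20, repeat=5):
    # The minimum over repeats discards scheduler noise; each run still
    # executes fn `number` times.
    return min(timeit.repeat(fn, number=number, repeat=repeat))

# e.g.: assert stable_time(op_3x_2) < stable_time(op_3x_1)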
Example #15
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
    config = tf.ConfigProto()
    config.allow_soft_placement = True

    # These parameters don't matter
    batch_size = 512
    num_units = 512

    # Set up sequence lengths
    np.random.seed([127])
    sequence_length = np.random.randint(0, max_time, size=batch_size)
    inputs_list = [np.random.randn(batch_size, num_units).astype(np.float32) for _ in range(max_time)]
    inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

    def _create_static_rnn():
        with tf.Session(config=config, graph=tf.Graph()) as sess:
            inputs_list_t = [tf.constant(x) for x in inputs_list]
            ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)

    def _create_dynamic_rnn():
        with tf.Session(config=config, graph=tf.Graph()) as sess:
            inputs_t = tf.constant(inputs)
            ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)

    delta_static = timeit.timeit(_create_static_rnn, number=5)
    delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)

    print("%d \t %f \t %f \t %f" % (max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
Example #16
def get_path(graph, source, dest, cutoff):

    # convert the weight into a string
    ss = ""
    for key, value in graph.items():
        for node, weight in value.items():
            ss = ss + str(weight) + ","

    # call the function and time it; timeit.timeit() with no statement just
    # benchmarks 'pass', so use timeit.default_timer() as the clock instead
    start = timeit.default_timer()
    result = calculatePath(graph, cutoff, source, dest)
    end = timeit.default_timer()
    run_time = end - start

    # convert the result into a string
    s = ""
    for i in result:
        s = s + str(i) + ","

    # write into file (text mode, since strings are written)
    foo = open("result.txt", "a")
    foo.write(str(result[0]))
    foo.write("\n")
    foo.write(str(result[-1]))
    foo.write("\n")
    foo.write(ss)
    foo.write("\n")
    foo.write(s)
    foo.write("\n")
    foo.write(str(run_time))
    foo.write("\n")
    foo.close()
    
    return result
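# The fix above matters: timeit.timeit() with no statement benchmarks an empty
# 'pass' a million times, so a pair of such calls used as start/end timestamps
# yields a meaningless delta. The wall-clock pattern is:
import timeit

def do_work():  # hypothetical placeholder for calculatePath(...)
    sum(range(100000))

start = timeit.default_timer()   # monotonic, highest-resolution clock available
do_work()
run_time = timeit.default_timer() - start
print(run_time)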
Example #17
def main():
    """Main"""
    # number and length of lines in test file
    NUM_LINES = 1000
    LINE_LENGTH = 80

    # First, generate a test file
    with open('test_input.txt', 'w') as fp:
        for i in range(NUM_LINES):
            fp.write("".join(random.sample(string.ascii_letters * 2,
                                           LINE_LENGTH)) + "\n")

    # If STDIN specified, compare performance for parsing STDIN
    # Since the stdin buffer does not support seeking, it is not possible
    # to use timeit to benchmark this. Instead a single time will be
    # provided and it should be rerun from the shell multiple times to
    # compare performance
    if not sys.stdin.isatty():
        # Test 1
        #timeit.timeit(s)
        #timeit.timeit("peeker_test()", setup='from __main__ import peeker_test')

        #builtin_stdin_test()
        peeker_stdin_test()
    else:
        # Otherwise, compare performance for reading files
        t = timeit.timeit("builtin_file_test()", 
                          setup='from __main__ import builtin_file_test')
        print("Built-in file stream test (1,000,000 reps): %f" % t)

        t = timeit.timeit("peeker_file_test()", 
                          setup='from __main__ import Peeker,peeker_file_test')
        print("Peeker file stream test (1,000,000 reps): %f" % t)
Example #18
  def test_timeit_lookup(self):
    # Lookups are done much more often than generation under normal utilization.
    # That's what we want to optimize for.
    setup = (
      "import task_to_run\n"
      "from components import utils\n"
      # Test with 1024 combinations.
      "dimensions = {str(k): '01234567890123456789' for k in xrange(10)}\n"
      "items = tuple(sorted("
      "  task_to_run._hash_dimensions(utils.encode_to_json(i))"
      "  for i in task_to_run._powerset(dimensions)))\n")

    statement = (
      # Simulating 10000 pending tasks. Normally, a single poll will not search
      # this many items, simply because the DB is not fast enough.
      "for _ in xrange(10000):"
      "  1 in items")
    # TODO(maruel): This is a linear search instead of a binary search, so this
    # test is a tad unfair. But it's very unlikely that even a binary search can
    # beat the frozenset.
    perf_tuple = timeit.timeit(statement, setup, number=10)
    setup = (
      "import task_to_run\n"
      "from components import utils\n"
      # Test with 1024 combinations.
      "dimensions = {str(k): '01234567890123456789' for k in xrange(10)}\n"
      "items = frozenset("
      "  task_to_run._hash_dimensions(utils.encode_to_json(i))"
      "  for i in task_to_run._powerset(dimensions))\n")
    perf_frozenset = timeit.timeit(statement, setup, number=10)

    # Creating the frozenset is fairly consistently faster by 333% (really).
    self.assertGreater(perf_tuple, perf_frozenset)
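# The TODO above notes that "1 in items" on a tuple is a linear scan. An
# actual binary search over the sorted tuple would use the stdlib bisect
# module; a sketch for comparison:
import bisect

def contains_sorted(items, value):
    # items must already be sorted, as in the tuple setup above
    i = bisect.bisect_left(items, value)
    return i < len(items) and items[i] == value

print(contains_sorted((1, 3, 5, 7), 5))  # True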
Example #19
def test_lists_vs_dicts():
    """See what's faster at int key lookup: dicts or lists."""
    list_time = timeit('item = l[9000]', 'l = [0] * 10000')
    dict_time = timeit('item = d[9000]', 'd = {x: 0 for x in range(10000)}')

    # Dicts take about 1.6x as long as lists in Python 2.6 and 2.7.
    ok_(list_time < dict_time, '%s < %s' % (list_time, dict_time))
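# Both calls above omit number, so each statement runs timeit's default
# 1,000,000 iterations and the returned totals are directly comparable.
# Converting a total into per-lookup nanoseconds (a sketch):
from timeit import timeit

list_time = timeit('item = l[9000]', 'l = [0] * 10000')  # default number=1000000
print('%.1f ns per lookup' % (list_time / 1_000_000 * 1e9))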
Example #20
    def test():
        """Time the execution."""
        for name in ['rand_no_jit', 'rand', 'rand_jit']:
            print(name + ':',
                  timeit.timeit('{}()'.format(name),
                                'from numba_random import {}'.format(name)))
Example #21
    def test_value_pickler_speed(self):

        with Tempfile() as tmp:

            vpick = EntityPickler()
            value = FileChecksumEntity(tmp)

            t = lambda vpick = vpick, value = value: \
                vpick.loads(vpick.dumps(value))

            t = timeit.timeit(t, number=10000)
            print("value picker: %s" % t)

            t = lambda vpick = vpick, value = value:\
                vpick.loads(vpick.dumps(value,
                                        protocol=pickle.HIGHEST_PROTOCOL))

            t = timeit.timeit(t, number=10000)
            print("pickle: %s" % t)

        vl = vpick.dumps(value)
        print("vl: %s" % len(vl))

        pl = pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL)
        print("pl: %s" % len(pl))
Example #22
def compareCholeskyCustomVSNumpy():
    maxSize = 500
    nbOfPoints = 200
    nbValuesForAverage = 2
    x, yCustom, yNumpy = [], [], []

    for size in [round(i) for i in numpy.linspace(5, maxSize, nbOfPoints)]:
        # Record the matrix size
        x.append(size)
        # Average execution times over nbValuesForAverage calls
        # on different matrices
        customTime, numpyTime = 0, 0
        for j in range(nbValuesForAverage):
            M = matgen.symmetricPositiveDefinite(size)    
            wrapped1 = wrapper(cholesky.completeCholesky, M)
            customTime += timeit.timeit(wrapped1, number=1)
            wrapped2 = wrapper(numpy.linalg.cholesky, M)
            numpyTime += timeit.timeit(wrapped2, number=1)
        yCustom.append(customTime/nbValuesForAverage)
        yNumpy.append(numpyTime/nbValuesForAverage)

    p1 = plt.plot(x, yCustom, 'b', marker='o')
    p2 = plt.plot(x, yNumpy, 'g', marker='o')
    plt.title("Average execution time for the Cholesky factorization (custom and Numpy's) in function of size\n")
    plt.legend(["New custom Cholesky factorization", "Numpy Cholesky factorization"], loc=2)
    plt.ylabel("Execution time (s)")
    plt.xlabel("size")
    plt.show()
Example #23
def main():
    global str1, str2
    n = 5000
    str1 = ''.join([random.choice(string.ascii_letters) for i in range(n)])
    str2 = ''.join([random.choice(string.ascii_letters) for i in range(n)])
    #print(timeit.timeit("lcs1(n,n)", setup="from __main__ import lcs1; n=500", number=1))
    print(timeit.timeit("lcs2(n,n)", setup="from __main__ import lcs2; n=5000", number=1))
Example #24
def readPWM(pinNum):
    startpwm = timeit.default_timer()  # timeit.timeit() with no args just times 'pass'

    highCount = 0
    totCount = 0
    cur = GPIO.input(pinNum)
    while True:
        prev = cur
        cur = GPIO.input(pinNum)
        if cur == 1:  # pin is high: increment the high count
            highCount += 1
            totCount += 1
        elif prev == 1 and cur == 0:  # period has ended: compute the duty cycle
            totCount += 1
            dutyCycle = highCount / totCount
            break
        else:
            totCount += 1

    endpwm = timeit.default_timer()
    totalTime = 1000 * (endpwm - startpwm)  # convert seconds to milliseconds
    print('PWM took %.3f milliseconds.' % totalTime)

    return dutyCycle
Example #25
    def runLibraryTests(self, fibLibrary, resultDictionary):
        try:
            resultDictionary[' library'] = fibLibrary
            startTime = time.perf_counter()  # time.clock() was removed in Python 3.8
            methodLoad = __import__(fibLibrary)
            dynLoad = 'methodLoad.' + fibLibrary + '()'
            fibLib = eval(dynLoad)
            resultDictionary['sequence 10'] = fibLib.fibList(10)
            resultDictionary['sequence 50'] = fibLib.fibList(50)
            resultDictionary['sequence 100'] = fibLib.fibList(100)
            resultDictionary['sequence 500'] = fibLib.fibList(500)
            resultDictionary['starts at 0'] = fibLib.fibAtN(0)
            resultDictionary['value at 5'] = fibLib.fibAtN(5)
            resultDictionary['value at 200'] = fibLib.fibAtN(200)
            resultDictionary['floor of 1000'] = fibLib.fibFloor(1000)
            resultDictionary['ceil of 1000'] = fibLib.fibCeil(1000)
            ss = "fib=__import__('" + fibLibrary + "'); fibLib=fib." + fibLibrary + '()'
            resultDictionary['sequence speed tests'] = timeit.timeit('fibLib.fibList(311)', setup=ss, number=5000)
            resultDictionary['value speed tests'] = timeit.timeit('fibLib.fibAtN(311)', setup=ss, number=5000)
            resultDictionary['floor speed tests'] = timeit.timeit('fibLib.fibFloor(311)', setup=ss, number=5000)
            resultDictionary['ceiling speed tests'] = timeit.timeit('fibLib.fibCeil(311)', setup=ss, number=5000)
            stopTime = time.perf_counter()
            resultDictionary['time of basic tests'] = stopTime - startTime
        except Exception:  # a bare except would also swallow KeyboardInterrupt
            return 'exception'
        else:
            return 'tests worked'
    def diff(self, iterN=100):
        baseSetup = self.baseSetup + '; bcg=np.random.rand(640, 480)'
        cv2Cmd = 'test=cv2.absdiff(bcg, data)'
        numpyCmd = 'test=data - bcg'

        print(timeit.timeit(setup=baseSetup, stmt=numpyCmd, number=iterN) / iterN)
        print(timeit.timeit(setup=baseSetup, stmt=cv2Cmd, number=iterN) / iterN)
Example #27
def rxcui_unit_tests(unii, chembl_id, chebi_id, wikidata_accession):
    unii_com = gnomics.objects.compound.Compound(identifier = str(unii), identifier_type = "UNII", source = "FDA")
    print("\nGetting drugs (RxCUIs) from compound (UNII) (%s):" % unii)
    
    for rx in get_drugs(unii_com):
        for iden in rx.identifiers:
            print("- %s (%s)" % (str(iden["identifier"]), iden["identifier_type"]))
    
    chembl_com = gnomics.objects.compound.Compound(identifier = str(chembl_id), identifier_type = "ChEMBL ID", source = "ChEMBL")
    
    print("\nGetting drug identifiers from compound (ChEMBL ID) (%s):" % chembl_id)
    for rx in get_drugs(chembl_com):
        for iden in rx.identifiers:
            print("- %s (%s)" % (str(iden["identifier"]), iden["identifier_type"]))
        
    chebi_com = gnomics.objects.compound.Compound(identifier = str(chebi_id), identifier_type = "ChEBI ID", source = "ChEBI")
    
    print("\nGetting drug identifiers from compound (ChEBI ID) (%s):" % chebi_id)
    for rx in get_drugs(chebi_com):
        for iden in rx.identifiers:
            print("- %s (%s)" % (str(iden["identifier"]), iden["identifier_type"]))
    
    wikidata_com = gnomics.objects.compound.Compound(identifier = str(wikidata_accession), identifier_type = "Wikidata Accession", source = "Wikidata")
    print("\nGetting drug identifiers from Wikidata Accession (%s):" % wikidata_accession)
    
    start = timeit.default_timer()  # timeit.timeit() with no args just times 'pass'
    all_drugs = get_drugs(wikidata_com)
    end = timeit.default_timer()
    print("TIME ELAPSED: %s seconds." % str(end - start))
    
    for rx in all_drugs:
        for iden in rx.identifiers:
            print("- %s (%s)" % (str(iden["identifier"]), iden["identifier_type"]))
Example #28
def compareCompleteVSIncomplete():
    size = 100
    nbOfPoints = 50
    nbValuesForAverage = 4
    x, yComplete, yIncomplete = [], [], []

    for i in numpy.linspace(0, size*(size-1), nbOfPoints):
        # Computes matrix density
        x.append((size*size-i)/(size*size))
        # Average execution times over nbValuesForAverage calls on different matrices
        completeTime, incompleteTime = 0, 0
        for j in range(nbValuesForAverage):    
            M = matgen.symmetricSparsePositiveDefinite(size, i)
            wrapped1 = wrapper(cholesky.completeCholesky, M)
            completeTime += timeit.timeit(wrapped1, number=1)
            wrapped2 = wrapper(cholesky.incompleteCholesky, M)
            incompleteTime += timeit.timeit(wrapped2, number=1)
        yComplete.append(completeTime/nbValuesForAverage)
        yIncomplete.append(incompleteTime/nbValuesForAverage)

    p1 = plt.plot(x, yComplete, 'b', marker='o')
    p2 = plt.plot(x, yIncomplete, 'g', marker='o')
    plt.title("Average execution time for the Cholesky factorization in function of matrix density\n")
    plt.legend(["New custom Complete factorization", "Custom incomplete factorization"], loc=4)
    plt.ylabel("Execution time (s)")
    plt.xlabel("density of the initial 100*100 matrix")
    plt.show()
Example #29
def patent_unit_tests(chebi_id, pubchem_cid, pubchem_sid, kegg_compound_id):
    chebi_com = gnomics.objects.compound.Compound(identifier = str(chebi_id), identifier_type = "ChEBI ID", source = "ChEBI")
    print("Getting patent accessions from ChEBI ID (%s):" % chebi_id)
    for acc in get_patents(chebi_com):
        for iden in acc.identifiers:
            print("- %s" % str(iden["identifier"]))
        
    pubchem_com = gnomics.objects.compound.Compound(identifier = str(pubchem_cid), identifier_type = "PubChem CID", source = "PubChem")
    print("\nGetting patent accessions from PubChem CID (%s):" % pubchem_cid)
    for acc in get_patents(pubchem_com):
        for iden in acc.identifiers:
            print("- %s" % str(iden["identifier"]))
        
    pubchem_sub = gnomics.objects.compound.Compound(identifier = str(pubchem_sid), identifier_type = "PubChem SID", source = "PubChem")
    print("\nGetting patent accessions from PubChem SID (%s):" % pubchem_sid)
    for acc in get_patents(pubchem_sub):
        for iden in acc.identifiers:
            print("- %s" % str(iden["identifier"]))
        
    kegg_com = gnomics.objects.compound.Compound(identifier = str(kegg_compound_id), identifier_type = "KEGG Compound ID", source = "KEGG")
    print("\nGetting patent accessions from KEGG Compound ID (%s):" % kegg_compound_id)
    
    start = timeit.default_timer()  # use the timer, not an empty timeit.timeit()
    all_patents = get_patents(kegg_com)
    end = timeit.default_timer()
    print("TIME ELAPSED: %s seconds." % str(end - start))
    
    for acc in all_patents:
        for iden in acc.identifiers:
            print("- %s" % str(iden["identifier"]))
Example #30
def measure(documents, loops=LOOPS):
    dump_table = []
    load_table = []

    for title, dumps, loads in serializers:
        packed = [""]
        def do_dump():
            packed[0] = dumps(documents)
            
        def do_load():
            loads(packed[0])

        if VERBOSE:
            print(title)
            print("  dump...")
            
        try:
            result = timeit(do_dump, number=loops)
        except Exception:  # skip serializers that cannot handle the documents
            continue
        dump_table.append([title, result, len(packed[0])])
 
        if VERBOSE:
            print("  load...")
        result = timeit(do_load, number=loops)
        load_table.append([title, result])

    return dump_table, load_table, loops
Example #31
    while len(items) != 0:
        try:
            seq.append(items[n])
            del items[n]
            n += k - 1
        except IndexError:
            n = n - len(items)
    return seq


TEST_CODE_Oleksandra_Chmel = '''
result = Oleksandra_Chmel_day21([1,2,3,4,5,6,7],3)
'''

print("Time for ccquiel test code: " + str(
    timeit.timeit(stmt=TEST_CODE_ccquiel, setup=ccquiel_setup, number=100000))
      + " seconds")
print("Time for charlie_ang test code: " + str(
    timeit.timeit(
        stmt=TEST_CODE_charlie_ang, setup=charlie_ang_setup, number=100000)) +
      " seconds")
print("Time for diana_henninger test code: " + str(
    timeit.timeit(stmt=TEST_CODE_diana_henninger,
                  setup=diana_henninger_setup,
                  number=100000)) + " seconds")
print(
    "Time for Jens test code: " +
    str(timeit.timeit(stmt=TEST_CODE_Jens, setup=Jens_setup, number=100000)) +
    " seconds")
print("Time for Jose_Catela test code: " + str(
    timeit.timeit(
from timeit import timeit
from numpy.random import randint


def built_in_sum(l: list):
    return sum(l)


def custom_sum(l: list):
    sum_val = 0
    for num in l:
        sum_val += num
    return sum_val


l = randint(0, 100, size=100_000)
expSize = 100

built_in_time = timeit("built_in_sum(l)", number=expSize, globals=globals())
custom_time = timeit("custom_sum(l)", number=expSize, globals=globals())
print(custom_time / built_in_time)
# 1.2499071011706575
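# One subtlety above: numpy.random.randint returns an ndarray, not a Python
# list, and the built-in sum over an ndarray is itself slow. A sketch
# contrasting the three cases:
from timeit import timeit
from numpy.random import randint

arr = randint(0, 100, size=100_000)  # ndarray, as above
lst = arr.tolist()                   # a genuine Python list

print(timeit("sum(lst)", number=100, globals=globals()))   # built-in sum over a list
print(timeit("sum(arr)", number=100, globals=globals()))   # built-in sum over an ndarray: slow
print(timeit("arr.sum()", number=100, globals=globals()))  # vectorized reduction: fastest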
        client = ZhihuClient()
        cookies_str = client.login_in_terminal()
        with open(Cookies_File, 'w') as f:
            f.write(cookies_str)

    print("Making test dir...", end="")
    os.mkdir(TEST_DIR)
    print("Done", end="\n\n")

    os.chdir(TEST_DIR)

    print("===== test start =====")

    import timeit

    try:
        time = timeit.timeit('test()',
                             setup='from __main__ import test',
                             number=1)
        print('===== test passed =====')
        print('no error happen')
        print('time used: {0} ms'.format(time * 1000))
    except Exception as e:
        print('===== test failed =====')
        raise e
    finally:
        os.chdir(BASE_DIR)
        print("Cleaning...", end='')
        shutil.rmtree(TEST_DIR)
        print("Done")
Example #34
''' map and a generator expression are nearly tied for fastest, though map still seems a touch faster '''

import timeit


ss = 's="1234567890-=qwertyuiop[]\asdfghjkl;zxcvbnm,./"\n' \
    'cnt = {c:0 for c in s}'

s1 = 'all(map(lambda x:x >0 , cnt.values()))'

s2 = 'all([x >0 for x in cnt.values()])'

s3 = 'all([cnt[x] >0 for x in cnt])'

s4 = 'all((x >0 for x in cnt.values()))'

t1 = timeit.timeit(stmt=s1, setup=ss, number=20000000)
t2 = timeit.timeit(stmt=s2, setup=ss, number=20000000)
t3 = timeit.timeit(stmt=s3, setup=ss, number=20000000)
t4 = timeit.timeit(stmt=s4, setup=ss, number=20000000)
print(t1)
print(t2)
print(t3)
print(t4)
Example #35
                              [3, 5]), self.s3),
            tf.reduce_sum(dmu2[:, :, 810:, ...], 2, True)
        ], 2))

        dsg += (tf.concat([dsg1, self.top0], 2) + tf.concat([
            self.btm0,
            tf.reshape(
                tf.reduce_sum(tf.reshape(dsg2[:, :, :729, ...], self.s1),
                              [3, 5]), self.s3),
            tf.reshape(
                tf.reduce_sum(tf.reshape(dsg2[:, :, 729:810, ...], self.s2),
                              [3, 5]), self.s3),
            tf.reduce_sum(dsg2[:, :, 810:, ...], 2, True)
        ], 2))

        dalpha = (tf.reduce_sum(fn_, [2, 4], keepdims=True) +
                  tf.reduce_sum(fe_, [2, 4], keepdims=True))
        dw = alpha * (dalpha - tf.reduce_sum(dalpha * alpha, 3, keepdims=True))
        energy = tf.zeros(fn.shape[:2], tf.float64) if not get_energy else \
            -(tf.reduce_sum(fn, [2, 3, 4]) + tf.reduce_sum(fe, [2, 3, 4]))
        return (-dmu * sqrt2, -dsg, -dw), energy


if __name__ == '__main__':
    from core.potential.dnn import NeuralNetPotential
    import timeit

    gmm = GMMHidden(c=10, m=27, l=1, k=11)
    pot = NeuralNetPotential(node_units=(4, 5, 5, 4), edge_units=(5, 7, 7, 5))
    print(timeit.timeit(lambda: gmm.infer(pot, 50), number=1))
b2 = np.array([0.2])

w = [w1, w2]
b = [b1, b2]
x = [1.5, 2.0, 3.0]


def f(x):
    return 1 / (1 + np.exp(-x))


def simple_looped_nn_calc(n_layers, x, w, b):
    for l in range(n_layers - 1):
        if l == 0:
            node_in = x
        else:
            node_in = h
        h = np.zeros((w[l].shape[0], ))
        for i in range(w[l].shape[0]):
            f_sum = 0
            for j in range(w[l].shape[1]):
                f_sum += w[l][i][j] * node_in[j]

            f_sum += b[l][i]
            h[i] = f(f_sum)
    return h


answer = simple_looped_nn_calc(3, x, w, b)
# pass a callable, not the result of the call; otherwise timeit tries to
# exec the returned ndarray as a statement
print(timeit.timeit(stmt=lambda: simple_looped_nn_calc(3, x, w, b), number=1234567))
Example #37
    max_ID = 0
    for seat in inp:
        row, col = seat[:7], seat[7:]
        ID = decoderow(row) * 8 + decodecol(col)
        if max_ID < ID:
            max_ID = ID
    return max_ID


def oneliner(inp):
    return max(
        int(line.translate(str.maketrans('RLBF', '1010')), 2) for line in inp)


if __name__ == "__main__":
    with open('5.in') as f:
        inp = f.readlines()
    inp = [l.strip() for l in inp if l]
    print(f"old solution: {solution(inp)}")
    print(
        "time for 10k runs:",
        timeit.timeit('solution(inp)',
                      setup="from __main__ import solution, inp",
                      number=10000))
    print(f"one liner: {oneliner(inp)}")
    print(
        "time for 10k runs:",
        timeit.timeit('oneliner(inp)',
                      setup="from __main__ import oneliner, inp",
                      number=10000))
Example #38
from timeit import timeit

setup = '''
import numpy as np
n = int(input('Number of elements in the array: '))
a = np.arange(n + 1)
x = int(input('Element to search for: '))
right = len(a) - 1  # Right boundary.
left = 0  # Left boundary.
div = 0
'''
stmt = '''
while left <= right:
    div = (left + right) // 2  # bisection ("dichotomy") step
    # Narrow the search range.
    if x < a[div]:
        right = div - 1
    elif x > a[div]:
        left = div + 1
    else:
        x = div  # Element found!
        break
if x == div:
    print('Element {} found at position {}'.format(x, div))
else:
    print('Element not found!')
'''
print('Elapsed time: {} seconds'.format(timeit(stmt, setup, number=1)))
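# Note that the setup above blocks on two input() calls before the measurement
# starts (setup time is excluded from the result either way). A non-interactive
# variant of the same benchmark, with fixed values (a sketch):
from timeit import timeit

ni_setup = '''
import numpy as np
n = 1_000_000          # fixed size instead of input()
a = np.arange(n + 1)
x = 777_777            # fixed target instead of input()
left, right = 0, len(a) - 1
'''
ni_stmt = '''
while left <= right:
    div = (left + right) // 2
    if x < a[div]:
        right = div - 1
    elif x > a[div]:
        left = div + 1
    else:
        break
'''
print('Elapsed time: {} seconds'.format(timeit(ni_stmt, ni_setup, number=1)))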
Example #39
def run(matcher, cached, repeat):
    '''Time the given test.'''
    stmt = 'tests[{0}][{1}][{2}]()'.format(matcher.__name__, cached, repeat)
    setup = 'from __main__ import tests, {0}'.format(matcher.__name__)
    return timeit(stmt, setup, number=1)
Example #40
def speed_gen():
    f = "Hello there!\n General Kenobi!"
    # iterate over lines, not characters, so split() sees whole lines
    l = sum(len(word) for line in f.splitlines() for word in line.split())


if __name__ == "__main__":

    num = 10000

    #1 no code provided

    #2.1
    print(2.1)
    r = timeit.timeit("speed_type()",
                      number=num,
                      setup="from __main__ import speed_type")
    print(r)
    r = timeit.timeit("speed_type_is()",
                      number=num,
                      setup="from __main__ import speed_type_is")
    print(r)
    r = timeit.timeit("speed_isinstance()",
                      number=num,
                      setup="from __main__ import speed_isinstance")
    print(r)

    #2.2
    print(2.2)
    r = timeit.timeit("speed_len_in_loop()",
                      number=num,
Example #41
    while swapped:
        swapped = False
        for i in range(lenght):
            for j in range(lenght - i - 1):
                if arr[j] > arr[j + 1]:
                    arr[j], arr[j + 1] = arr[j + 1], arr[j]
                    swapped = True


def geraListaAleatoria(tam):
    lista = []
    while tam > len(lista):
        n = randint(1, 1 * tam)
        if n not in lista: lista.append(n)
    return lista


x = [10000, 20000, 30000, 40000, 50000]

y = []

for i in x:

    lista = geraListaAleatoria(i)
    y.append(
        timeit.timeit("bubbleSort({})".format(lista),
                      setup="from __main__ import bubbleSort",
                      number=1))

desenhaGrafico(x, y, 'Grafico bubbleSort', 'Gráfico bubbleSort.png')
Example #42
def f1():
    n = set([i for i in range(10)])
    a = list(itertools.permutations(n))[1000000 - 1]
    print(a)
    b = list(map(str, a))
    print(b)
    print(''.join(b))


def f2():
    print(''.join(
        list(
            map(
                str,
                list(itertools.permutations(set([i for i in range(10)
                                                 ])))[1000000 - 1]))))


# (2, 7, 8, 3, 9, 1, 5, 4, 6, 0)
# ['2', '7', '8', '3', '9', '1', '5', '4', '6', '0']

print(timeit.timeit(lambda: f1(), number=1))
# 2783915460
# 0.501804581

print(timeit.timeit(lambda: f2(), number=1))
# 2783915460
# 0.48594570599999987

# https://projecteuler.net/problem=24
Example #43
                output.append(right.pop(0))
        return output

if __name__ == "__main__":
    from timeit import timeit
    import random

    best_case = list(range(100))
    worst_case = list(range(100, 0, -1))
    avg_case = [random.randint(0, 100) for _ in range(100)]

    best_stmt = "from __main__ import merge_sort, best_case;merge_sort(best_case[:])"
    worst_stmt = "from __main__ import merge_sort, worst_case; merge_sort(worst_case[:])"
    avg_stmt = "from __main__ import merge_sort, avg_case; merge_sort(avg_case[:])"

    best_time = timeit(best_stmt, number=1000)
    worst_time = timeit(worst_stmt, number=1000)
    avg_time = timeit(avg_stmt, number=1000)

    print("For 1000 iterations of the time-efficient merge sort...")
    print(f"\t...the best case finished in {best_time} seconds")
    print(f"\t...the worst case finished in {worst_time} seconds")
    print(f"\t...the average (random) case finished in {avg_time} seconds")

    best_stmt = "from __main__ import merge_sort2, best_case;merge_sort2(best_case[:])"
    worst_stmt = "from __main__ import merge_sort2, worst_case; merge_sort2(worst_case[:])"
    avg_stmt = "from __main__ import merge_sort2, avg_case; merge_sort2(avg_case[:])"

    best_time = timeit(best_stmt, number=1000)
    worst_time = timeit(worst_stmt, number=1000)
    avg_time = timeit(avg_stmt, number=1000)
Example #44
    print("=" * 100)
    print("Active path: 192.168.1.1 to 10.0.1.2")
    path3 = path_finder.active_by_manage_ip('192.168.1.1', '10.0.1.2')
    pprint.pprint(path3)
    for path in map(nx.utils.pairwise, path3):
        for pp in path:
            try:
                edge = path_finder.graph.edges[pp]
                print(edge)
            except KeyError:
                continue

    print("=" * 100)
    print("Active path: 192.168.1.1 to 192.168.1.10")
    path4 = path_finder.active_by_manage_ip('192.168.1.1', '192.168.1.10')
    pprint.pprint(path4)

    for path in map(nx.utils.pairwise, path4):
        for pp in path:
            print(path_finder.graph.edges[pp])


if __name__ == '__main__':
    import logging
    import timeit

    logging.basicConfig(level=logging.INFO)
    usage_time = timeit.timeit(main, number=1)
    print("Usage time: {:.3f}".format(usage_time))
Example #45
import random
import socket
import struct
import timeit

parser = argparse.ArgumentParser(description="Benchmark maxminddb.")
parser.add_argument("--count", default=250000, type=int, help="number of lookups")
parser.add_argument("--mode", default=0, type=int, help="reader mode to use")
parser.add_argument("--file", default="GeoIP2-City.mmdb", help="path to mmdb file")

args = parser.parse_args()

reader = geoip2.database.Reader(args.file, mode=args.mode)


def lookup_ip_address():
    ip = socket.inet_ntoa(struct.pack("!L", random.getrandbits(32)))
    try:
        record = reader.city(str(ip))
    except geoip2.errors.AddressNotFoundError:
        pass


elapsed = timeit.timeit(
    "lookup_ip_address()",
    setup="from __main__ import lookup_ip_address",
    number=args.count,
)

print(args.count / elapsed, "lookups per second")
Example #46
def smiles_unit_tests(chemspider_id,
                      chebi_id,
                      cas_rn,
                      kegg_drug_id,
                      pubchem_cid,
                      chembl_id,
                      kegg_compound_id,
                      chemspider_security_token=None):
    if chemspider_security_token is not None:
        print("Creating user...")
        user = User(chemspider_security_token=chemspider_security_token)
        print("User created successfully.\n")

        chemspider_com = gnomics.objects.compound.Compound(
            identifier=str(chemspider_id),
            identifier_type="ChemSpider ID",
            source="ChemSpider")
        print("Getting SMILES from ChemSpider ID (%s):" % chemspider_id)
        start = timeit.default_timer()  # timeit.timeit() with no args just times 'pass'
        smiles_array = get_smiles(chemspider_com, user=user)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        chebi_com = gnomics.objects.compound.Compound(
            identifier=str(chebi_id),
            identifier_type="ChEBI ID",
            source="ChEBI")
        print("\nGetting SMILES from ChEBI ID (%s):" % chebi_id)
        start = timeit.default_timer()
        smiles_array = get_smiles(chebi_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        cas_com = gnomics.objects.compound.Compound(
            identifier=str(cas_rn),
            identifier_type="CAS Registry Number",
            source="CAS")
        print("\nGetting SMILES from CAS RN (%s):" % cas_rn)
        start = timeit.default_timer()
        smiles_array = get_smiles(cas_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        kegg_compound_com = gnomics.objects.compound.Compound(
            identifier=str(kegg_compound_id),
            identifier_type="KEGG COMPOUND ID",
            source="KEGG")
        print("\nGetting SMILES from KEGG Compound ID (%s):" %
              kegg_compound_id)
        start = timeit.default_timer()
        smiles_array = get_smiles(kegg_compound_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        pubchem_com = gnomics.objects.compound.Compound(
            identifier=str(pubchem_cid),
            identifier_type="PubChem CID",
            source="PubChem")
        print("\nGetting canonical SMILES from PubChem CID (%s):" %
              pubchem_cid)
        start = timeit.default_timer()
        smiles_array = get_canonical_smiles(pubchem_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        print("\nGetting isomeric SMILES from PubChem CID (%s):" % pubchem_cid)
        start = timeit.default_timer()
        smiles_array = get_isomeric_smiles(pubchem_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        chembl_com = gnomics.objects.compound.Compound(
            identifier=str(chembl_id),
            identifier_type="ChEMBL ID",
            source="ChEMBL")
        print("\nGetting canonical SMILES from ChEMBL ID (%s):" % chembl_id)
        start = timeit.default_timer()
        smiles_array = get_canonical_smiles(chembl_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

    else:
        print(
            "No user provided. Cannot test ChemSpider conversion without ChemSpider security token.\n"
        )
        print("Continuing with ChEBI ID conversion...\n")

        chebi_com = gnomics.objects.compound.Compound(
            identifier=str(chebi_id),
            identifier_type="ChEBI ID",
            source="ChEBI")
        print("Getting SMILES from ChEBI ID (%s):" % chebi_id)
        start = timeit.default_timer()
        smiles_array = get_smiles(chebi_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        cas_com = gnomics.objects.compound.Compound(
            identifier=str(cas_rn),
            identifier_type="CAS Registry Number",
            source="CAS")
        print("\nGetting SMILES from CAS RN (%s):" % cas_rn)
        start = timeit.default_timer()
        smiles_array = get_smiles(cas_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        kegg_compound_com = gnomics.objects.compound.Compound(
            identifier=str(kegg_compound_id),
            identifier_type="KEGG COMPOUND ID",
            source="KEGG")
        print("\nGetting SMILES from KEGG Compound ID (%s):" %
              kegg_compound_id)
        start = timeit.default_timer()
        smiles_array = get_smiles(kegg_compound_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        pubchem_com = gnomics.objects.compound.Compound(
            identifier=str(pubchem_cid),
            identifier_type="PubChem CID",
            source="PubChem")
        print("\nGetting canonical SMILES from PubChem CID (%s):" %
              pubchem_cid)
        start = timeit.default_timer()
        smiles_array = get_canonical_smiles(pubchem_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        print("\nGetting isomeric SMILES from PubChem CID (%s):" % pubchem_cid)
        start = timeit.default_timer()
        smiles_array = get_isomeric_smiles(pubchem_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))

        chembl_com = gnomics.objects.compound.Compound(
            identifier=str(chembl_id),
            identifier_type="ChEMBL ID",
            source="ChEMBL")
        print("\nGetting canonical SMILES from ChEMBL ID (%s):" % chembl_id)
        start = timeit.default_timer()
        smiles_array = get_canonical_smiles(chembl_com)
        end = timeit.default_timer()
        print("\tTIME ELAPSED: %s seconds." % str(end - start))
        print("\tRESULTS:")
        for com in smiles_array:
            print("\t- %s" % str(com))
Example #47
os.system('dot -O -Gdpi=300 -Tpng mixture2.dot')

image = plt.imread("mixture2.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis('off')

#######################################
# Processing time
# +++++++++++++++

print(
    timeit(stmt="sess.run(None, {'X': xt})",
           number=10000,
           globals={
               'sess': sess,
               'xt': xt
           }))

print(
    timeit(stmt="sess2.run(None, {'X': xt})",
           number=10000,
           globals={
               'sess2': sess2,
               'xt': xt
           }))

#################################
# The model using ReduceLogSumExp is much faster.

##########################################
from Checkers.DuelModel import *

game = Game(4)

# create a model
# env = DuelModel(game, rPieceInner=[200, 200, 200], rGameInner=[500, 500, 500],
#                 bPieceInner=[200, 200, 200], bGameInner=[500, 500, 500])


def testCode():
    """
    Put code in this method to run for a test
    """
    game.resetGame()
    game.play((0, 3), (False, True, False))
    game.play((1, 3), (True, True, False))
    game.play((0, 2), (True, True, False))
    game.play((0, 2), (False, True, False))
    game.play((0, 1), (False, True, False))
    game.play((1, 1), (False, True, False))
    game.play((0, 0), (True, False, False))
    game.play((1, 0), (True, False, False))
    game.play((0, 1), (False, True, False))
    game.play((0, 3), (False, True, False))
    game.play((1, 3), (False, True, False))
    game.play((1, 1), (False, False, False))


print(timeit.timeit(lambda: testCode(), number=10000))
Example #49
    return opt.brenth(f, 0, 1)


def test():
    L = []
    for i in range(100):
        L.append(i)


if __name__ == '__main__':
    print("Test metody bisekcji - funkcja biblioteczna")
    print("x0 = ", opt.bisect(f, 0, 1))
    print("Czas obliczeń: ")
    print(
        timeit.timeit("bis(f)",
                      number=100,
                      setup="from __main__ import f, bis"))

    print("\nTest metody NR - funkcja biblioteczna")
    print("x0 = ", opt.newton(f, -0.5))
    print("Czas obliczeń: ")
    print(
        timeit.timeit("newt(f)",
                      number=100,
                      setup="from __main__ import f, newt"))

    print("\nTest metody Ridder - funkcja biblioteczna")
    print("x0 = ", opt.ridder(f, 0, 1))
    print("Czas obliczeń: ")
    print(
        timeit.timeit("ridd(f)",
Example #50
def sort(unsorted):
    """ insertion sort of unsorted array """

    # if the list is zero or one element, it is already sorted
    if len(unsorted) <= 1:
        return unsorted

    for n in range(1, len(unsorted)):
        # pick the nth element, and if its value is less than any of the
        #  (already sorted) values in front of it, move it to the front
        val = unsorted[n]
        test_index = n - 1
        while (test_index >= 0) and (unsorted[test_index] > val):
            unsorted[test_index + 1] = unsorted[test_index]
            test_index -= 1
        unsorted[test_index + 1] = val

    # unsorted array is now sorted (was done in place with swaps)
    return unsorted


if __name__ == "__main__":

    print "Insertion sort of 10 inorder elements ({} seconds)".format(
        timeit.timeit('arr = [i for i in xrange(0,10)]; sort(arr)',
                      setup='from __main__ import sort'))

    print "Insertion sort on 10 reversed elements ({} seconds)".format(
        timeit.timeit('arr=[i for i in xrange(10, 0, -1)]; sort(arr)',
                      setup='from __main__ import sort'))
Example #51
'''
Created on Jun 4, 2014

@author: Anirudh
'''
import math
import timeit


def binomial(n, r):
    # integer division keeps exact integer results in Python 3
    return math.factorial(n) // (math.factorial(r) * math.factorial(n - r))


def catalyn(n):
    return binomial(2 * n, n) // (n + 1)


start = timeit.default_timer()  # timeit.timeit() with no args just times 'pass'
T = int(input())
flowers = []
for i in range(T):
    num = int(input())
    result = 0
    i = 1
    while i <= num:
        result = result + (binomial(num, i) * catalyn(i))
        i = i + 1
    print(result)

end = timeit.default_timer()
print(end - start)
Example #52
#!/usr/bin/env python
from __future__ import print_function
from timeit import timeit

setup = 'x = "Hello";y = "world"'
setup_big = 'x = "Hello"*100;y = "world"*100'

stmts = [
    'x + " " + y',
    '" ".join((x,y))',
    '" ".join([x,y])',
    '"%s %s" % (x, y)',
    '"{} {}".format(x, y)',
    '"{0} {1}".format(x, y)',
    '"{x} {y}".format(x=x, y=y)',
]
for stmt in stmts:
    print("{:30} {:.3f} {:.3f}".format(stmt, timeit(
        stmt, setup=setup), timeit(stmt, setup=setup_big)))
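# f-strings (Python 3.6+) are the natural missing case in the comparison
# above; they can be timed the same way (a sketch):
from timeit import timeit

setup = 'x = "Hello"; y = "world"'
stmt = 'f"{x} {y}"'
print("{:30} {:.3f}".format(stmt, timeit(stmt, setup=setup)))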
Example #53
"""
    评估以下几种字符串拼接方法的效率
"""
import sys
import timeit


def f1():
    s = 'hello world'
    r = ''
    for e in s:
        r += e
    return r


print(timeit.timeit(f1, number=1000000))  # 0.89


def f2():
    s = 'hello world'
    r = []
    for e in s:
        r.append(e)
    return ''.join(r)


print(timeit.timeit(f2, number=1000000))  # 1.20


def f3():
    s = 'hello world'
Example #54

def sum_to(upto, step):
    return sum_terms(1, upto // step, step, step)


def euler1_hackerrank(n):
    res = sum_to(n - 1, 3) + sum_to(n - 1, 5) - sum_to(n - 1, 15)
    return res


"""
Above will work with very large numbers
For the formulaes and explaination: 
https://www.youtube.com/watch?v=xWHfQGBzgbc
"""


def test_1():
    print('Sum of numbers below 1000', euler1_hackerrank(1000))


def test_2():
    print('Sum of numbers below 10^1000', euler1_hackerrank(10**1000))


timer = timeit.timeit(stmt=test_1, number=1)
print("In time: ", timer, 'secs')

timer = timeit.timeit(stmt=test_2, number=1)
print("In time: ", timer, 'secs')
Example #55
def main():
	elapsed_time = timeit.timeit(visualization, number=1)
	print("Elapsed time: ", round(elapsed_time, 7), "sec.")
Example #56
x = [10000, 20000, 30000, 40000, 50000]

yPiorCaso = []
yMedioCaso = []
yMelhorCaso = []

lista = [1, 2, 3, 4, 5, 6]
listaPermutada = list(it.permutations(lista, 6))

tempos = []

for i in x:
    lista = geraListaDecresc(i)
    yPiorCaso.append(
        timeit.timeit('bucketSort({})'.format(lista),
                      setup="from __main__ import bucketSort",
                      number=1))

    lista = geraListaAleatoria(i)
    yMedioCaso.append(
        timeit.timeit('bucketSort({})'.format(lista),
                      setup="from __main__ import bucketSort",
                      number=1))

    lista = geraListaCresc(i)
    yMelhorCaso.append(
        timeit.timeit('bucketSort({})'.format(lista),
                      setup="from __main__ import bucketSort",
                      number=1))

desenhaGrafico(x, yPiorCaso, 'Quantidade', 'Tempo', 'Pior_Caso')
Example #57
import timeit


def v28(max_num=1001):
    sum_diagonales = [1]
    rows = []
    for k in range(1, max_num + 1, 2):
        rows.append(k)
    #print(rows)
    for i in range(1, len(rows)):
        for j in range(0, 4):
            if j == 0:
                num = rows[i] - 2
                verschoben = sum_diagonales[len(sum_diagonales) - 1] + 1
                num = verschoben + num
                #print('old element: ', sum_diagonales[len(sum_diagonales)-1], 'new element: ', num)
            else:
                num = (rows[i] - 1) + sum_diagonales[len(sum_diagonales) - 1]
            sum_diagonales.append(num)
    #print(sum_diagonales,sum(sum_diagonales))
    print(sum(sum_diagonales))


#v28()
print(timeit.timeit(v28, number=10) / 10)
#0.0009388312499999999
Example #58
import array
import numpy
import sample11  # compiled Cython extension used below
import timeit

a = array.array('d', [1, -3, 4, 7, 2, 0])
print(a)
sample11.clip(a, 1, 4, a)
print(a)

b = numpy.random.uniform(-10, 10, size=100000)
print(b)
c = numpy.zeros_like(b)
print(c)
sample11.clip(b, -5, 5, c)
print(min(c))
print(max(c))

print(
    timeit.timeit('numpy.clip(b, -5, 5, c)',
                  'from __main__ import b, c, numpy',
                  number=1000))
print(
    timeit.timeit('sample11.clip(b, -5, 5, c)',
                  'from __main__ import b, c, sample11',
                  number=1000))
# In the book, numpy wins because its core is written in C;
# on my machine, sample11 is faster.

# @cython.boundscheck(False)
# @cython.wraparound(False)
# Skipping bounds checks and negative-index handling for arrays
# can give a large performance boost.
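# A sketch of what those decorators look like in Cython's pure-Python mode
# (requires the cython package; `clip` here only illustrates the idea and is
# not the actual source of sample11.clip):
import cython

@cython.boundscheck(False)  # skip per-index bounds checks
@cython.wraparound(False)   # disallow negative-index wrapping
def clip(a, min_value, max_value, out):
    for i in range(len(a)):
        v = a[i]
        out[i] = min_value if v < min_value else (max_value if v > max_value else v)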
Example #59
data_train, data_test, label_train, label_test = model_selection.train_test_split(
    iris_data, iris_label, test_size=0.1, train_size=0.9)
print('train_size:', data_train.shape[0])
print('test_size:', data_test.shape[0])

tuned_parameters = [{
    "solver": ["lbfgs", "sgd", "adam"],
    "hidden_layer_sizes": [(100, ), (100, 10), (100, 100, 10),
                           (100, 100, 100, 10), (100, 100, 100, 100, 10)],
}]
licv = model_selection.GridSearchCV(MLPClassifier(early_stopping=True),
                                    param_grid=tuned_parameters,
                                    scoring="accuracy")
print('Learning Time(s):',
      timeit.timeit(lambda: licv.fit(data_train, label_train), number=1))
print('licv.best_params_ : ', licv.best_params_)
print('licv.best_estimator_ : ', licv.best_estimator_)
print('licv.best_score_ : ', licv.best_score_)

pre = licv.predict(data_test)
ac_score = metrics.accuracy_score(label_test, pre)
print('accuracy score:', ac_score)

co_mat = metrics.confusion_matrix(label_test, pre)
print('confusion matrix:')
print(co_mat)
cl_repo = metrics.classification_report(label_test, pre)
print('classification report:')
print(cl_repo)
Example #60
"""
Central eval module
"""

import timeit
from dummy_class import PyDummy

if __name__ == "__main__":

    x_try = """\
a = Dummy(3, 4);
a.square()
    """
    x = timeit.timeit(x_try, setup="from dummy_class_py_ref import Dummy", number=10000)
    print(f"Python took {x:.4f} s to execute.")

    y_try = """\
a = Dummy(3, 4);
a.square()
    """
    y = timeit.timeit(y_try, setup="from dummy_class import Dummy", number=10000)
    print(f"Cython took {y:.4f} s to execute.")

    print(f"Cython has a speedup of {x/y:.2f} over Python.")

    pd = PyDummy(2, 3, 5)
    pd.volume()