        elem *= coeff
    return res


def _optimal(first, count):
    # Returns (sum, coeff ** count)
    if count & 1:
        sub_sum, sub_pow = _optimal(first, count - 1)
        return first + sub_sum * coeff, sub_pow * coeff
    if not count:
        return 0, 1
    sub_sum, sub_pow = _optimal(first, count >> 1)
    return sub_sum * (1 + sub_pow), sub_pow * sub_pow


def optimal(count):
    """Optimal algorithm based on the idea similar to the binary exponentiation."""
    return _optimal(start, count)[0]


if __name__ == '__main__':
    core.run(
        'geom_sum', None,
        core.optimized(naive) + [
            ('optimal', optimal),
        ],
        [
            (None, 'linear', core.linear_scale(10000, 15)),
        ],
    )
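# Illustration: the splitting identity behind _optimal(). With
# S(m) = first * (1 + coeff + ... + coeff**(m - 1)), the recursion uses
#   S(m + 1) = first + coeff * S(m)         (odd step)
#   S(2 * m) = S(m) * (1 + coeff ** m)      (even step)
# so only O(log count) multiplications are needed. The sketch below restates
# the same idea with explicit parameters instead of the module-level
# `start`/`coeff` globals; the names `geom_sum_naive` and `geom_sum_fast`
# are illustrative and not part of the benchmark suite.


def geom_sum_naive(first, coeff, count):
    # Direct O(count) reference sum of the first `count` terms.
    res, elem = 0, first
    for _ in range(count):
        res += elem
        elem *= coeff
    return res


def geom_sum_fast(first, coeff, count):
    # Returns (sum of the first `count` terms, coeff ** count).
    if not count:
        return 0, 1
    if count & 1:
        sub_sum, sub_pow = geom_sum_fast(first, coeff, count - 1)
        return first + sub_sum * coeff, sub_pow * coeff
    sub_sum, sub_pow = geom_sum_fast(first, coeff, count >> 1)
    return sub_sum * (1 + sub_pow), sub_pow * sub_pow


assert geom_sum_fast(3, 7, 25)[0] == geom_sum_naive(3, 7, 25)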
    d = b * b + a * a
    if n & 1:
        return d, c + d
    return c, d


def fast_doubling(n):
    """Fast doubling - implementation via matrix exponentiation with the
    redundant calculations removed."""
    return _fast_doubling(n)[0]


if __name__ == '__main__':
    # Test all functions on different testcases
    core.run(
        'fib', None,
        core.optimized(naive) + [
            ('matrices', classic_matrices),
            ('fast dbl', fast_doubling),
        ],
        [
            ('small', 'linear', core.linear_scale(30000, 15)),
            ('big', 'linear', core.linear_scale(300000, 15)),
        ],
    )

    # Test optimized function with different settings
    core.run(
        'fib', None,
        core.optimized(naive),
        [('demo', 'linear', core.linear_scale(60000, 30))],
    )
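# Illustration: the fragment above is the tail of the fast-doubling helper,
# which rests on the standard identities (with a = F(k), b = F(k + 1)):
#   F(2k)     = a * (2 * b - a)      -> c
#   F(2k + 1) = a * a + b * b        -> d
# so the odd case returns (F(2k + 1), F(2k + 2)) = (d, c + d) and the even
# case returns (c, d). A self-contained sketch for reference only; the name
# `fib_pair` is illustrative and not part of the benchmark suite.


def fib_pair(n):
    # Returns (F(n), F(n + 1)) via fast doubling.
    if not n:
        return 0, 1
    a, b = fib_pair(n >> 1)
    c = a * (2 * b - a)
    d = a * a + b * b
    if n & 1:
        return d, c + d
    return c, d


assert [fib_pair(i)[0] for i in range(10)] == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]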
            a += i
            res += a
        else:
            # Test "else" case
            res += 8943 * (res ^ 2321)

    # Test that local and global variables were successfully assigned
    # after the ending of the loop
    res += global_var + local_var

    r = 3
    # Loop with unused counter
    for it1 in xrange(-3231, n):
        r = r - 33 + d * 2

    q = 43
    # Loop with used and modified counter
    for it2 in xrange(-3231, n, 4345):
        q = (3 + 4) * q - (it2 * 2 - 8) * 3
        it2 = q + 3 - e

    # Empty loop
    for it3 in xrange(n):
        pass

    # Return ordered list with values of all local variables
    return tuple(sorted(locals().items(), key=lambda item: item[0]))


if __name__ == "__main__":
    core.run(
        "int_operations", None, core.optimized(naive),
        [(None, "linear", core.linear_scale(600000, 15))],
    )