def retrieval_benchmark(self, DBO, samples, silent=False):
    # Time one DBO.get_thread() call per sample and record a tick for each.
    timer = Timer()
    timer.start()
    for sample in samples:
        DBO.get_thread(sample)
        timer.tick()
    timer.stop()
    if not silent:
        timer.show()
    return timer.ticks
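The benchmark above only exercises a Timer object with start(), tick(), stop(), show() and a ticks attribute; the class itself is not shown. A minimal sketch of such a helper built on time.perf_counter could look like the following (only the method names come from the snippet, the implementation is an assumption):

import time

class Timer:
    # Sketch of the Timer interface used above; not the original implementation.
    def __init__(self):
        self.ticks = []                        # elapsed time of each retrieval
        self._start = self._last = None

    def start(self):
        self._start = self._last = time.perf_counter()

    def tick(self):
        now = time.perf_counter()
        self.ticks.append(now - self._last)    # interval since the previous tick
        self._last = now

    def stop(self):
        self.total = time.perf_counter() - self._start

    def show(self):
        mean = sum(self.ticks) / len(self.ticks) if self.ticks else 0.0
        print("%d retrievals in %.4fs (mean %.6fs each)"
              % (len(self.ticks), self.total, mean))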
Example #2
from timeit import Timer  # the Timer(stmt, setup) calls below follow the timeit.Timer signature


def routeMax(tri):
    # Exhaustive recursion: the best route is the apex plus the better of
    # the two sub-triangles hanging below it.
    if len(tri) == 1:
        return tri[0][0]
    head = tri[0][0]
    # Build the sub-triangles as lists (under Python 3, map() returns an iterator
    # that would break the recursive len() calls).
    left = [line[:count] for line, count in zip(tri[1:], range(1, len(tri)))]
    right = [line[1:] for line in tri[1:]]
    return head + max(routeMax(left), routeMax(right))


def costMax(tri):
    # Bottom-up dynamic programming: work from the base row upwards, adding to
    # each cell the larger of the two cells beneath it in the previous row.
    tri.reverse()
    cost = [row[:] for row in tri]  # copy the rows so the caller's lists are not mutated
    for i in range(1, len(cost)):
        for j in range(len(cost[i])):
            cost[i][j] += max(cost[i - 1][j], cost[i - 1][j + 1])
    return cost  # cost[-1][0] holds the maximum route total


euler18("complex.txt")
t = Timer("euler18('triangle.txt')", "euler18")
t.timeit()
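As a quick check of both functions, the four-row example triangle from the Project Euler 18 problem statement (best route 3 + 7 + 4 + 9 = 23) can be fed to them directly; this small demo is an addition, not part of the original solution:

sample = [[3],
          [7, 4],
          [2, 4, 6],
          [8, 5, 9, 3]]
print(costMax([row[:] for row in sample])[-1][0])  # 23 (a copy is passed because costMax reverses its input)
print(routeMax(sample))                            # 23 again, via the exhaustive recursion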
Example #3
    parser.add_argument("--no-processing",
                        action='store_true',
                        help="Do not process the aggregated files",
                        default=False)
    parser.add_argument(
        '--sample',
        nargs='?',
        const=10,
        type=int,
        help="Include a sample only from each book. Default size 10.")
    args = vars(parser.parse_args())
    if args['list_books']:
        print_books_list()
        exit(0)
    if args["books"] is not None and args["books"].lower() != "all":
        args["books"] = [s.strip() for s in args["books"].split(",")]
        for b in args["books"]:  # Check if books exist
            if b not in dict_to_long:
                print(
                    "Unknown Book: %s, please use -l to list available books."
                    % b)
                exit(-1)
    else:
        args["books"] = list(dict_to_long.keys())
    print_banner()
    aggregated_files = []
    aggregate(args["books"], args["sample"])
    if args['no_processing']:
        exit(0)
    with Timer('process(aggregated_files)'):
        process(aggregated_files)
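The "with Timer('process(aggregated_files)')" block implies a Timer that acts as a labelled context manager, which is not timeit.Timer. A minimal sketch of such a helper (an assumption about the project's Timer, not its actual code) could be:

import time

class Timer:
    # Labelled timing context manager; a sketch, not the project's own Timer.
    def __init__(self, label=""):
        self.label = label

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        print("%s took %.2fs" % (self.label, time.perf_counter() - self.start))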