def test_generate_random_choice_weighted():
    """Drive ``generate_random_choices`` with a hand-built Metropolis-style
    distance selector and verify that the packaged exponential variant
    (``generate_random_choices_exponential``) produces the identical pair
    sequence under the same RNG seed.
    """
    numpy.random.seed(0xDEADBEEF)
    points = numpy.array([(8, 1), (2, 1), (5, 1), (7, 4), (4, 0)], dtype=int)

    def euclid(p, q):
        # Euclidean distance between two 2-D points.
        return numpy.hypot(*(p - q))

    # Sanity-check the metric on a pair with a known distance.
    eq_(euclid(points[2], points[1]), 3)

    # The longest hop between consecutive points normalizes the selector.
    longest = max(euclid(p, q) for p, q in zip(points[:-1], points[1:]))
    eq_(longest, euclid(points[0], points[1]))

    def accept(p, q):
        # Monte-Carlo acceptance: compare a uniform draw against
        # exp(-(d / d_max)^2).  One RNG draw per candidate pair, exactly
        # as in the reference implementation.
        draw = numpy.random.random_sample()
        scaled = (euclid(p, q) * 1.0 / longest) ** 2
        return draw < math.exp(-scaled)

    picks = list(generate_random_choices(5, points, accept))
    eq_(
        [(list(pair[0]), list(pair[1])) for pair in picks],
        [([8, 1], [2, 1]), ([5, 1], [2, 1]), ([7, 4], [8, 1]),
         ([8, 1], [7, 4]), ([7, 4], [8, 1])],
    )

    # Re-seed and confirm the exponential convenience wrapper replays the
    # exact same choices — i.e. both paths share one implementation.
    numpy.random.seed(0xDEADBEEF)
    reference = list(generate_random_choices_exponential(5, points))
    assert numpy.array_equal(picks, reference)
def test_generate_random_choice_weighted():
    """Check that a custom weighted Monte-Carlo selector fed to
    ``generate_random_choices`` reproduces, draw for draw, the output of
    ``generate_random_choices_exponential`` when both start from the same
    seed.
    """
    numpy.random.seed(0xDEADBEEF)
    nodes = numpy.array([(8, 1), (2, 1), (5, 1), (7, 4), (4, 0)], dtype=int)

    def dist(u, v):
        # Straight-line distance between two points.
        return numpy.hypot(*(u - v))

    # Known pair: distance must be exactly 3.
    eq_(dist(nodes[2], nodes[1]), 3)

    # Largest consecutive-pair distance; used to normalize below.
    consecutive = zip(nodes[:-1], nodes[1:])
    d_max = max(dist(u, v) for u, v in consecutive)
    eq_(d_max, dist(nodes[0], nodes[1]))

    def selector(u, v):
        # Accept a pair when a fresh uniform sample falls under the
        # Gaussian-shaped weight exp(-(d / d_max)^2).
        sample = numpy.random.random_sample()
        weight = math.exp(-((dist(u, v) * 1.0 / d_max) ** 2))
        if sample < weight:
            return True
        return False

    chosen = list(generate_random_choices(5, nodes, selector))
    expected = [([8, 1], [2, 1]), ([5, 1], [2, 1]), ([7, 4], [8, 1]),
                ([8, 1], [7, 4]), ([7, 4], [8, 1])]
    eq_([(list(pair[0]), list(pair[1])) for pair in chosen], expected)

    # Same seed through the canonical exponential selector must yield the
    # same pairs, proving the two code paths share an implementation.
    numpy.random.seed(0xDEADBEEF)
    canonical = list(generate_random_choices_exponential(5, nodes))
    assert numpy.array_equal(chosen, canonical)
host=args.host, port=args.port) # execute some jobs route_count = 0 # Do the future mapping in chunks, to prevent memory # blowup. I don't understand why the executor keeps # so much crap around. chunk_size = 1000 nchunks = max(int(math.ceil(args.N / chunk_size)), 1) for ichunk in range(nchunks): log.info("Processing %i route block %i/%i", chunk_size, ichunk + 1, nchunks) # We run each route forward and backwards to better # describe the use-case for that region. routes_to_run = rr.generate_forward_backward_pairs( rr.generate_random_choices_exponential(chunk_size, nodes)) commit_every = 20 for route in executor.map(route_runner, routes_to_run): coords, query_url, steps = route if not len(steps): log.error("No steps returned for route: %s", coords) continue route_hash = models.OSRMRoute.hash_route( tuple(coords[0]), tuple(coords[1]), ) ormified_route = models.OSRMRoute( route_hash=route_hash,
route_runner = functools.partial( rr.run_route, host=args.host, port=args.port) # execute some jobs route_count = 0 # Do the future mapping in chunks, to prevent memory # blowup. I don't understand why the executor keeps # so much crap around. chunk_size = 1000 nchunks = max(int(math.ceil(args.N / chunk_size)), 1) for ichunk in range(nchunks): log.info("Processing %i route block %i/%i", chunk_size, ichunk + 1, nchunks) # We run each route forward and backwards to better # describe the use-case for that region. routes_to_run = rr.generate_forward_backward_pairs( rr.generate_random_choices_exponential( chunk_size, nodes)) commit_every = 20 for route in executor.map(route_runner, routes_to_run): coords, query_url, steps = route if not len(steps): log.error("No steps returned for route: %s", coords) continue route_hash = models.OSRMRoute.hash_route( tuple(coords[0]), tuple(coords[1]), ) ormified_route = models.OSRMRoute( route_hash=route_hash,