Code example #1
def test_play_the_game_less_badly():
    bad_seeds_01_env = BadSeeds01(seed_count=5,
                                  bad_seed_count=3,
                                  max_episode_length=2 * 2 + 3 * 3 + 1)

    # measure the good seeds twice
    # measure the bad seeds three times
    for time_i, seed_i in enumerate(
            concatv(
                take(
                    n=2 * len(bad_seeds_01_env.good_seeds),
                    seq=cycle(bad_seeds_01_env.good_seed_indices),
                ),
                take(
                    n=3 * len(bad_seeds_01_env.bad_seeds),
                    seq=cycle(bad_seeds_01_env.bad_seed_indices),
                ),
            )):
        next_state, terminal, reward = bad_seeds_01_env.execute(actions=seed_i)
        assert next_state[time_i, seed_i] != 0.0
        assert terminal is False
        assert reward == 0.0

    # measure the first good seed again
    next_state, terminal, reward = bad_seeds_01_env.execute(
        actions=bad_seeds_01_env.good_seed_indices[0])
    assert next_state[-1, bad_seeds_01_env.good_seed_indices[0]] != 0.0
    assert terminal is True
    # reward is the number of times the least-measured seed was measured
    assert reward == 2.0
Code example #2
def template_method(test_config, generator):
    counter = 0
    half_items = ceil(test_config.num_to_consume / 2)
    iterator = chain(take(half_items, generator(half_items)),
                     take(half_items, generator(half_items)))
    for x in iterator:
        counter += 1
Code example #3
def predict_euclidean(limit, distance_fn, fm):
    point = to_rtree_coords(fm)
    # we need to limit the results of nearest because it returns more than
    # limit when the model overfits to the training data
    nearest_neighbors = take(
        limit, trainset_rtree.nearest(point, num_results=limit, objects=True))
    return list(map(lambda x: x.object, nearest_neighbors))
Code example #4
def test_play_the_game_less_badly():
    bad_seeds_03_env = BadSeeds03(seed_count=5,
                                  bad_seed_count=3,
                                  max_episode_length=3 + 2 * 2 + 3 * 3 + 1)

    # measure the good seeds twice
    # measure the bad seeds three times
    for time_i, seed_i in enumerate(
            concatv(
                take(
                    n=2 * len(bad_seeds_03_env.good_seeds),
                    seq=cycle(bad_seeds_03_env.good_seed_indices),
                ),
                take(
                    n=3 * len(bad_seeds_03_env.bad_seeds),
                    seq=cycle(bad_seeds_03_env.bad_seed_indices),
                ),
            )):
        time_i += 3
        next_state, terminal, reward = bad_seeds_03_env.execute(actions=seed_i)
        assert bad_seeds_03_env.history_array[time_i, seed_i] != 0.0
        assert terminal is False
        assert reward == 0.0

    measurement_counts, measured_seed_counts = count_measurements(
        bad_seeds_03_env.history_array)
    expected_measurement_counts = np.zeros_like(measurement_counts)
    expected_measurement_counts[0, bad_seeds_03_env.good_seed_indices] = 5
    expected_measurement_counts[0, bad_seeds_03_env.bad_seed_indices] = 6
    assert np.all(measurement_counts == expected_measurement_counts)

    # measure the first good seed again
    next_state, terminal, reward = bad_seeds_03_env.execute(
        actions=bad_seeds_03_env.good_seed_indices[0])

    print(f"history:\n{bad_seeds_03_env.history_array}")
    measurement_counts, measured_seed_counts = count_measurements(
        bad_seeds_03_env.history_array)
    print(f"measurement_counts: {measurement_counts}")

    assert next_state[-1, bad_seeds_03_env.good_seed_indices[0]] != 0.0
    assert terminal is True
    # reward is the number of times the least-measured seed was measured
    assert reward == 6.0

    expected_measurement_counts[0, bad_seeds_03_env.good_seed_indices[0]] += 1
    assert np.all(measurement_counts == expected_measurement_counts)
Code example #5
File: toolz-test.py Project: wfelipe3/learning
def test_last_drop_take():
    l = [1, 2, 3]
    assert_that(pvector(drop(2, l))).is_equal_to(v(3))
    assert_that(pvector(take(2, l))).is_equal_to(v(1, 2))
    assert_that(pmap(groupby(first,
                             ['ABC', 'ABA', 'BAB', 'BAA']))).is_equal_to(
                                 m(A=['ABC', 'ABA'], B=['BAB', 'BAA']))
    assert_that(pmap(groupby(identity,
                             ['ABC', 'ABA', 'BAB', 'BAA']))).is_equal_to(
                                 m(ABC=['ABC'],
                                   ABA=['ABA'],
                                   BAB=['BAB'],
                                   BAA=['BAA']))
Code example #6
        def evaluate_elem(elem):
            def match(p):
                data_fn = juxt(map(op.itemgetter, label['data_keys']))
                # try deleting tuple func call
                return data_fn(elem) == data_fn(p)

            predictions = list(take(limit, predictions_fn(label, elem)))
            if match_one is True:
                total = min(1, len(predictions))
                correct = min(1, len(list(filter(match, predictions))))
            else:
                total = len(predictions)
                correct = len(list(filter(match, predictions)))

            return total, correct
Code example #7
File: numa.py Project: arif29march/smog
def determine_fit(nodes, mem_type, delta="smaller", factor=2, size_fmt="MB"):
    """
    This function will give an amount of memory suitable for allotting to an
    instance.  It looks up the free or total memory size (that is contained in
    the nodes dict), and will determine how much memory to allocate accordingly

    :param nodes: returned dict from NUMA.get_host_numactl
    :param mem_type: one of "free" or "size"
    :param delta: if "smaller", divide the numa available memory by factor,
                  otherwise multiply by factor
    :param factor: A multiplier (if delta is larger) or divisor (if smaller)
                   for the amount of memory to return
    :return: a dict of {node_num: {mem_type: calculated size}}
    """
    kb, mb, gb, tb = take(4, bytes_iter())
    mult = {"KB": kb, "MB": mb, "GB": gb, "TB": tb}

    if "number" in nodes:
        _ = nodes.pop("number")

    size = {}
    for node in nodes:
        size[node] = {}
        subnode = nodes[node]
        for mem, qual in [
                subnode[mem_type].split() for attr in subnode.keys()
                if attr in [mem_type]
        ]:
            total_mem_bytes = int(mem) * mult[qual]
            for two in powers_two():
                size_qualified = two * mult[qual]
                if total_mem_bytes > size_qualified:
                    continue
                else:
                    if delta == "smaller":
                        size_qualified /= factor
                    else:
                        size_qualified *= factor
                    size_qualified /= mult[size_fmt]
                    size[node].update({mem_type: size_qualified})
                    break
    return size
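
A minimal usage sketch for the determine_fit example above. The shape of the nodes dict and the bytes_iter / powers_two helpers are assumptions inferred from how the function uses them; they are not part of the original snippet.

def bytes_iter():
    # assumed helper: successive byte sizes for KB, MB, GB, TB, ...
    n = 1024
    while True:
        yield n
        n *= 1024


def powers_two():
    # assumed helper: 1, 2, 4, 8, ...
    n = 1
    while True:
        yield n
        n *= 2


# hypothetical NUMA topology in the shape determine_fit expects:
# each node maps its "free"/"size" attribute to an "<amount> <unit>" string
nodes = {
    "number": 2,
    0: {"free": "7974 MB", "size": "8192 MB"},
    1: {"free": "8050 MB", "size": "8192 MB"},
}

# with these assumed helpers this prints {0: {'free': 4096.0}, 1: {'free': 4096.0}}:
# each node's free memory is rounded up to the next power of two (8192 MB)
# and halved because delta defaults to "smaller"
print(determine_fit(nodes, "free"))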
Code example #8
def template_method(test_config, generator):
    counter = 0
    for x in take(test_config.num_to_consume,
                  generator(test_config.num_to_consume)):
        counter += 1
Code example #9
                        default=0)
    parser.add_argument('--vmax',
                        type=float,
                        help='set the color scaling for the image: max value',
                        default=1)
    args = parser.parse_args()

    print(f'loading model...', file=sys.stderr)
    model = load_model(args.model_path)

    print(f'model summary...', file=sys.stderr)
    model.summary()

    items = list(
        take(
            args.visualize_limit_per_images,
            filter(partial(image_exists, args.base_image_path),
                   ujson.load(args.input_metadata_file))))

    print('loaded items!', file=sys.stderr)

    save_visualization = compose(
        visualize, partial(compute_scores, model, args.layer_name), load_img)

    activations = map(save_visualization, items)

    arr_of_activations = np.asarray(list(activations))
    new_shape = ((args.visualize_limit_per_images - 1) *
                 arr_of_activations[0].shape[0] +
                 arr_of_activations[0].shape[0],
                 arr_of_activations[0].shape[1])
Code example #10
        help='Name of the layer: layer_1, layer_2,... layer_20 and output')
    parser.add_argument('-a',
                        '--activation',
                        default=False,
                        action='store_true',
                        help='Add layer activations to output ?')

    args = parser.parse_args()

    model = load_model(args.model_path)
    output = Model(inputs=model.input,
                   outputs=model.get_layer(args.layer_name).output)

    items = list(
        take(
            args.limit,
            filter(partial(image_exists, args.base_image_path),
                   ujson.load(args.input_metadata_file))))
    print('loaded items!', file=sys.stderr)

    compute_score = compose(flatten_score, partial(predict_score, output),
                            partial(load_image, args.base_image_path))

    scores = map(compute_score, items)
    print('scoring items...', file=sys.stderr)

    def binarize(threshold, score):
        return list(map(lambda x: 1 if float(x) > threshold else 0, score))

    to_feature_map = compose(
        partial(binarize, 0.5),
        partial(save_score_map, not args.save_score_map,
Code example #11
File: test_itertoolz.py Project: karansag/toolz
def test_concatv():
    assert list(concatv([], [], [])) == []
    assert (list(take(5, concatv(['a', 'b'], range(1000000000)))) ==
            ['a', 'b', 0, 1, 2])
Code example #12
def sample_images(xs):
    ads = random.sample(xs, limit if len(xs) >= limit else len(xs))
    images = mapcat(second, ads)
    return list(take(limit, images))
Code example #13
                        choices=['points', 'images'],
                        help='Image height in  pixels.')
    parser.add_argument(
        '-i',
        '--viz_base_path',
        type=str,
        help='When specified, the viz will be saved to this path.')
    parser.add_argument('--viz_activations',
                        default=False,
                        action='store_true',
                        help='Visualize activations pre-binarization!')

    args = parser.parse_args()
    print(args)

    items = list(take(args.limit, ujson.load(args.feature_maps)))

    data_points = map(
        partial(feature_map,
                'activations' if args.viz_activations else 'feature_maps',
                args.plot_type == 'images'), items)

    if args.plot_type == 'points':

        feature_maps, im = display_points(data_points, args.plot_type)
        X_2d = t_sne(np.asarray(feature_maps))
        L = np.array(list(map(partial(label, args.clazz), items)))
        viz = plot_points(args.clazz, X_2d, L)
        if args.viz_base_path is not None:
            tag = 'activations' if args.viz_activations else 'binarized'
            viz.savefig(
Code example #14
def generate_map_data(problem, x0, t, params):
    f = problem(params)
    # take(n, seq) expects the count first; materialize to a list before np.array
    x = list(take(t + 1, iterate(f, x0)))
    x = np.array(x)
    return x[:-1], x[1:]
Code example #15
File: test_itertoolz.py Project: karansag/toolz
def test_take():
    assert list(take(3, 'ABCDE')) == list('ABC')
    assert list(take(2, (3, 2, 1))) == list((3, 2))
Code example #16
                        '--predict_limit',
                        type=int,
                        default=10,
                        help='Number of predictions to compute per image')
    parser.add_argument('-l',
                        '--limit',
                        type=int,
                        default=sys.maxsize,
                        help='Limit the items for compute predictions for')
    parser.add_argument('--distance',
                        type=str,
                        choices=meta.keys(),
                        default='euclidean')
    parser.add_argument('-p',
                        '--number_of_processes',
                        type=int,
                        default=mp.cpu_count())
    parser.add_argument('-c', '--chunk_size', type=int, default=1)
    args = parser.parse_args()

    print(f'loading trainset...', file=sys.stderr)
    trainset = ujson.load(args.trainset_file)
    print(f'loaded trainset {len(trainset)}', file=sys.stderr)

    print(f'loading dataset...', file=sys.stderr)
    dataset = list(take(args.limit, ujson.load(args.dataset_file)))
    print(f'loaded dataset {len(dataset)}', file=sys.stderr)

    predict(meta[args.distance], trainset, dataset, args.predict_limit,
            args.output_file)
Code example #17
File: test_itertoolz.py Project: karansag/toolz
def test_iterate():
    assert list(itertools.islice(iterate(inc, 0), 0, 5)) == [0, 1, 2, 3, 4]
    assert list(take(4, iterate(double, 1))) == [1, 2, 4, 8]
Code example #18
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=
        'Chart the precision of vehicle detection (all.json from CF)')
    parser.add_argument(
        'dataset',
        nargs='?',
        type=argparse.FileType('r'),
        default=sys.stdin,
        help='The dataset to process. Reads from stdin by default.')
    args = parser.parse_args()

    dataset = ujson.load(args.dataset)

    data_p0_6 = map(lambda n: (n, precision_at(take(n, dataset), 0.6)),
                    range(100, len(dataset) + 1, 100))

    data_p0_7 = map(lambda n: (n, precision_at(take(n, dataset), 0.7)),
                    range(100, len(dataset) + 1, 100))

    data_p0_8 = map(lambda n: (n, precision_at(take(n, dataset), 0.8)),
                    range(100, len(dataset) + 1, 100))

    data_p0_85 = map(lambda n: (n, precision_at(take(n, dataset), 0.85)),
                     range(100, len(dataset) + 1, 100))