def increment(img_rdd, delay, start, args):
    """Increment the data of a Nifti image by 1.

    :param filename: str -- representation of the path for the input file.
    :param data: nifti1Image -- image to manipulate.
    :param metadata: tuple -- of the form (image affine, image header).
    :param delay: int -- sleep time for the task
    :return: tuple -- of the form (filename, data, (image affine,
    image header), iteration+1).
    """
    start_time = time() - start

    filename = img_rdd[0]
    data = img_rdd[1]
    metadata = img_rdd[2]

    data += 1
    sleep(delay)

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            increment.__name__,
        )

    return filename, data, metadata
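# Hypothetical usage sketch (not from the original source): increment() takes a
# (filename, data, (affine, header)) record plus a delay, the application start
# time, and an argparse-style object. Assuming `from time import sleep, time`
# is in scope where increment() is defined:
from argparse import Namespace
from time import time

import numpy as np

record = ("sub-01_T1w.nii", np.zeros((2, 2, 2), dtype=np.int16),
          (np.eye(4), None))
args = Namespace(benchmark=False, output_dir=".", experiment="demo")

filename, data, metadata = increment(record, 0, time(), args)
assert data.max() == 1  # every voxel was incremented once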
Example n. 2
def main():
    parser = argparse.ArgumentParser(description='SGEMM kernel call from CuPy')
    parser.add_argument('--gpu', '-g', default=0, type=int, help='ID of GPU.')
    parser.add_argument('--m', type=int, default=np.random.randint(1000, 1500))
    parser.add_argument('--n', type=int, default=np.random.randint(1000, 1500))
    parser.add_argument('--k', type=int, default=np.random.randint(500, 3000))
    args = parser.parse_args()

    print('m={} n={} k={}'.format(args.m, args.n, args.k))
    print('start benchmarking')
    print('')

    with cp.cuda.Device(args.gpu):
        A = cp.random.uniform(low=-1., high=1.,
                              size=(args.m, args.k)).astype(cp.float32)
        B = cp.random.uniform(low=-1., high=1.,
                              size=(args.k, args.n)).astype(cp.float32)

        # check correctness
        cp.testing.assert_array_almost_equal(sgemm(A, B),
                                             cp.dot(A, B),
                                             decimal=3)

        # dry run
        for _ in range(3):
            sgemm(A, B)
        kernel_times = benchmark(sgemm, (A, B), n_run=5)

        for _ in range(3):
            cp.dot(A, B)
        cublas_times = benchmark(cp.dot, (A, B), n_run=5)

    print('=============================Result===============================')
    print('hand written kernel time {} ms'.format(np.mean(kernel_times)))
    print('cuBLAS              time {} ms'.format(np.mean(cublas_times)))
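# The `benchmark` helper above is not shown in this snippet. A plausible
# minimal sketch matching the benchmark(func, args, n_run) call sites: time
# each run with CUDA events and return per-run times in milliseconds.
import cupy as cp

def benchmark(func, args, n_run=5):
    times = []
    for _ in range(n_run):
        start = cp.cuda.Event()
        end = cp.cuda.Event()
        start.record()
        func(*args)
        end.record()
        end.synchronize()  # wait for the kernel before reading the timer
        times.append(cp.cuda.get_elapsed_time(start, end))  # milliseconds
    return times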
def _benchmark_velocyto(vlm: vcy.VelocytoLoom, *, n_jobs: int = 32) -> dict:
    from utils import benchmark

    estimate_transition_prob = benchmark(vlm.estimate_transition_prob)
    calculate_embedding_shift = benchmark(vlm.calculate_embedding_shift)
    prepare_markov = benchmark(vlm.prepare_markov)
    run_markov = benchmark(vlm.run_markov)

    print("Calculating transition probabilities")
    etp_mem, _ = estimate_transition_prob(  # only works on 2D embedding
        hidim="Sx_sz",
        embed="ts",
        transform="sqrt",
        psc=1,
        n_neighbors=None,
        knn_random=True,
        n_jobs=n_jobs,
    )
    print("Calculating embedding shift")
    ces_mem, _ = calculate_embedding_shift()

    print("Preparing Markov")
    pm_mem, _ = prepare_markov(sigma_D=1, sigma_W=0.5)
    print("Running Markov")
    rm_mem, _ = run_markov()

    return {
        "estimate_transition_probability": etp_mem,
        "calculate_embedding_shift": ces_mem,
        "prepare_markov": pm_mem,
        "run_markov": rm_mem,
    }
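# `utils.benchmark` is imported but not shown. Judging by the call sites
# (`mem, _ = wrapped(...)`), it returns (peak memory, result). A sketch of one
# plausible implementation, assuming memory_profiler is available:
from functools import wraps

from memory_profiler import memory_usage

def benchmark(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # memory_usage runs func and returns (peak memory in MiB, return value)
        peak, result = memory_usage((func, args, kwargs),
                                    max_usage=True, retval=True)
        return peak, result
    return wrapper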
Example n. 4
def get_voxels(filename, start, args):
    """Retrieve voxel intensity of a Nifti image as a byte stream.

    Parameters
    ----------
    filename: str
        Representation of the path for the input file.

    Returns
    -------
    data : da.array
        Data of the nifti imaeg read.
    """
    start_time = time() - start

    img = None
    with open(filename, "rb") as f_in:
        fh = nib.FileHolder(fileobj=BytesIO(f_in.read()))
        img = nib.Nifti1Image.from_file_map({"header": fh, "image": fh})
    data = img.get_fdata(caching="unchanged")
    data = nib.casting.float_to_int(data, np.int16)

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            get_voxels.__name__,
        )

    return da.from_array(data)
Example n. 5
def save_results(img_rdd, assignments, *, start, args):
    """Save a Nifti image.

    Parameters
    ----------
    img_rdd: (str, np.array, (np.array, np.array))
        Filename, image, and image header and affine.
    assignments: iterable of (float, (int, int))
        Voxel class paired with its (intensity, frequency).
    start : float
        Start time of the application.
    args : {str: Any}
        Runtime arguments of the application.

    Returns
    -------
    f_out : str
        Output path where the image is saved.
    "SUCCESS" : str
        Indicates that the pipeline succeeded.
    """
    start_time = time() - start
    filename = img_rdd[0]
    img = img_rdd[1]
    metadata = img_rdd[2]

    assigned_class = {class_[0] for class_ in assignments}

    for class_ in assigned_class:
        assigned_voxels = list(
            map(lambda x: x[1][0], filter(lambda x: x[0] == class_,
                                          assignments)))
        img[np.where(np.isin(img, assigned_voxels))] = class_

    bn = os.path.basename("classified-" + filename[:-3] +
                          "nii")  # Save in nifti format
    f_out = os.path.join(args.output_dir, "images/" + bn)

    # save classified image
    classified_img = nib.Nifti1Image(img, metadata[0], header=metadata[1])
    nib.save(classified_img, f_out)

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            save_results.__name__,
        )

    return f_out, "SUCCESS"
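# The relabelling loop above boils down to one masked assignment per class.
# A toy run with a hypothetical assignments list (class 7.0 owns intensity 2,
# which occurs twice):
import numpy as np

img = np.array([1, 2, 3, 2])
assignments = [(7.0, (2, 2))]

for class_, (intensity, _) in assignments:
    img[np.isin(img, [intensity])] = class_
assert img.tolist() == [1, 7, 3, 7]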
Example n. 6
def do_highlight_C(highlight_cursor, faces, points):
    global self_model, self_proj, self_view

    # the gluProject helper would retrieve these for us,
    # but fetching them once here is a worthwhile speedup
    model = glGetDoublev( GL_MODELVIEW_MATRIX )
    proj = glGetDoublev( GL_PROJECTION_MATRIX )
    view = glGetIntegerv( GL_VIEWPORT )

    # same camera ?
    def same_float_array(X, Y):
        Z = X - Y
        for i in Z:
            for j in i:
                if abs(j) > 0.00001:
                    return False
        return True

    V = view == self_view
    if not isinstance(V, bool):
        V = V.all()
        
    same_camera = V and \
        same_float_array(model, self_model) and \
        same_float_array(proj, self_proj)

    self_model = model
    self_proj = proj
    self_view = view

    cursor = Point2D(*highlight_cursor)

    if not same_camera:
        with benchmark('glu'):

            model_as_list = model[0].tolist() + \
                model[1].tolist() + \
                model[2].tolist() + \
                model[3].tolist() 
            proj_as_list = proj[0].tolist() + \
                proj[1].tolist() + \
                proj[2].tolist() + \
                proj[3].tolist()
            view_as_list = view.tolist()

            projall(model_as_list, proj_as_list, view_as_list)

    with benchmark('python'):
        hits = gethits(cursor.x, cursor.y)

        display_hits(hits, points)
        return True
Example n. 7
def self_play(n_iterations=10, ben_steps=1000, training_steps=int(1e4),
              n_eval_episodes=100, **kwargs):
  """
  Returns an agent that learns from playing against himself from random to
  optimal play.
  """
  agents = [RLAgent(**kwargs), RandomAgent()]
  for _ in range(n_iterations):
    benchmark(agents[0], agents[1], ben_steps, training_steps, n_eval_episodes)
    # adding the trained agent as the new opponent to exploit
    agents[1] = opposite_agent(agents[0])
    agents[1].eps = agents[0].original_eps
  return agents[0]
Example n. 8
def main():
    benchmark()
    state = rbg_game.new_game_state()
    begin = time.time()
    result = perft(state, 3)
    end = time.time()
    print('Calculating perft for depth 3 took', end - begin, 's')
    print('The result is', result[0], 'leaves and', result[1], 'nodes')
    if result != [11132, 11639]:
        print('TEST FAILED')
        print('Expected 11132 leaves and 11639 nodes')
    else:
        print('TEST SUCCEEDED')
Example n. 9
def main():
    ao = ArgsOptions()
    options = [ao.options.fn, ao.options.verbose]

    with benchmark('load'):
        sc = load(*options)
        print(sc)
def combine_histogram(x, y, *, args, start):
    start_time = time() - start

    rv = {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)}

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            "all_file",
            args.output_dir,
            args.experiment,
            "combine_histogram",
        )
    return rv
def combine_histogram(x, y, *, args, start):
    start_time = time() - start

    rv = x + y

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            "all_file",
            args.output_dir,
            args.experiment,
            "combine_histogram",
        )
    return rv
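# Both combine_histogram variants are shaped for a commutative reduce. A toy
# sketch using the dict-based variant (the Namespace fields are assumptions
# mirroring the benchmark guard above):
from argparse import Namespace
from functools import reduce
from time import time

args = Namespace(benchmark=False, output_dir=".", experiment="demo")
start = time()

parts = [{0: 3, 7: 1}, {7: 2, 9: 5}, {0: 1}]
total = reduce(lambda x, y: combine_histogram(x, y, args=args, start=start),
               parts)
assert total == {0: 4, 7: 3, 9: 5}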
def flatten(arr, *, args, start, filename):
    start_time = time() - start

    arr = arr.flatten("F")

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            "flatten",
        )
    return filename, arr
Example n. 13
def main():
    ao = ArgsOptions()
    options = [ao.options.fn,
               ao.options.verbose]

    with benchmark('load'):
        sc = load(*options)
        print(sc)
def calculate_histogram(arr, *, args, start, filename):
    start_time = time() - start

    histogram = np.histogram(arr, bins=range(2**16))[0]

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            "calculate_histogram",
        )
    return histogram
def save_histogram(histogram, *, args, start):
    start_time = time() - start

    with open(f"{args.output_dir}/histogram.csv", "w") as f_out:
        for i, elm in enumerate(histogram):
            f_out.write(f"{i};{elm}\n")

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            "all_file",
            args.output_dir,
            args.experiment,
            "save_histogram",
        )
def calculate_histogram(arr, *, args, start, filename):
    start_time = time() - start

    histogram = defaultdict(int)
    for x in arr:
        histogram[x] += 1

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            "calculate_histogram",
        )
    return histogram
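# The two calculate_histogram variants compute the same counts, dense versus
# sparse. A quick equivalence check on toy data (not from the original):
import numpy as np

arr = np.array([0, 1, 1, 5], dtype=np.uint16)

dense = np.histogram(arr, bins=range(2**16))[0]  # 65535-bin count vector
assert dense[1] == 2

# The defaultdict variant keeps only the non-zero entries of the same
# histogram, e.g. {0: 1, 1: 2, 5: 1}.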
def increment(filename, start, args, it):
    """Increment the data of a Nifti image by 1.

    :param filename: str -- representation of the path for the input file.
    :param start: float -- start time of application.
    :param args: argparser -- Argparse object.
    :param it: int -- iteration number
    :return: str -- output path.
    """
    start_time = time() - start

    img = nib.load(filename)
    data = np.asanyarray(img.dataobj)

    data = data + 1
    sleep(args.delay)

    out_basename = os.path.basename(filename)

    if it > 0:
        out_basename = "{0}{1}".format(it, out_basename.lstrip(digits))
    else:
        out_basename = "{0}inc-{1}".format(it, out_basename)

    out_path = os.path.join(args.output_dir, out_basename)

    out_img = nib.Nifti1Image(data, img.affine, img.header)
    nib.save(out_img, out_path)

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            increment.__name__,
        )

    return out_path
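# The renaming scheme above is easiest to see on a concrete name (toy example):
# iteration 0 prepends "0inc-"; later iterations strip the leading digits and
# prepend the new iteration number.
from os.path import basename
from string import digits

name = "sub-01.nii"
it0 = "{0}inc-{1}".format(0, basename(name))  # '0inc-sub-01.nii'
it1 = "{0}{1}".format(1, it0.lstrip(digits))  # '1inc-sub-01.nii'
assert it1 == "1inc-sub-01.nii"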
Example n. 18
def main():
    gears = join('test', 'data', 'gears.obj')
    fn = gears
    if len(sys.argv) > 1:
        fn = sys.argv[1]

    sc = load(fn)
    with benchmark('build octree'):
        octree = Octree(sc)

    ray = (octree.bb.min(), octree.bb.max())
    with benchmark('ray octree'):
        print(octree.intersect(ray))

    # debug 
    if fn == gears:
        segment = ([6.0124801866126916, -0.51249832634225589, -9.7930512397503584], (5.9371910904864844, -0.50190367657896617, -10.093763375438117))
        assert octree.intersect(segment)

    octree.write()
def _benchmark_palantir(
    bdata: AnnData,
    size: int,
    col: int,
    annot: pd.DataFrame,
    fs_data: pd.DataFrame,
    n_jobs: int = 32,
) -> Optional[List[float]]:
    from utils import benchmark

    run_palantir = benchmark(palantir.core.run_palantir)
    res = None

    try:
        print(f"Subsetting data to `{size}`, split `{col}`.")
        _add_annotations(bdata, annot)

        assert bdata.n_obs == size

        root_cell = _select_root_cell(bdata)
        final_states = _load_cellrank_final_states(bdata, fs_data)

        if final_states is None:
            print("No final states found, skipping")
            return None
        if root_cell in final_states:
            print("Root cell is in final states, skipping")
            return None

        print("Preprocessing")
        ms_data = _palantir_preprocess(bdata)

        print(
            f"Running with CellRank terminal states `root_cell={root_cell}` and "
            f"`final_states={final_states}`"
        )
        res, _ = run_palantir(
            ms_data,
            root_cell,
            terminal_states=final_states,
            knn=30,
            num_waypoints=int(ceil(size * 0.15)),
            n_jobs=n_jobs,
            scale_components=False,
            use_early_cell_as_start=True,
        )
    except Exception as e:
        print(
            f"Unable to run `Palantir` with size `{size}` on split `{col}`. Reason: `{e}`."
        )
        print(traceback.format_exc())

    return res
Example n. 20
def run_participant(*, subject_id, start, args, site):
    start_time = time() - start

    output_folder = f"{args.output_dir}-{site}"

    subprocess.run(
        f"singularity exec -B /nfs/singularity-image:/run,/nfs:/nfs /nfs/singularity-image/bids_example.simg bash /run/participant.sh {args.bids_dir}/{site} {output_folder} {subject_id}",
        shell=True,
    )

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            subject_id,
            args.benchmark_dir,
            args.experiment,
            run_participant.__name__,
        )
Example n. 21
def run_group(*, start, args, site):
    start_time = time() - start

    output_folder = f"{args.output_dir}-{site}"

    subprocess.run(
        f"singularity exec -B /nfs/singularity-image:/run,/nfs:/nfs /nfs/singularity-image/bids_example.simg bash /run/group.sh {args.bids_dir}/{site} {output_folder}",
        shell=True,
    )

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            "all_file",
            args.benchmark_dir,
            args.experiment,
            run_group.__name__,
        )
def save_results(img_rdd, start, args):
    """Save a Nifti image.

    Parameters
    ----------
    img_rdd: (str, np.array, (np.array, np.array))
        Filename, image, and image header and affine.
    start : float
        Start time of the application.
    args : {str: Any}
        Runtime arguments of the application.

    Returns
    -------
    f_out : str
        Output path where the image is saved.
    "SUCCESS" : str
        Indicates that the pipeline succeeded.
    """
    start_time = time() - start

    filename = img_rdd[0]
    data = img_rdd[1]
    metadata = img_rdd[2]

    bn = os.path.basename(filename[:-3] + "nii")  # Save in nifti format
    f_out = os.path.join(args.output_dir, "images/" + bn)

    img = nib.Nifti1Image(data, metadata[0], header=metadata[1])
    nib.save(img, f_out)

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            save_results.__name__,
        )

    return f_out, "SUCCESS"
Example n. 23
def test3():
    def compute_normals(sc):
        out = len(sc.points) * [ [.0, .0, .0] ]
        triangle_normals = len(sc.faces) * [ [.0, .0, .0] ]

        def hash(p):
            return .11234 * p[0] + .35678 * p[1] + .67257 * p[2]

        from collections import defaultdict
        pt_table = defaultdict(list)

        for i, t in enumerate(sc.faces):
            p1 = sc.points[t[0]]
            p2 = sc.points[t[1]]
            p3 = sc.points[t[2]]

            pt_table[hash(p1)].append( (i, p1, t[0]) )
            pt_table[hash(p2)].append( (i, p2, t[1]) )
            pt_table[hash(p3)].append( (i, p3, t[2]) )

            normal = vcross(sub(p2, p1), sub(p3, p1))
            normal = vnorm(normal)

            triangle_normals[i] = normal

        for key, value in pt_table.items():
            # we assume no collisions in the hash
            point_index = value[0][2]
            first_point = value[0][1]

            # average the normals of the triangles around this point
            # TODO should be done just once for each triangle in pre-process
            normals = []

            for t_index, p, _ in value:
                assert p == first_point
                normals.append(triangle_normals[t_index])
            
            N = (
                sum(n[0] for n in normals) / len(normals),
                sum(n[1] for n in normals) / len(normals),
                sum(n[2] for n in normals) / len(normals)
            )
            # print N
            out[point_index] = N

        return out

    scene = load(sys.argv[1])
    with benchmark('compute normals'):
        scene.normals = compute_normals(scene)
    scene.write(sys.argv[2])
Example n. 24
def read_img(filename, start, args):
    """Read a Nifti image as a byte stream.

    Parameters
    ----------
    filename: str
        Representation of the path for the input file.
    start : float
        Start time of the application.
    args : {str: Any}
        Runtime arguments of the application.

    Returns
    -------
    filename : str
        Representation of the path for the input file.
    data : da.array
        Data of the Nifti image read.
    (img.affine, img.header) : (np.array, np.array)
        Affine and header of the Nifti image read.
    """
    start_time = time() - start

    img = None
    with open(filename, "rb") as f_in:
        fh = nib.FileHolder(fileobj=BytesIO(f_in.read()))
        img = nib.Nifti1Image.from_file_map({"header": fh, "image": fh})
    data = np.asanyarray(img.dataobj)  # img.get_data() is deprecated in nibabel

    end_time = time() - start

    if args.benchmark:
        benchmark(
            start_time,
            end_time,
            filename,
            args.output_dir,
            args.experiment,
            read_img.__name__,
        )

    return filename, da.from_array(data), (img.affine, img.header)
Example n. 25
def test3():
    def compute_normals(sc):
        out = len(sc.points) * [[.0, .0, .0]]
        triangle_normals = len(sc.faces) * [[.0, .0, .0]]

        def hash(p):
            return .11234 * p[0] + .35678 * p[1] + .67257 * p[2]

        from collections import defaultdict
        pt_table = defaultdict(list)

        for i, t in enumerate(sc.faces):
            p1 = sc.points[t[0]]
            p2 = sc.points[t[1]]
            p3 = sc.points[t[2]]

            pt_table[hash(p1)].append((i, p1, t[0]))
            pt_table[hash(p2)].append((i, p2, t[1]))
            pt_table[hash(p3)].append((i, p3, t[2]))

            normal = vcross(sub(p2, p1), sub(p3, p1))
            normal = vnorm(normal)

            triangle_normals[i] = normal

        for key, value in pt_table.items():
            # we assume no collisions in the hash
            point_index = value[0][2]
            first_point = value[0][1]

            # average the normals of the triangles around this point
            # TODO should be done just once for each triangle in pre-process
            normals = []

            for t_index, p, _ in value:
                assert p == first_point
                normals.append(triangle_normals[t_index])

            N = (sum(n[0] for n in normals) / len(normals),
                 sum(n[1] for n in normals) / len(normals),
                 sum(n[2] for n in normals) / len(normals))
            # print N
            out[point_index] = N

        return out

    scene = load(sys.argv[1])
    with benchmark('compute normals'):
        scene.normals = compute_normals(scene)
    scene.write(sys.argv[2])
Example n. 26
 def cross_validate(self):
     '''Trains and tests the given classifier on cv folds, and returns the average accuracy'''
     sum_accuracy = 0.0
     for i, (X_train, y_train, X_test,
             y_test) in enumerate(self.partitioner.getPartitions()):
         print('Cross validation iteration: %d' % i)
         accuracy = benchmark(self.clf, X_train, y_train, X_test, y_test)
         sum_accuracy += accuracy
         print('Accuracy of partition %d: %f' % (i, accuracy))
         print()
     avg_acc = sum_accuracy / self.cv
     print('Average accuracy: %f' % avg_acc)
     print()
     return avg_acc
Example n. 27
def do_highlight_octree(octree, mouse, faces, points, sc_view):
    model = glGetDoublev( GL_MODELVIEW_MATRIX )
    proj = glGetDoublev( GL_PROJECTION_MATRIX )
    view = glGetIntegerv( GL_VIEWPORT )

    tget = gluUnProject(mouse[0], mouse[1], 0, model, proj, view)

    segment = (sc_view.eye, tget)
    with benchmark('ray octree'):
        hit = octree.intersect(segment)

    if hit:
        print(hit)
        hit = hit[0]
        with in_red():
            glBegin(GL_TRIANGLES)
            glVertex3fv(points[hit[0]])
            glVertex3fv(points[hit[1]])
            glVertex3fv(points[hit[2]])
            glEnd()
    else:
        print('no hit')
Example n. 28
    def load_file(self, fn, verbose=0, procedural=False):

        # Currently immediate mode is way faster (3x) than draw-array mode
        # because we have to build the arrays in memory from list objects.
        # We should use the array module instead.
        #
        # VBO are 3 to 4 times faster than display list (random test)
        self.do_immediate_mode = False
        # self.do_immediate_mode = False # TEST
        self.use_display_list = False

        self.do_immediate_mode = True
        self.do_vbo = not self.do_immediate_mode
        self.vbo_init = False

        # Style
        self.do_lighting = not self.show_wireframe

        from scene import load
        with benchmark('load from disk'):
            self.scene = load(fn, verbose)
        if not self.scene: return
        self.fn = fn

        if self.use_display_list:
            self.dl = [-1 for i in self.scene.objets]

        # Init quat
        self.trackball = Trackball()

        # highlight setup, for CPython only
        # setup(self.scene.points, self.scene.faces)

        # Grid setup
        if self.octree:
            setup_octree()

        return self.scene
Example n. 29
    def load_file(self, fn, verbose = 0, procedural = False):

        # Currently immediate mode is way faster (3x) than draw-array mode
        # because we have to build the arrays in memory from list objects.
        # We should use the array module instead.
        # 
        # VBO are 3 to 4 times faster than display list (random test)
        self.do_immediate_mode = False
        # self.do_immediate_mode = False # TEST
        self.use_display_list = False

        self.do_immediate_mode = True
        self.do_vbo = not self.do_immediate_mode
        self.vbo_init = False

        # Style
        self.do_lighting = not self.show_wireframe

        from scene import load
        with benchmark('load from disk'):
            self.scene = load(fn, verbose)
        if not self.scene: return
        self.fn = fn

        if self.use_display_list:
            self.dl = [-1 for i in self.scene.objets]

        # Init quat
        self.trackball = Trackball()

        # highlight setup, for CPython only
        # setup(self.scene.points, self.scene.faces)

        # Grid setup
        if self.octree:
            setup_octree()

        return self.scene
Example n. 30
def get_results(X_train, y_train, X_test, y_test):

    results = []

    if args.clf:
        if args.clf == 'nb':
            learners = [(GaussianNB(), 'Gaussian Naive Bayes')]
        elif args.clf == 'lr':
            learners = [(LogisticRegression(), 'Logistic Regression')]
        else:
            learners = [(LinearSVC(), 'Linear SVM')]
    else:
        learners = [(LogisticRegression(), 'Logistic Regression'),
                    (LinearSVC(), 'Linear SVM'),
                    (GaussianNB(), 'Gaussian Naive Bayes')]
    for clf, name in learners:
        print('-' * 80)
        print(name)
        print('_' * 80)
        accuracy = benchmark(clf, X_train, y_train, X_test, y_test)
        results.append((name, accuracy))
    print_pairs(results, ('classifier', 'accuracy'))
    return results
Example n. 31
#!/usr/bin/env python

import utils, sys

def cut(filename, l, r):
    with open(filename, encoding='utf-8') as content:
        for line in content:
            print(line[l:r])

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: cut(f, 20, 40))
    sys.stderr.write('{0}: {1}\n'.format(f, t))
Example n. 32
 def dump(self, fn):
     with benchmark('serialize octree'):
         data = zlib.compress(dumps(self))  # avoid shadowing builtin `bytes`
         with open(fn, 'wb') as fo:  # binary mode: the payload is bytes
             fo.write(data)
Example n. 33
 def load(self, fn):
     with benchmark('deserialize octree'):
         with open(fn, 'rb') as f_in:  # binary mode to match dump()
             return loads(zlib.decompress(f_in.read()))
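# dump()/load() are a zlib-compressed pickle round trip. The same idea in
# isolation, on a toy payload:
import zlib
from pickle import dumps, loads

payload = {"depth": 3, "nodes": list(range(8))}
blob = zlib.compress(dumps(payload))
assert loads(zlib.decompress(blob)) == payload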
Example n. 34
def do_highlight(highlight_cursor, faces, points):
    global self_model, self_proj, self_view, image_points

    # the gluProject helper would retrieve these for us,
    # but fetching them once here is a worthwhile speedup
    model = glGetDoublev( GL_MODELVIEW_MATRIX )
    proj = glGetDoublev( GL_PROJECTION_MATRIX )
    view = glGetIntegerv( GL_VIEWPORT )

    # same camera ?
    def same_float_array(X, Y):
        Z = X - Y
        for i in Z:
            for j in i:
                if abs(j) > 0.00001:
                    return False
        return True

    V = view == self_view
    if not isinstance(V, bool):
        V = V.all()
        
    same_camera = V and \
        same_float_array(model, self_model) and \
        same_float_array(proj, self_proj)

    self_model = model
    self_proj = proj
    self_view = view

    def cross_product_2d(U, V):
        return U.x * V.y - U.y * V.x

    def cross_product_2d_list(U, V):
        # print U[0], V[1], U[1], V[0]
        return U[0] * V[1] - U[1] * V[0]

    class Vector():
        __slots__ = ('x', 'y')
        def __init__(self, A, B):
            self.x = B.x - A.x
            self.y = B.y - A.y

    class Triangle():
        __slots__ = ('A', 'B', 'C')
        def __init__(self, A, B, C):
            self.A = A
            self.B = B
            self.C = C

        def is_inside(self, P, verbose = False):
            b1 = cross_product_2d( Vector(P,self.A), Vector(P,self.B) ) >= 0
            b2 = cross_product_2d( Vector(P,self.B), Vector(P,self.C) ) >= 0
            b3 = cross_product_2d( Vector(P,self.C), Vector(P,self.A) ) >= 0
            return b1 == b2 == b3

    cursor = Point2D(*highlight_cursor)
    hits = []

    if not same_camera:
        with benchmark('glu'):
            image_points = []

            for p in points:
                x, y, z = gluProject(p[0], p[1], p[2], model, proj, view)
                image_points.append( (x,y,z) )
                # print view, x, y, z

    with benchmark('python'):
        for t in faces:
            x1, y1, z1 = image_points[t[0]]
            x2, y2, z2 = image_points[t[1]]
            x3, y3, z3 = image_points[t[2]]

            if False:
                P1 = Point2D(x1, y1)
                P2 = Point2D(x2, y2)
                P3 = Point2D(x3, y3)
                t_image = Triangle(P1, P2, P3)
                inside = t_image.is_inside(cursor)
            else:
                # 2 to 3 times faster if we don't build Point and Triangle objects
                x, y = cursor.x, cursor.y
                # print x, y
                b1 = cross_product_2d_list( (x - x1, y - y1), (x - x2, y - y2) ) >= 0
                b2 = cross_product_2d_list( (x - x2, y - y2), (x - x3, y - y3) ) >= 0
                b3 = cross_product_2d_list( (x - x3, y - y3), (x - x1, y - y1) ) >= 0
                inside = b1 == b2 == b3

            if inside:
                print(t)
                hits.append( (max(z1, z2, z3), t) )

    display_hits(hits, points)
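# The inlined hit test above is the classic same-sign cross-product trick:
# P lies inside triangle ABC iff the three 2D cross products share a sign.
# A standalone toy version:
def cross2d(u, v):
    return u[0] * v[1] - u[1] * v[0]

def inside(p, a, b, c):
    b1 = cross2d((p[0] - a[0], p[1] - a[1]), (p[0] - b[0], p[1] - b[1])) >= 0
    b2 = cross2d((p[0] - b[0], p[1] - b[1]), (p[0] - c[0], p[1] - c[1])) >= 0
    b3 = cross2d((p[0] - c[0], p[1] - c[1]), (p[0] - a[0], p[1] - a[1])) >= 0
    return b1 == b2 == b3

assert inside((0.3, 0.3), (0, 0), (1, 0), (0, 1))
assert not inside((2, 2), (0, 0), (1, 0), (0, 1))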
Example n. 35
def test5():
    scene = load(sys.argv[1])
    from geom_ops import compute_normals
    with benchmark('compute normals'):
        scene.normals, scene.faces_normals = compute_normals(scene)
    scene.write(sys.argv[2])
Example n. 36
#!/usr/bin/env python

import utils, sys

def sort(string):
    lines = string.split('\n')
    lines.sort()
    return '\n'.join(lines)

for f in sys.argv[1:]:
    t = utils.benchmark(lambda:
            utils.with_utf8_file(f, lambda c:
                    sys.stdout.write(sort(c))))  # Python 3: write str, not bytes
    sys.stderr.write('{0}: {1}\n'.format(f, t))
Example n. 37
import os
import sys

import matplotlib.pyplot as plt
import seaborn

ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.append(ROOT)
from utils import benchmark

data = [("Sort", [0, 1])]

frame = benchmark(data, pin_to_cpu=True)

seaborn.barplot(data=frame, x="Sort", y="Time")
plt.show()
Example n. 38
import os
import sys

import matplotlib.pyplot as plt
import seaborn

ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.append(ROOT)
from utils import benchmark

data = [("NonTemporal", [0, 1]), ("Threads", list(range(1, 9)))]

frame = benchmark(data)

seaborn.barplot(data=frame, x="Threads", y="Time", hue="NonTemporal")
plt.show()
Example n. 39
    def render(self):
        if self.w == 0 or self.h == 0: 
            return

        if self.do_vbo and not self.vbo_init:
            with benchmark('setup vbo'):
                self.setup_vbo()
            self.vbo_init = True

        logger.info(' === render  === ')

        if not self.scene: 
            #glutSwapBuffers() GLUT
            if self.SwapBuffer_cb:
                self.SwapBuffer_cb()
            return

        # Some OpenGL init
        glEnable(GL_MULTISAMPLE_ARB)
        glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE)
        glEnable(GL_COLOR_MATERIAL)
        glEnable(GL_DEPTH_TEST)

        # map() returns an iterator in Python 3, so build a list explicitly
        bg_color = [x / 255.0 for x in self.scene.bg] + [1.0]
        glClearColor(*bg_color)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        self.draw_bg()
        self.set_lights()
        self.set_matrix(self.scene.views[0])

        # wireframe-only rendering is good for debugging, especially when a
        # triangle degenerates to a line because of a copy/paste error
        # (drawing A,A,C instead of A,B,C); rendering points is another
        # good option
        if self.show_wireframe:
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        else:
            glPolygonMode(GL_BACK,GL_FILL)
            glPolygonMode(GL_FRONT,GL_FILL)

        # Render
        self.init_context()
        # with Transparent():
        #     diffus = self.scene.diffus
        #     # We store color in [0,255] interval
        #     diffus = map(lambda x: x / 255.0, diffus)
        #     color = diffus + [0.5]
        #     glColor4f(*color)
        #     self.render_obj()
        self.render_obj()

        if self.show_wireframe:
            with Wireframe(3.0):
                self.render_obj()

        if self.highlight:
            if self.highlight_implementation == "Python":
                do_highlight(self.highlight_cursor, self.scene.faces, self.scene.points)
            elif self.highlight_implementation == "CPython":
                do_highlight_C(self.highlight_cursor, self.scene.faces, self.scene.points)
            elif self.highlight_implementation == "octree":
                do_highlight_octree(self.octree, self.highlight_cursor, self.scene.faces, self.scene.points, self.scene.views[0])

        if self.draw_octree:
            if self.octree_dl is not None:
                glCallList(self.octree_dl)
            else:
                self.octree_dl = glGenLists(1)
                glNewList(self.octree_dl, GL_COMPILE)

                # with viewer.Wireframe('foo'):
                draw_octree(self.octree)

                glEndList()
                glCallList(self.octree_dl)

            if False:
                draw_bb(self.scene.bb)
                for bb in self.scene.bb.split():
                    draw_bb(bb)

        glFlush()
        if self.SwapBuffer_cb:
            self.SwapBuffer_cb()
        #glutSwapBuffers() GLUT

        self.print_fps()
Example n. 40
#!/usr/bin/env python

import utils, sys

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: utils.with_utf8_file(f, lambda c: len(c)))
    sys.stderr.write('{0}: {1}\n'.format(f, t))
Example n. 41
def test5():
    scene = load(sys.argv[1])
    from geom_ops import compute_normals
    with benchmark('compute normals'):
        scene.normals, scene.faces_normals = compute_normals(scene)
    scene.write(sys.argv[2])
Example n. 42
#!/usr/bin/env python

import utils, sys

def sort(string):
    lines = string.splitlines()
    lines.sort()
    return '\n'.join(lines)

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: sys.stdout.write(
        utils.with_utf8_file(f, sort)))  # Python 3: write str, not bytes
    sys.stderr.write('{0}: {1}\n'.format(f, t))

Example n. 43
#!/usr/bin/env python

import utils, sys

def strip_tags(filename):
    string = open(filename, encoding='utf-8').read()

    d = 0
    out = []

    for c in string:
        if c == '<': d += 1

        if d > 0:
            out += ' '
        else:
            out += c

        if c == '>': d -= 1

    print(''.join(out))

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: strip_tags(f))
    sys.stderr.write('{0}: {1}\n'.format(f, t))
Example n. 44
    def converged_gradient(self, num_iter, X, V, W, iter_check=50000, threshold=0.005,
                           gradient_v=None, gradient_w=None, error=True, gradient_check=False,
                           epsilon=10.**-5, x_j=None, y_j=None):
        training_error = None
        training_loss = None

        if num_iter > 1000000:
            return (True, training_error, training_loss)
        # There are two ways to determine if the gradient has converged.
        # (1) Use the training error (error=True)
        # (2) Use the magnitude of the gradient (error=False)
        # In both cases, training_error and training_loss are attached to the response
        # for the purposes of plotting.
        if error:
            if num_iter % iter_check != 0:
                return (False, training_error, training_loss)
            else:
                if gradient_check:
                    # Randomly check five weights.
                    for _ in range(5):
                        # import pdb; pdb.set_trace()
                        random_wi = np.random.randint(W.shape[0])
                        random_wj = np.random.randint(W.shape[1])
                        random_vi = np.random.randint(V.shape[0])
                        random_vj = np.random.randint(V.shape[1])

                        W_plus_epsilon = W.copy()
                        W_plus_epsilon[random_wi][random_wj] = W_plus_epsilon[random_wi][random_wj] + epsilon
                        Z_W_plus = self.perform_forward_pass(x_j, V, W_plus_epsilon)[1]

                        W_minus_epsilon = W.copy()
                        W_minus_epsilon[random_wi][random_wj] = W_minus_epsilon[random_wi][random_wj] - epsilon
                        Z_W_minus = self.perform_forward_pass(x_j, V, W_minus_epsilon)[1]

                        V_plus_epsilon = V.copy()
                        V_plus_epsilon[random_vi][random_vj] = V_plus_epsilon[random_vi][random_vj] + epsilon
                        Z_V_plus = self.perform_forward_pass(x_j, V_plus_epsilon, W)[1]

                        V_minus_epsilon = V.copy()
                        V_minus_epsilon[random_vi][random_vj] = V_minus_epsilon[random_vi][random_vj] - epsilon
                        Z_V_minus = self.perform_forward_pass(x_j, V_minus_epsilon, W)[1]

                        y = np.zeros(10)
                        y[y_j] = 1

                        if self.loss_function == "mean-squared-error":
                            W_plus_cost = mean_squared_error(Z_W_plus, y)
                            W_minus_cost = mean_squared_error(Z_W_minus, y)
                            V_plus_cost = mean_squared_error(Z_V_plus, y)
                            V_minus_cost = mean_squared_error(Z_V_minus, y)
                        else:
                            W_plus_cost = cross_entropy_loss(Z_W_plus.T, y)
                            W_minus_cost = cross_entropy_loss(Z_W_minus.T, y)
                            V_plus_cost = cross_entropy_loss(Z_V_plus.T, y)
                            V_minus_cost = cross_entropy_loss(Z_V_minus.T, y)

                        gradient_approx_wij = (W_plus_cost - W_minus_cost) / (2. * epsilon)
                        gradient_approx_vij = (V_plus_cost - V_minus_cost) / (2. * epsilon)

                        if gradient_approx_wij > gradient_w[random_wi][random_wj] + threshold or \
                           gradient_approx_wij < gradient_w[random_wi][random_wj] - threshold or \
                           gradient_approx_vij > gradient_v[random_vi][random_vj] + threshold or \
                           gradient_approx_vij < gradient_v[random_vi][random_vj] - threshold:
                            raise AssertionError("The gradient was incorrectly computed.")

                classifications_training, training_Z = self.predict(X, V, W, return_Z=True)
                training_error, training_indices_error = benchmark(classifications_training, self.labels)

                if self.validation_data is not None and self.validation_labels is not None:
                    classifications_validation = self.predict(self.validation_data, V, W)
                    validation_error, validation_indices_error = benchmark(classifications_validation, self.validation_labels)

                if self.loss_function == "mean-squared-error":
                    training_loss = mean_squared_error(training_Z.T, self.Y)
                else:
                    training_loss = cross_entropy_loss(training_Z.T, self.Y)

                print("Completed %d iterations.\nThe training error is %.2f.\n The training loss is %.2f."
                      % (num_iter, training_error, training_loss))

                if self.validation_data is not None and self.validation_labels is not None:
                    print("The error on the validation set is %.2f." % validation_error)

                if training_error < threshold:
                    return (True, training_error, training_loss)

                return (False, training_error, training_loss)
        else:
            if num_iter % iter_check == 0:
                classifications_training, training_Z = self.predict(X, V, W, return_Z=True)
                training_error, indices_error = benchmark(classifications_training, self.labels)

                if self.validation_data is not None and self.validation_labels is not None:
                    classifications_validation = self.predict(self.validation_data, V, W)
                    validation_error, validation_indices_error = benchmark(classifications_validation, self.validation_labels)

                if self.loss_function == "mean-squared-error":
                    training_loss = mean_squared_error(training_Z.T, self.Y)
                else:
                    training_loss = cross_entropy_loss(training_Z.T, self.Y)

                print("Completed %d iterations. The training error is %.2f. Training loss is %.2f" % (num_iter, training_error))

                if self.validation_data is not None and self.validation_labels is not None:
                    print("The error on the validation set is %.2f." % validation_error)

            if np.linalg.norm(gradient_v) < threshold and np.linalg.norm(gradient_w) < threshold:
                return (True, training_error, training_loss)
            else:
                return (False, training_error, training_loss)
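# The gradient_check branch above uses the central-difference approximation
# (J(w + eps) - J(w - eps)) / (2 * eps). The idea in one dimension, on a toy
# function f(w) = w**2 with known derivative 2w:
def f(w):
    return w ** 2

w, eps = 3.0, 1e-5
approx = (f(w + eps) - f(w - eps)) / (2 * eps)
assert abs(approx - 2 * w) < 1e-6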
Example n. 45
 def setup_octree(self):
     with benchmark('build octree'):
         self.octree = Octree(self.scene)
#!/usr/bin/env python

import utils, sys

def strip_brackets(string):
    d = 0
    out = ''
    for c in string:
        if c == '{' or c == '[': d += 1

        if d > 0:
            out += ' '
        else:
            out += c

        if c == '}' or c == ']': d -= 1

    return out

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: utils.with_utf8_file(f, strip_brackets))
    sys.stderr.write('{0}: {1}\n'.format(f, t))
Example n. 47
#!/usr/bin/env python

import utils, sys

def word_count(string):
    freqs = {}
    for w in string.split():
        w = w.lower()
        if freqs.get(w):
            freqs[w] += 1
        else:
            freqs[w] = 1
    return freqs

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: utils.with_utf8_file(f, word_count))
    sys.stderr.write('{0}: {1}\n'.format(f, t))
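# The hand-rolled frequency dict above is equivalent to collections.Counter:
from collections import Counter

def word_count_counter(string):
    return Counter(w.lower() for w in string.split())

assert word_count_counter("a A b") == {"a": 2, "b": 1}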
Example n. 48
#!/usr/bin/env python

import utils, sys

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: utils.with_utf8_file(f, lambda c: c.upper()))
    sys.stderr.write('{0}: {1}\n'.format(f, t))
Example n. 49
#!/usr/bin/env python

import utils, sys

def sort(filename):
    content = open(filename, encoding='utf-8').read()
    lines = content.splitlines()
    lines.sort()
    print('\n'.join(lines))

for f in sys.argv[1:]:
    t = utils.benchmark(lambda: sort(f))
    sys.stderr.write('{0}: {1}\n'.format(f, t))
    test = []
    for i in range(modelCount):
        j = i * 24
        data.append(TrainingTimeSeries(ts, lags=lag, futures=(j, j + 24)))
        train.append([data[i].getTrainingTrain(), data[i].getTrainingTarget()[:, j:(j + 24)]])
        valid.append([data[i].getValidationTrain(), data[i].getValidationTarget()[:, j:(j + 24)]])
        test.append([data[i].getTestTrain(), data[i].getTestTarget()[:, j:(j + 24)]])
    inLayer = theanets.layers.Input(data[0].trainLength, name='inputLayer')
    for algo in algos:
        for hiddenNeuron in range(70, 126, 5):
            hiddenLayer = theanets.layers.Feedforward(hiddenNeuron, inputs=inLayer.size, activation='sigmoid',
                                                      name='hiddenLayer')
            outLayer = theanets.layers.Feedforward(outputCount, inputs=hiddenLayer.size, activation='linear')
            layers = [inLayer, hiddenLayer, outLayer]
            start_time = time.time()
            orig, result, error, n_params, iterations = test_RawData_read_csv_multiple_features(algo, layers, train,
                                                                                                valid, test)
            trainTime = time.time() - start_time
            rmse = utils.calculateRMSE(orig, result)
            mape = utils.calculateMAPE(orig, result)
            smape = utils.calculateSMAPE(orig, result)
            # Start the plotting
            title = 'algo:%s, lags:%s, hidden neurons:%s, testSample:%s TrainTime:%.2f sec' % (
                algo, lag, hiddenNeuron, len(result), trainTime)
            utils.plotFigures(orig, result, title, k, locationToSaveImages='../results/multiple_model/')
            k += 1
            utils.benchmark(str(lag).replace(',', ':'), inLayer.size, hiddenNeuron, outLayer.size, error[1][0]['err'],
                            error[1][1]['err'],
                            n_params, rmse, mape, smape, trainTime, iterations,
                            fileName='../performance/neuralNetBenchmark_m_m.csv')
Example n. 51
j = 1
futures = 120
for lag in lags:
    ts = copy.deepcopy(timeSeries)
    data = TrainingTimeSeries(ts, lags=lag, futures=futures)
    train = [data.getTrainingTrain(), data.getTrainingTarget()]
    valid = [data.getValidationTrain(), data.getValidationTarget()]
    test = [data.getTestTrain(), data.getTestTarget()]
    inLayer = theanets.layers.Input(data.trainLength, name='inputLayer')
    for algo in algos:
        for hiddenNeuron in range(70, 126, 5):
            hiddenLayer = theanets.layers.Feedforward(hiddenNeuron, inputs=inLayer.size, activation='sigmoid',
                                                      name='hiddenLayer')
            outLayer = theanets.layers.Feedforward(data.getOutputCount(), inputs=hiddenLayer.size, activation='linear')
            layers = [inLayer, hiddenLayer, outLayer]
            start_time = time.time()
            orig, result, error, n_params, iterations = test_RawData_read_csv_multiple_features(algo, layers, train,
                                                                                                valid, test)
            trainTime = time.time() - start_time
            rmse = utils.calculateRMSE(orig, result)
            mape = utils.calculateMAPE(orig, result)
            smape = utils.calculateSMAPE(orig, result)
            # Start the plotting
            title = 'algo:%s, lags:%s, hidden neurons:%s, testSample:%s TrainTime:%.2f sec' % (
                algo, lag, hiddenNeuron, len(result), trainTime)
            utils.plotFigures(orig, result, title, j)
            j += 1
            utils.benchmark(str(lag), inLayer.size, hiddenNeuron, outLayer.size, error[0]['err'], error[1]['err'],
                            n_params, rmse, mape, smape, trainTime, iterations)