Example 1
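A unit test that calls save on a single vtraj chunk and checks that the chunk's assignments and distances land in row 0, columns 6:9 of the arr_0 arrays in the assignment and distance containers, with untouched entries left at the -1 sentinel.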
    def test_1(self):
        vtraj = self.vtrajs[2]
        assert len(vtraj) == 3

        assignments = 1234 * np.ones(len(vtraj))
        distances = np.random.randn(len(vtraj)).astype(np.float32)
        save(self.fa, self.fd, assignments, distances, vtraj)

        AData = -1 * np.ones((2, 10))
        AData[0, 6:9] = 1234

        DData = -1 * np.ones((2, 10), dtype=np.float32)
        DData[0, 6:9] = distances

        npt.assert_equal(self.fa.root.arr_0, AData)
        npt.assert_equal(self.fd.root.arr_0, DData)
Example 2
    def test_1(self):
        vtraj = self.vtrajs[2]
        assert len(vtraj) == 3
        
        assignments = 1234 * np.ones(len(vtraj))
        distances = np.random.randn(len(vtraj)).astype(np.float32)
        save(self.fa, self.fd, assignments, distances, vtraj)
        
        AData = -1 * np.ones((2, 10))
        AData[0, 6:9] = 1234

        DData = -1 * np.ones((2, 10), dtype=np.float32)
        DData[0, 6:9] = distances

        npt.assert_equal(self.fa.root.Data, AData)
        npt.assert_equal(self.fd.root.Data, DData)
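The two tests above exercise a save helper that writes a chunk's results into preallocated containers opened as PyTables files (self.fa, self.fd). The real fixtures are not shown here; the following is a minimal sketch of comparable in-memory containers, with only the arr_0 name, the (2, 10) shape, and the -1 sentinel taken from the assertions, and everything else assumed.

import numpy as np
import tables

def make_test_containers(n_trajs=2, max_frames=10):
    # Two in-memory HDF5 files; backing_store=0 keeps them off disk entirely.
    fa = tables.open_file('assignments.h5', mode='w',
                          driver='H5FD_CORE', driver_core_backing_store=0)
    fd = tables.open_file('distances.h5', mode='w',
                          driver='H5FD_CORE', driver_core_backing_store=0)
    # Both arrays start at -1, matching the sentinel the tests build into AData/DData.
    fa.create_carray('/', 'arr_0',
                     obj=-1 * np.ones((n_trajs, max_frames)))
    fd.create_carray('/', 'arr_0',
                     obj=(-1 * np.ones((n_trajs, max_frames))).astype(np.float32))
    return fa, fd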
Example 3
def main(args, metric, logger):

    project = Project.load_from_hdf(args.project)
    if not os.path.exists(args.generators):
        raise IOError('Could not open generators')
    generators = os.path.abspath(args.generators)
    output_dir = os.path.abspath(args.output_dir)

    # connect to the workers
    try:
        json_file = client_json_file(args.profile, args.cluster_id)
        client = parallel.Client(json_file, timeout=2)
    except parallel.error.TimeoutError as exception:
        msg = '\nparallel.error.TimeoutError: ' + str(exception)
        msg += "\n\nPerhaps you didn't start a controller?\n"
        msg += "(hint, use ipcluster start)"
        print >> sys.stderr, msg
        sys.exit(1)

    lview = client.load_balanced_view()

    # partition the frames into a bunch of vtrajs
    all_vtrajs = local.partition(project, args.chunk_size)

    # initialize the containers to save to disk
    f_assignments, f_distances = local.setup_containers(
        output_dir, project, all_vtrajs)

    # get the chunks that have not been computed yet
    valid_indices = np.where(
        f_assignments.root.completed_vtrajs[:] == False)[0]
    remaining_vtrajs = np.array(all_vtrajs)[valid_indices].tolist()

    logger.info('%d/%d jobs remaining', len(remaining_vtrajs), len(all_vtrajs))

    # send the workers the files they need to get started
    # dview.apply_sync(remote.load_gens, generators, project['ConfFilename'],
    #    metric)

    # get the workers going
    n_jobs = len(remaining_vtrajs)
    amr = lview.map(remote.assign,
                    remaining_vtrajs, [generators] * n_jobs, [metric] * n_jobs,
                    chunksize=1)

    pending = set(amr.msg_ids)

    while pending:
        client.wait(pending, 1e-3)
        # finished is the set of msg_ids that are complete
        finished = pending.difference(client.outstanding)
        # update pending to exclude those that just finished
        pending = pending.difference(finished)
        for msg_id in finished:
            # we know these are done, so don't worry about blocking
            async = client.get_result(msg_id)

            assignments, distances, chunk = async.result[0]
            vtraj_id = local.save(f_assignments, f_distances, assignments,
                                  distances, chunk)

            log_status(logger, len(pending), n_jobs, vtraj_id, async)

    f_assignments.close()
    f_distances.close()

    logger.info('All done, exiting.')
Example 4
def main(args, metric, logger):

    project = Project.load_from(args.project)
    if not os.path.exists(args.generators):
        raise IOError("Could not open generators")
    generators = os.path.abspath(args.generators)
    output_dir = os.path.abspath(args.output_dir)

    # connect to the workers
    try:
        json_file = client_json_file(args.profile, args.cluster_id)
        client = parallel.Client(json_file, timeout=2)
    except parallel.error.TimeoutError as exception:
        msg = "\nparallel.error.TimeoutError: " + str(exception)
        msg += "\n\nPerhaps you didn't start a controller?\n"
        msg += "(hint, use ipcluster start)"
        print >> sys.stderr, msg
        sys.exit(1)

    lview = client.load_balanced_view()

    # partition the frames into a bunch of vtrajs
    all_vtrajs = local.partition(project, args.chunk_size)

    # initialize the containers to save to disk
    f_assignments, f_distances = local.setup_containers(output_dir, project, all_vtrajs)

    # get the chunks that have not been computed yet
    valid_indices = np.where(f_assignments.root.completed_vtrajs[:] == False)[0]
    remaining_vtrajs = np.array(all_vtrajs)[valid_indices].tolist()

    logger.info("%d/%d jobs remaining", len(remaining_vtrajs), len(all_vtrajs))

    # send the workers the files they need to get started
    # dview.apply_sync(remote.load_gens, generators, project['ConfFilename'],
    #    metric)

    # get the workers going
    n_jobs = len(remaining_vtrajs)
    amr = lview.map(remote.assign, remaining_vtrajs, [generators] * n_jobs, [metric] * n_jobs, chunksize=1)

    pending = set(amr.msg_ids)

    while pending:
        client.wait(pending, 1e-3)
        # finished is the set of msg_ids that are complete
        finished = pending.difference(client.outstanding)
        # update pending to exclude those that just finished
        pending = pending.difference(finished)
        for msg_id in finished:
            # we know these are done, so don't worry about blocking
            async = client.get_result(msg_id)

            try:
                assignments, distances, chunk = async.result[0]
            except RemoteError as e:
                print "Remote Error:"
                e.print_traceback()
                raise

            vtraj_id = local.save(f_assignments, f_distances, assignments, distances, chunk)

            log_status(logger, len(pending), n_jobs, vtraj_id, async)

    f_assignments.close()
    f_distances.close()

    logger.info("All done, exiting.")