Example #1
def retrieve(name_db,
             collection,
             db_credentials,
             x_lim,
             y_lim,
             z_lim,
             voxel_size,
             output_path,
             selected_only=True):

    db = DB(db_credentials)
    if selected_only:
        g1, index_map = db.get_selected(name_db,
                                        collection,
                                        x_lim=x_lim,
                                        y_lim=y_lim,
                                        z_lim=z_lim)

        for v in g1.get_vertex_iterator():
            assert (len(g1.get_incident_edges(v)) <=
                    2), "Retrieved graph has branchings"

    else:
        g1, index_map = db.get_g1(name_db,
                                  collection,
                                  x_lim=x_lim,
                                  y_lim=y_lim,
                                  z_lim=z_lim)

    g1_to_nml(g1, output_path, knossos=True, voxel_size=voxel_size)
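A minimal invocation might look like the following sketch. The database name, collection, credentials path, ROI limits, and voxel size are all placeholders; the limit dictionaries follow the {"min": ..., "max": ...} convention visible in Example #4:

# Hypothetical usage; every value below is a placeholder.
roi = {"min": 0, "max": 100}
retrieve(name_db="unittest",
         collection="run",
         db_credentials="./db_credentials.ini",  # placeholder path
         x_lim=roi,
         y_lim=roi,
         z_lim=roi,
         voxel_size=[5., 5., 50.],
         output_path="./retrieved.nml",
         selected_only=True)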
Example #2
def solve_core(core, name_db, collection, db_credentials, cc_min_vertices,
               start_edge_prior, selection_cost, orientation_factor,
               comb_angle_factor, time_limit, voxel_size, backend):

    try:
        logger.info("Core id {}".format(core.id))
        logger.info("Process core {}...".format(core.id))
        db = DB(db_credentials)
        solver = CoreSolver()

        solved = db.is_solved(name_db, collection, core.x_lim_core,
                              core.y_lim_core, core.z_lim_core)

        if not solved:
            g1, index_map = db.get_g1(name_db,
                                      collection,
                                      x_lim=core.x_lim_context,
                                      y_lim=core.y_lim_context,
                                      z_lim=core.z_lim_context)

            solutions = solver.solve_subgraph(
                g1,
                index_map,
                cc_min_vertices=cc_min_vertices,
                start_edge_prior=start_edge_prior,
                selection_cost=selection_cost,
                orientation_factor=orientation_factor,
                comb_angle_factor=comb_angle_factor,
                core_id=core.id,
                voxel_size=voxel_size,
                time_limit=time_limit,
                backend=backend)

            for solution in solutions:
                db.write_solution(solution,
                                  index_map,
                                  name_db,
                                  collection,
                                  x_lim=core.x_lim_core,
                                  y_lim=core.y_lim_core,
                                  z_lim=core.z_lim_core,
                                  id_writer=core.id)

            db.write_solved(name_db, collection, core)
        else:
            logger.info("Skip core {}, already solved...".format(core.id))

        return core.id
    except Exception:
        # Stringify the traceback so it survives crossing process boundaries:
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))
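solve_core is written to be called once per core (it returns core.id so a driver can track completion), typically via a worker pool as in Example #5. A hypothetical serial driver, where cores would come from CoreBuilder.generate_cores() as in Example #6 and all numeric parameters are placeholders:

# Hypothetical driver; all parameter values are placeholders.
for core in cores:
    solve_core(core,
               name_db="unittest",
               collection="run",
               db_credentials="./db_credentials.ini",  # placeholder
               cc_min_vertices=4,
               start_edge_prior=160.0,
               selection_cost=-70.0,
               orientation_factor=15.0,
               comb_angle_factor=16.0,
               time_limit=1000,
               voxel_size=[5., 5., 50.],
               backend="Gurobi")  # backend name is a placeholder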
Example #3
    def setUp(self):
        try:
            self.name_db = "unittest"
            self.collection = "run"
            self.db = DB()
            self.client = self.db.get_client(self.name_db,
                                             self.collection,
                                             overwrite=True)

            # Clean up all written data:
            self.addCleanup(self.db.get_client, self.name_db, self.collection, True)

        # Possible problem here is that no db instance is running:
        except Exception as e:
            print "Make sure that a DB instance is running before executing the test suite"
            raise e

        self.min_pos = 1
        self.max_pos = 101
        self.distance_threshold = 10
        self.voxel_size = np.array([5.,5.,50.])
 
        print "[Unittest]: Create mock candidates..."
        self.candidate_positions = []
        self.candidate_separation = self.distance_threshold // 2

        for x in np.arange(self.min_pos, self.max_pos, self.candidate_separation):
            for y in np.arange(self.min_pos, self.max_pos, self.candidate_separation):
                for z in np.arange(self.min_pos, self.max_pos, self.candidate_separation):
                    self.candidate_positions.append(np.array([x, y, z]))

        
        self.n_candidates = len(self.candidate_positions)
        orientations = randint(self.min_pos, 
                               self.max_pos, 
                               size=(self.n_candidates, 3))
  

        self.candidate_orientations = []
        self.mock_candidates = []

        for i in range(self.n_candidates):
            identifier = i + 1 # To mirror id_0 = 1 in track.py

            if self.n_candidates - 10 > i > 10 and (i%3 == 0):
                partner_i = identifier + 1

            elif (self.mock_candidates and (self.mock_candidates[-1].partner_identifier == identifier)):
                partner_i = identifier - 1
                self.candidate_positions[i] = self.candidate_positions[i - 1]
            
            else:
                partner_i = -1
            
            pv = np.array(self.candidate_positions[i], dtype=float)
            
            ov = np.array(orientations[i], dtype=float)
            ov_normed = ov/np.linalg.norm(ov)
            self.candidate_orientations.append(ov_normed)

            candidate = MtCandidate(position=pv,
                                    orientation=ov_normed,
                                    identifier=identifier,
                                    partner_identifier=partner_i)

            self.mock_candidates.append(candidate)

        print "[Unittest]: Verify {} mock candidates...".format(self.n_candidates)
        for j in range(len(self.mock_candidates)):
            if self.mock_candidates[j].partner_identifier != -1:
                if self.mock_candidates[j + 1].partner_identifier ==\
                        self.mock_candidates[j].identifier:

                    self.assertEqual(self.mock_candidates[j].partner_identifier, 
                                     self.mock_candidates[j + 1].identifier)
                    self.assertTrue(np.all(self.mock_candidates[j].position ==\
                                    self.mock_candidates[j + 1].position))
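The partner assignment above is the subtle part: every third candidate in the interior opens a pair with its successor, and the next iteration closes the pair by pointing back (and copying the position). The standalone sketch below reproduces just that pattern:

# Standalone illustration of the pairing logic in setUp above.
n = 30
partners = []
for i in range(n):
    identifier = i + 1
    if n - 10 > i > 10 and i % 3 == 0:
        partners.append(identifier + 1)  # open a pair with the next candidate
    elif partners and partners[-1] == identifier:
        partners.append(identifier - 1)  # close the pair opened one step before
    else:
        partners.append(-1)              # unpaired candidate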
Example #4
    def setUp(self):
        """
        Here we set up a problem instance where the solution should
        be a straight line spanning multiple cores.
        """

        self.line_points = [np.array([0., 0., 10. + j]) for j in range(100)]
        self.line_orientations = [np.array([0., 0., 1.])] * len(
            self.line_points)
        self.line_start = self.line_points[0]
        self.line_end = self.line_points[-1]

        self.wrong_candidate_positions = [
            np.array([0., 3., 10. + j]) for j in np.random.randint(0, 100, 30)
        ]
        self.wrong_candidate_orientations = [np.array([1.0, 0.0, 0.0])] * len(
            self.wrong_candidate_positions)

        self.candidates = []
        for i in range(len(self.line_points)):
            candidate = MtCandidate(self.line_points[i],
                                    self.line_orientations[i],
                                    identifier=i + 1,
                                    partner_identifier=-1)

            self.candidates.append(candidate)

        i0 = len(self.line_points) + 1
        for j in range(len(self.wrong_candidate_positions)):
            candidate = MtCandidate(self.wrong_candidate_positions[j],
                                    self.wrong_candidate_orientations[j],
                                    identifier=i0 + j,
                                    partner_identifier=-1)

            self.candidates.append(candidate)

        try:
            self.name_db = "unittest"
            self.collection = "Line"
            self.db = DB()
            self.client = self.db.get_client(self.name_db,
                                             self.collection,
                                             overwrite=True)

            self.addCleanup(self.db.get_client, self.name_db, self.collection,
                            True)
        except Exception as e:
            print "Make sure that a DB instance is running befoer executing the test suite"
            raise e

        self.db.write_candidates(name_db=self.name_db,
                                 prob_map_stack_chunk=None,
                                 offset_chunk=None,
                                 gs=None,
                                 ps=None,
                                 voxel_size=[1., 1., 1.],
                                 id_offset=0,
                                 collection=self.collection,
                                 overwrite=True,
                                 candidates=self.candidates)

        self.roi_x = {"min": 0, "max": 20}
        self.roi_y = {"min": 0, "max": 20}
        self.roi_z = {"min": 0, "max": 120}

        self.db.connect_candidates(name_db=self.name_db,
                                   collection=self.collection,
                                   x_lim=self.roi_x,
                                   y_lim=self.roi_y,
                                   z_lim=self.roi_z,
                                   distance_threshold=3.5)
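A test method built on this fixture could then check the selection. The body below is hypothetical: it assumes the candidate volume has already been solved (cf. Example #2) and uses only the validate_selection call shown in Example #6 and the graph accessors from Example #1:

    def test_selected_graph_has_no_branchings(self):
        # Hypothetical test body; assumes a solve step has produced a selection.
        g1_selected = self.db.validate_selection(name_db=self.name_db,
                                                 collection=self.collection,
                                                 x_lim=self.roi_x,
                                                 y_lim=self.roi_y,
                                                 z_lim=self.roi_z)

        # A valid selection is a union of simple paths, i.e. max degree 2:
        for v in g1_selected.get_vertex_iterator():
            self.assertLessEqual(len(g1_selected.get_incident_edges(v)), 2)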
Example #5
def write_candidate_graph(max_chunks,
                          max_dset,
                          pm_chunks,
                          pm_dset,
                          name_db,
                          collection,
                          db_credentials,
                          distance_threshold,
                          voxel_size,
                          cores,
                          cf_lists,
                          volume_offset,
                          overwrite=False,
                          mp=True):

    logger.info("Extract candidates...")
    db = DB(db_credentials)
    n_chunk = 0
    id_offset = 1

    # Overwrite if necessary:
    graph = db.get_collection(name_db, collection, overwrite=overwrite)
    for chunk in max_chunks:
        logger.info("Extract chunk {}/{}...".format(n_chunk, len(max_chunks)))

        f = h5py.File(chunk, "r")
        attrs = f[max_dset].attrs.items()
        f.close()

        chunk_limits = attrs[1][1]
        offset_chunk = [
            chunk_limits[0][0], chunk_limits[1][0], chunk_limits[2][0]
        ]

        candidates = extract_maxima_candidates(chunk, max_dset, offset_chunk,
                                               id_offset)

        id_offset_tmp = db.write_candidates(name_db,
                                            collection,
                                            candidates,
                                            voxel_size,
                                            overwrite=False)

        assert (graph.find({
            "selected": {
                "$exists": True
            }
        }).count() == id_offset_tmp)

        id_offset = id_offset_tmp + 1
        n_chunk += 1

    # Don't forward SIGINT to child processes: workers inherit SIG_IGN,
    # and the parent restores its original handler right after the fork.
    sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = multiprocessing.Pool()
    signal.signal(signal.SIGINT, sigint_handler)

    bound_connect_candidates_alias = functools.partial(
        connect_candidates_alias, db)

    logger.info("Connect candidates...")
    try:
        for cf_core_ids in cf_lists:
            logger.info("Connecting {}".format(cf_core_ids))

            results = []
            if mp:
                for core_id in cf_core_ids:
                    logger.info("Add context {} to pool (mp: {})".format(
                        core_id, mp))
                    core = cores[core_id]
                    results.append(
                        pool.apply_async(bound_connect_candidates_alias, (
                            name_db,
                            collection,
                            core.x_lim_context,
                            core.y_lim_context,
                            core.z_lim_context,
                            distance_threshold,
                        )))

                # Catch exceptions and SIGINTs; get() blocks, with a 3 day timeout
                for result in results:
                    result.get(60 * 60 * 24 * 3)
            else:
                for core_id in cf_core_ids:
                    core = cores[core_id]
                    results.append(
                        db.connect_candidates(
                            name_db,
                            collection,
                            core.x_lim_context,
                            core.y_lim_context,
                            core.z_lim_context,
                            distance_threshold,
                        ))
    finally:
        pool.terminate()
        pool.join()

    logger.info("Add edge costs...")
    n_chunk = 0
    for chunk in pm_chunks:
        logger.info("Work on chunk {}/{}...".format(n_chunk, len(pm_chunks)))

        f = h5py.File(chunk, "r")
        attrs = f[pm_dset].attrs.items()
        shape = np.shape(np.array(f[pm_dset]))
        f.close()

        db.add_edge_cost(name_db, collection, voxel_size, volume_offset, chunk,
                         pm_dset)
        n_chunk += 1
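connect_candidates_alias is not part of this excerpt. Judging from the functools.partial binding and the arguments passed by pool.apply_async above, a plausible definition simply forwards to DB.connect_candidates; it has to live at module level because bound methods do not pickle for multiprocessing on Python 2:

# Hypothetical definition, inferred from the pool.apply_async call above.
def connect_candidates_alias(db, name_db, collection,
                             x_lim, y_lim, z_lim, distance_threshold):
    return db.connect_candidates(name_db, collection,
                                 x_lim, y_lim, z_lim, distance_threshold)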
Example #6
def track(config):
    roi = [config["roi_x"], config["roi_y"], config["roi_z"]]
    volume_offset = config["volume_offset"] * config["voxel_size"]

    roi_volume_size = np.array([r[1] - r[0]
                                for r in roi]) * config["voxel_size"]
    roi_offset = np.array([r[0] for r in roi]) * config["voxel_size"]

    x_lim_roi = {
        "min": roi_offset[0],
        "max": roi_offset[0] + roi_volume_size[0]
    }

    y_lim_roi = {
        "min": roi_offset[1],
        "max": roi_offset[1] + roi_volume_size[1]
    }

    z_lim_roi = {
        "min": roi_offset[2],
        "max": roi_offset[2] + roi_volume_size[2]
    }

    db_credentials = config["db_credentials"]

    if np.any(config["context_size"] * config["voxel_size"] <
              2 * config["distance_threshold"]):
        raise ValueError("The context size needs to be at least "
                         "twice as large as the distance threshold "
                         "in all dimensions")

    # Init logger:
    logger.info("Start tracking")

    # Generate core geometry:
    builder = CoreBuilder(volume_size=roi_volume_size,
                          core_size=config["core_size"] * config["voxel_size"],
                          context_size=config["context_size"] *
                          config["voxel_size"],
                          offset=roi_offset)

    cores = builder.generate_cores()

    # Get conflict free core lists
    cf_lists = builder.gen_cfs()

    if config["extract_candidates"]:
        max_chunk_dir = chunk_prob_map(
            volume_shape=config["volume_shape"],
            max_chunk_shape=config["max_chunk_shape"],
            volume_offset=config["volume_offset"],
            voxel_size=config["voxel_size"],
            prob_map_h5=config["maxima"],
            dset=config["maxima_dset"],
            output_dir=os.path.join(config["chunk_output_dir"], "maxima"))

        pm_chunk_dir = chunk_prob_map(
            volume_shape=config["volume_shape"],
            max_chunk_shape=config["max_chunk_shape"],
            volume_offset=config["volume_offset"],
            voxel_size=config["voxel_size"],
            prob_map_h5=config["prob_map"],
            dset=config["prob_map_dset"],
            output_dir=os.path.join(config["chunk_output_dir"], "pm"))

        max_chunks = [os.path.join(max_chunk_dir, f)\
                      for f in os.listdir(max_chunk_dir) if f.endswith(".h5")]

        pm_chunks = [os.path.join(pm_chunk_dir, f)\
                     for f in os.listdir(pm_chunk_dir) if f.endswith(".h5")]

        config["pm_chunks"] = pm_chunks
        config["max_chunks"] = max_chunks
        """
        Extract id, and volume information from chunks
        and compare with ROI
        """
        chunk_limits = {}
        chunk_ids = {}
        roi_pm_chunks = []
        roi_max_chunks = []
        for max_chunk, pm_chunk in zip(max_chunks, pm_chunks):
            if not os.path.isfile(pm_chunk):
                raise ValueError("{} is not a file".format(pm_chunk))

            f = h5py.File(pm_chunk, "r")
            attrs = f[config["prob_map_dset"]].attrs.items()
            f.close()

            chunk_limit = attrs[1][1]
            chunk_id = attrs[0][1]

            chunk_limits[pm_chunk] = chunk_limit
            chunk_ids[pm_chunk] = chunk_id

            full_ovlp = np.array([False, False, False])
            for i in range(3):
                full_ovlp[i] = check_overlap(chunk_limit[i], roi[i])

            if np.all(full_ovlp):
                roi_pm_chunks.append(pm_chunk)
                roi_max_chunks.append(max_chunk)
        """
        Extract candidates from all ROI chunks and write to specified
        database.
        """

        write_candidate_graph(max_chunks=max_chunks,
                              max_dset=config["maxima_dset"],
                              pm_chunks=pm_chunks,
                              pm_dset=config["prob_map_dset"],
                              name_db=config["name_db"],
                              collection=config["name_collection"],
                              db_credentials=config["db_credentials"],
                              distance_threshold=config["distance_threshold"],
                              voxel_size=config["voxel_size"],
                              cores=cores,
                              cf_lists=cf_lists,
                              volume_offset=config["volume_offset"],
                              overwrite=True,
                              mp=config["mp"])

        # Clean up chunks
        shutil.rmtree(max_chunk_dir)
        shutil.rmtree(pm_chunk_dir)
        """
        Solve the ROI and write to specified database. The result
        is written out depending on the options in the Output section
        of the config file.
        """
    if config["reset"]:
        db = DB(config["db_credentials"])
        db.reset_collection(config["name_db"], config["name_collection"])

    if config["solve"]:
        solve_candidate_volume(name_db=config["name_db"],
                               collection=config["name_collection"],
                               db_credentials=config["db_credentials"],
                               cc_min_vertices=config["cc_min_vertices"],
                               start_edge_prior=config["start_edge_prior"],
                               selection_cost=config["selection_cost"],
                               orientation_factor=config["orientation_factor"],
                               comb_angle_factor=config["comb_angle_factor"],
                               time_limit=config["time_limit_per_cc"],
                               cores=cores,
                               cf_lists=cf_lists,
                               voxel_size=config["voxel_size"],
                               offset=np.array(roi_offset),
                               mp=config["mp"],
                               backend=config["backend"])

    if config["validate_selection"]:
        db = DB(config["db_credentials"])
        try:
            g1_selected = db.validate_selection(
                name_db=config["name_db"],
                collection=config["name_collection"],
                x_lim=x_lim_roi,
                y_lim=y_lim_roi,
                z_lim=z_lim_roi)
        except ValueError:
            logger.warning("WARNING, solution contains no vertices!")
            g1_selected = G1(0)

        if config["export_validated"]:
            g1_to_nml(g1_selected,
                      config["validated_output_path"],
                      knossos=True,
                      voxel_size=config["voxel_size"])