Example #1
    def worker(outdir, fail=None):

        client = daisy.Client()

        while True:

            block = client.acquire_block()
            if block is None:
                break

            TestMultipleTasks.process_block(outdir, block, fail)

            client.release_block(block, 0)
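The scheduler side that drives a worker like this is not shown. The sketch below is an assumption, following the pre-1.0 daisy run_blockwise signature that this worker's acquire/release calls suggest; the ROIs, output path, and worker count are placeholders. A zero-argument process_function tells daisy to spawn it once per worker, and each spawned worker then pulls blocks through daisy.Client() exactly as above.

import daisy

# Placeholder ROIs: a 1D volume of 100 units, processed in blocks of 10.
total_roi = daisy.Roi((0,), (100,))
block_roi = daisy.Roi((0,), (10,))

daisy.run_blockwise(
    total_roi,
    block_roi,                                    # read ROI of one block
    block_roi,                                    # write ROI of one block
    process_function=lambda: worker('/tmp/out'),  # worker from the example
    num_workers=2,
    fit='shrink')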
Example #2
    def worker(self, outdir, fail=None):

        client = daisy.Client()

        while True:

            block = client.acquire_block()
            if block is None:
                break

            self.process_block(outdir, block, fail)

            client.release_block(block, 0)
Example #3
    def run_worker(self):
        '''Wrapper for `_worker_impl()`'''

        assert 'DAISY_CONTEXT' in os.environ, (
            "DAISY_CONTEXT must be defined as an environment variable")
        logger.info("WORKER: Running with context %s" %
                    os.environ['DAISY_CONTEXT'])

        database = Database(self.db_host, self.db_id)
        client_scheduler = daisy.Client()

        while True:
            with client_scheduler.acquire_block() as block:
                if block is None:
                    break
                logger.info(f'Received block {block}')
                self._worker_impl(block)
                database.add_finished(block.block_id)
Example #4
    def test_basic(self):
        roi = daisy.Roi((0, 0, 0), (10, 10, 10))
        task_id = 1
        block = daisy.Block(roi, roi, roi, block_id=1, task_id=task_id)
        parent_conn, child_conn = mp.Pipe()
        server_process = mp.Process(target=self.run_test_server,
                                    args=(block, child_conn))
        server_process.start()
        host, port = parent_conn.recv()
        context = daisy.Context(hostname=host,
                                port=port,
                                task_id=task_id,
                                worker_id=1)
        client = daisy.Client(context=context)
        with client.acquire_block() as block:
            block.status = daisy.BlockStatus.SUCCESS

        success = parent_conn.recv()
        server_process.join()
        self.assertTrue(success)
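The test drives the context-manager API by hand, setting block.status before the with block exits. A worker built on the same API might look like the sketch below; this is an assumption, not code from the test suite, and process stands in for any per-block function.

import daisy


def run_worker(process):
    # With no explicit context, daisy.Client() reads DAISY_CONTEXT from the
    # environment, as set by the scheduler that spawned this worker.
    client = daisy.Client()
    while True:
        with client.acquire_block() as block:
            if block is None:
                break
            try:
                process(block)  # hypothetical per-block function
                block.status = daisy.BlockStatus.SUCCESS
            except Exception:
                # Failure policy is an assumption, not prescribed by daisy.
                block.status = daisy.BlockStatus.FAILED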
Example #5
def solve_in_block(db_host,
                   db_name,
                   evidence_factor,
                   comb_angle_factor,
                   start_edge_prior,
                   selection_cost,
                   time_limit,
                   solve_number,
                   graph_number,
                   selected_attr="selected",
                   solved_attr="solved",
                   **kwargs):

    print("Solve in block")

    graph_provider = MongoDbGraphProvider(
        db_name,
        db_host,
        mode='r+',
        position_attribute=['z', 'y', 'x'],
        edges_collection="edges_g{}".format(graph_number))

    client = daisy.Client()

    while True:
        print("Acquire block")
        block = client.acquire_block()

        if block is None:
            return 0

        logger.debug("Solving in block %s", block)

        if check_function(block, 'solve_s{}'.format(solve_number), db_name,
                          db_host):
            client.release_block(block, 0)
            continue

        start_time = time.time()
        graph = graph_provider.get_graph(block.read_roi)

        num_nodes = graph.number_of_nodes()
        num_edges = graph.number_of_edges()
        logger.info(
            "Reading graph with %d nodes and %d edges took %s seconds" %
            (num_nodes, num_edges, time.time() - start_time))

        if num_edges == 0:
            logger.info("No edges in roi %s. Skipping" % block.read_roi)
            write_done(block, 'solve_s{}'.format(solve_number), db_name,
                       db_host)
            client.release_block(block, 0)
            continue

        print("solve")
        solver = Solver(graph, evidence_factor, comb_angle_factor,
                        start_edge_prior, selection_cost, time_limit,
                        selected_attr, solved_attr)

        solver.initialize()
        solver.solve()

        start_time = time.time()
        graph.update_edge_attrs(block.write_roi,
                                attributes=[selected_attr, solved_attr])

        graph.update_node_attrs(block.write_roi,
                                attributes=[selected_attr, solved_attr])

        logger.info(
            "Updating attributes %s & %s for %d edges took %s seconds" %
            (selected_attr, solved_attr, num_edges, time.time() - start_time))

        print("Write done")
        write_done(block, 'solve_s{}'.format(solve_number), db_name, db_host)

        print("Release block")
        client.release_block(block, 0)

    return 0
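check_function and write_done come from elsewhere in the source project. Judging only from how they are called, they make the loop idempotent: finished blocks are recorded so a restarted run can skip them. The implementations below are hypothetical, shown purely to illustrate that contract; the collection naming is an assumption.

import pymongo


def write_done(block, step_name, db_name, db_host):
    # Record the block as finished for this step.
    collection = pymongo.MongoClient(db_host)[db_name][step_name + '_done']
    collection.insert_one({'block_id': block.block_id})


def check_function(block, step_name, db_name, db_host):
    # True if this block was already processed in a previous run.
    collection = pymongo.MongoClient(db_host)[db_name][step_name + '_done']
    return collection.count_documents({'block_id': block.block_id}) > 0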
Example #6
def solve_in_block(
    db_host,
    skeletonization_db,
    subsampled_skeletonization_db,
    time_limit,
    solve_number,
    graph_number,
    location_attr,
    u_name,
    v_name,
    **kwargs,
):
    logger.info("Solve in block")

    subsampled_provider = MongoDbGraphProvider(subsampled_skeletonization_db,
                                               db_host,
                                               mode="r+",
                                               directed=True)

    skeletonization_provider = MongoDbGraphProvider(skeletonization_db,
                                                    db_host,
                                                    mode="r+")

    client = daisy.Client()

    while True:
        logger.info("Acquire block")
        block = client.acquire_block()

        if not block:
            return 0

        logger.debug("Solving in block %s", block)

        if check_function(
                block,
                "solve_s{}".format(solve_number),
                subsampled_skeletonization_db,
                db_host,
        ):
            client.release_block(block, 0)
            continue

        start_time = time.time()
        skeletonization = skeletonization_provider.get_graph(
            block.read_roi, node_inclusion="dangling", edge_inclusion="both")
        # anything in matched was solved previously and must be maintained.
        pre_solved = subsampled_provider.get_graph(block.read_roi,
                                                   node_inclusion="dangling",
                                                   edge_inclusion="both")

        # if len(skeletonization.nodes()) > 10_000:
        #     to_remove = set(skeletonization.nodes()) - set(pre_solved.nodes())
        #     skeletonization.remove_nodes_from(to_remove)
        #     logger.info(f"Solving for {len(skeletonization.nodes())} would take too long")
        #     logger.info(f"Ignoring {len(to_remove)} nodes and skipping this block!")

        logger.info(
            f"Reading skeletonization with {len(skeletonization.nodes)} nodes "
            f"and {len(skeletonization.edges)} edges "
            f"took {time.time() - start_time} seconds")

        if len(skeletonization.nodes) == 0:
            logger.info(
                f"No consensus nodes in roi {block.read_roi}. Skipping")
            write_done(block, f"solve_s{solve_number}",
                       subsampled_skeletonization_db, db_host)
            client.release_block(block, 0)
            continue

        logger.info("PreProcess...")
        start_time = time.time()

        logger.info(
            f"Skeletonization has {len(skeletonization.nodes)} nodes "
            f"and {len(skeletonization.edges)} edges before subsampling")

        num_removed = remove_line_nodes(skeletonization, location_attr)
        logger.info(f"Removed {num_removed} nodes from skeletonization!")

        num_nodes, num_edges = write_matched(
            db_host,
            subsampled_skeletonization_db,
            block,
            skeletonization,
            pre_solved,
            location_attr,
            u_name,
            v_name,
        )

        logger.info(
            f"Writing matched graph with {num_nodes} nodes and {num_edges} edges "
            f"took {time.time()-start_time} seconds")

        logger.info("Write done")
        write_done(
            block,
            "solve_s{}".format(solve_number),
            subsampled_skeletonization_db,
            db_host,
        )

        logger.info("Release block")
        client.release_block(block, 0)

    return 0
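remove_line_nodes and write_matched are project-specific helpers. Below is a rough sketch of what a function like remove_line_nodes might do, inferred only from its name and the surrounding logging (subsampling a skeleton graph by contracting pass-through nodes); the real implementation presumably also uses location_attr, for example to contract only nearly collinear runs, which this sketch ignores.

def remove_line_nodes(graph, location_attr):
    # Contract chains of degree-2 "line" nodes so that only branch points
    # and endpoints remain (a hypothetical stand-in for the real code).
    removed = 0
    for node in list(graph.nodes):
        neighbors = list(graph.neighbors(node))
        if len(neighbors) == 2:
            u, v = neighbors
            graph.remove_node(node)  # drop the pass-through node...
            graph.add_edge(u, v)     # ...and reconnect its two neighbors
            removed += 1
    return removed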
Example #7
import random
import sys
import time

import daisy
from filelock import FileLock

tmp_path = sys.argv[1]

client = daisy.Client()

with FileLock(f"{tmp_path}/worker_{client.worker_id}.lock"):
    while True:
        with client.acquire_block() as block:
            if block is None:
                break
            else:
                time.sleep(random.random())
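Because this worker is a stand-alone script, the scheduler has to spawn it as a subprocess; the subprocess inherits the DAISY_CONTEXT environment variable, which daisy.Client() reads to join the scheduler. Below is a launcher sketch under assumed names: the script saved as worker.py, daisy 1.0's Task API, and placeholder ROIs and worker count.

import subprocess

import daisy


def spawn_worker():
    # The spawned process inherits DAISY_CONTEXT from the daisy scheduler.
    subprocess.run(['python', 'worker.py', '/tmp/locks'], check=True)


task = daisy.Task(
    'filelock_example',
    total_roi=daisy.Roi((0,), (100,)),
    read_roi=daisy.Roi((0,), (10,)),
    write_roi=daisy.Roi((0,), (10,)),
    process_function=spawn_worker,
    num_workers=2)

daisy.run_blockwise([task])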
Example #8
# Imports inferred from the function body below.
import json
import logging
import sys
import time

import daisy
import lsd
import pymongo


def agglomerate_worker(input_config):

    logging.info(sys.argv)

    with open(input_config, 'r') as f:
        config = json.load(f)

    logging.info(config)

    affs_file = config['affs_file']
    affs_dataset = config['affs_dataset']
    fragments_file = config['fragments_file']
    fragments_dataset = config['fragments_dataset']
    db_host = config['db_host']
    db_name = config['db_name']
    queue = config['queue']
    merge_function = config['merge_function']

    waterz_merge_function = {
        'hist_quant_10': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 10, ScoreValue, 256, false>>',
        'hist_quant_10_initmax': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 10, ScoreValue, 256, true>>',
        'hist_quant_25': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 25, ScoreValue, 256, false>>',
        'hist_quant_25_initmax': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 25, ScoreValue, 256, true>>',
        'hist_quant_50': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 50, ScoreValue, 256, false>>',
        'hist_quant_50_initmax': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 50, ScoreValue, 256, true>>',
        'hist_quant_75': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 75, ScoreValue, 256, false>>',
        'hist_quant_75_initmax': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 75, ScoreValue, 256, true>>',
        'hist_quant_90': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 90, ScoreValue, 256, false>>',
        'hist_quant_90_initmax': 'OneMinus<HistogramQuantileAffinity<RegionGraphType, 90, ScoreValue, 256, true>>',
        'mean': 'OneMinus<MeanAffinity<RegionGraphType, ScoreValue>>',
    }[merge_function]

    logging.info(f"Reading affs from {affs_file}")
    affs = daisy.open_ds(affs_file, affs_dataset)

    logging.info(f"Reading fragments from {fragments_file}")
    fragments = daisy.open_ds(fragments_file, fragments_dataset)

    # open RAG DB
    logging.info("Opening RAG DB...")
    rag_provider = daisy.persistence.MongoDbGraphProvider(
        db_name,
        host=db_host,
        mode='r+',
        directed=False,
        edges_collection='edges_' + merge_function,
        position_attribute=['center_z', 'center_y', 'center_x'])

    logging.info("RAG DB opened")

    # open block done DB
    mongo_client = pymongo.MongoClient(db_host)
    db = mongo_client[db_name]
    blocks_agglomerated = db['blocks_agglomerated_' + merge_function]

    client = daisy.Client()

    while True:

        block = client.acquire_block()

        if block is None:
            break

        start = time.time()

        lsd.agglomerate_in_block(
                affs,
                fragments,
                rag_provider,
                block,
                merge_function=waterz_merge_function,
                threshold=1.0)

        document = {
            'num_cpus': 5,
            'queue': queue,
            'block_id': block.block_id,
            'read_roi': (block.read_roi.get_begin(), block.read_roi.get_shape()),
            'write_roi': (block.write_roi.get_begin(), block.write_roi.get_shape()),
            'start': start,
            'duration': time.time() - start
        }

        blocks_agglomerated.insert_one(document)

        client.release_block(block, ret=0)
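The worker reads all of its parameters from a JSON file named on the command line. The scheduler-side counterpart would therefore write that file and launch the worker; the sketch below assumes every name (the config path, the script name, and a CLI wrapper in it that forwards the path to agglomerate_worker).

import json
import subprocess


def start_worker(config, config_path='agglomerate_config.json'):
    # Write the JSON config the worker parses above, then launch the worker
    # process, which connects back to the scheduler via DAISY_CONTEXT.
    with open(config_path, 'w') as f:
        json.dump(config, f)
    subprocess.run(
        ['python', 'agglomerate_worker.py', config_path],
        check=True)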
Example #9
# Imports inferred from the function body below.
import json
import logging
import sys
import time

import daisy
import lsd
import pymongo


def extract_fragments_worker(input_config):

    logging.info(sys.argv)

    with open(input_config, 'r') as f:
        config = json.load(f)

    logging.info(config)

    affs_file = config['affs_file']
    affs_dataset = config['affs_dataset']
    fragments_file = config['fragments_file']
    fragments_dataset = config['fragments_dataset']
    db_name = config['db_name']
    db_host = config['db_host']
    queue = config['queue']
    context = config['context']
    num_voxels_in_block = config['num_voxels_in_block']
    fragments_in_xy = config['fragments_in_xy']
    epsilon_agglomerate = config['epsilon_agglomerate']
    filter_fragments = config['filter_fragments']
    replace_sections = config['replace_sections']

    logging.info(f"Reading affs from {affs_file}")

    affs = daisy.open_ds(affs_file, affs_dataset, mode='r')

    logging.info(f"Reading fragments from {fragments_file}")

    fragments = daisy.open_ds(
        fragments_file,
        fragments_dataset,
        mode='r+')

    if config['mask_file']:

        logging.info(f"Reading mask from {config['mask_file']}")

        mask = daisy.open_ds(config['mask_file'], config['mask_dataset'])

    else:

        mask = None

    # open RAG DB
    logging.info("Opening RAG DB...")

    rag_provider = daisy.persistence.MongoDbGraphProvider(
        db_name,
        host=db_host,
        mode='r+',
        directed=False,
        position_attribute=['center_z', 'center_y', 'center_x'])

    logging.info("RAG DB opened")

    # open block done DB
    mongo_client = pymongo.MongoClient(db_host)
    db = mongo_client[db_name]
    blocks_extracted = db['blocks_extracted']

    client = daisy.Client()

    while True:

        block = client.acquire_block()

        if block is None:
            break

        start = time.time()

        lsd.watershed_in_block(
            affs,
            block,
            context,
            rag_provider,
            fragments,
            num_voxels_in_block=num_voxels_in_block,
            mask=mask,
            fragments_in_xy=fragments_in_xy,
            epsilon_agglomerate=epsilon_agglomerate,
            filter_fragments=filter_fragments,
            replace_sections=replace_sections)

        document = {
            'num_cpus': 5,
            'queue': queue,
            'block_id': block.block_id,
            'read_roi': (
                block.read_roi.get_begin(),
                block.read_roi.get_shape()
            ),
            'write_roi': (
                block.write_roi.get_begin(),
                block.write_roi.get_shape()
            ),
            'start': start,
            'duration': time.time() - start
        }

        blocks_extracted.insert_one(document)

        client.release_block(block, ret=0)