def handle_valid_frags(table_id, node_id):
    """Return, as binary, the highest child nodes of *node_id* that have meshes."""
    graph = app_utils.get_cg(table_id)
    fragment_ids = meshgen_utils.get_highest_child_nodes_with_meshes(
        graph,
        np.uint64(node_id),
        stop_layer=1,
        verify_existence=True,
    )
    return app_utils.tobinary(fragment_ids)
def handle_l2_chunk_children_binary(table_id, chunk_id):
    """Serialize a chunk's level-2 children: binary when ?as_array, pickle otherwise."""
    as_array = request.args.get("as_array", default=False, type=toboolean)
    children = common.handle_l2_chunk_children(table_id, chunk_id, as_array)
    if as_array:
        return tobinary(children)
    return pickle.dumps(children)
def handle_root_main(table_id, atomic_id, timestamp):
    """Resolve the root of *atomic_id* at *timestamp* and return it as binary."""
    current_app.request_type = "root"

    # Call ChunkedGraph
    graph = app_utils.get_cg(table_id)
    resolved_root = graph.get_root(np.uint64(atomic_id), time_stamp=timestamp)

    # Return binary
    return app_utils.tobinary(resolved_root)
def handle_root():
    """Resolve the root for the atomic ID posted as the first element of the JSON body."""
    supervoxel_id = np.uint64(json.loads(request.data)[0])

    # Call ChunkedGraph
    graph = app_utils.get_cg()
    resolved_root = graph.get_root(supervoxel_id)

    # Return binary
    return app_utils.tobinary(resolved_root)
def handle_valid_frags(table_id, node_id):
    """Record request context, then return meshed child fragments of *node_id* as binary."""
    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    graph = app_utils.get_cg(table_id)
    fragment_ids = meshgen_utils.get_highest_child_nodes_with_meshes(
        graph,
        np.uint64(node_id),
        stop_layer=1,
        verify_existence=True,
    )
    return app_utils.tobinary(fragment_ids)
def handle_roots(table_id):
    """Return root IDs as JSON, or as binary when ?as_binary names a response key."""
    int64_as_str = request.args.get("int64_as_str", default=False,
                                    type=toboolean)
    root_ids = common.handle_roots(table_id, is_binary=False)

    resp = {"root_ids": root_ids}
    binary_key = request.args.get("as_binary", default="", type=str)
    if binary_key in resp:
        return tobinary(resp[binary_key])
    return jsonify_with_kwargs(resp, int64_as_str=int64_as_str)
def handle_children(table_id, parent_id):
    """Return the direct children of *parent_id* as binary (empty for layer-1 nodes)."""
    current_app.request_type = "children"

    graph = app_utils.get_cg(table_id)
    parent = np.uint64(parent_id)

    # Layer-1 nodes sit at the bottom of the hierarchy and have no children.
    if graph.get_chunk_layer(parent) <= 1:
        child_ids = np.array([])
    else:
        child_ids = graph.get_children(parent)

    # Return binary
    return app_utils.tobinary(child_ids)
def handle_subgraph(root_id):
    """Return the atomic edges of *root_id*'s subgraph as binary.

    Optional query arg ``bounds`` is formatted ``"x1-x2_y1-y2_z1-z2"`` and is
    interpreted as a coordinate bounding box.
    """
    if "bounds" in request.args:
        bounds = request.args["bounds"]
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        bounding_box = np.array([b.split("-") for b in bounds.split("_")],
                                dtype=int).T
    else:
        bounding_box = None

    # Call ChunkedGraph
    cg = app_utils.get_cg()
    atomic_edges = cg.get_subgraph(int(root_id),
                                   get_edges=True,
                                   bounding_box=bounding_box,
                                   bb_is_coordinate=True)[0]

    # Return binary
    return app_utils.tobinary(atomic_edges)
def handle_leaves(table_id, root_id):
    """Return the atomic (leaf) node IDs under *root_id* as binary.

    Optional query arg ``bounds`` ("x1-x2_y1-y2_z1-z2") restricts the lookup
    to a coordinate bounding box.
    """
    current_app.request_type = "leaves"

    if "bounds" in request.args:
        bounds = request.args["bounds"]
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        bounding_box = np.array([b.split("-") for b in bounds.split("_")],
                                dtype=int).T
    else:
        bounding_box = None

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)
    atomic_ids = cg.get_subgraph_nodes(int(root_id),
                                       bounding_box=bounding_box,
                                       bb_is_coordinate=True)

    # Return binary
    return app_utils.tobinary(atomic_ids)
def handle_leaves_from_leave(atomic_id):
    """Return, as binary, the root of *atomic_id* followed by all leaves of that root.

    Optional query arg ``bounds`` ("x1-x2_y1-y2_z1-z2") restricts the lookup
    to a coordinate bounding box.
    """
    if "bounds" in request.args:
        bounds = request.args["bounds"]
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        bounding_box = np.array([b.split("-") for b in bounds.split("_")],
                                dtype=int).T
    else:
        bounding_box = None

    # Call ChunkedGraph
    cg = app_utils.get_cg()
    root_id = cg.get_root(int(atomic_id))
    atomic_ids = cg.get_subgraph(root_id,
                                 bounding_box=bounding_box,
                                 bb_is_coordinate=True)

    # Return binary: the root ID is prepended to the leaf IDs.
    return app_utils.tobinary(np.concatenate([np.array([root_id]),
                                              atomic_ids]))
def handle_merge():
    """Merge two supervoxels given as (node_id, x, y, z) entries in the JSON body.

    Returns the new root as binary, or None when a supervoxel cannot be
    resolved, the endpoints are more than one chunk apart, or the merge fails.
    """
    nodes = json.loads(request.data)
    assert len(nodes) == 2
    user_id = str(request.remote_addr)

    # Call ChunkedGraph
    graph = app_utils.get_cg()

    edge_endpoints = []
    for entry in nodes:
        parent = np.uint64(entry[0])
        x, y, z = entry[1:]
        supervoxel = graph.get_atomic_id_from_coord(x, y, z, parent_id=parent)
        if supervoxel is None:
            return None
        edge_endpoints.append(supervoxel)

    # Protection from long range mergers
    delta = (graph.get_chunk_coordinates(edge_endpoints[0])
             - graph.get_chunk_coordinates(edge_endpoints[1]))
    if np.any(np.abs(delta) > 1):
        return None

    new_root = graph.add_edge(
        user_id=user_id,
        atomic_edge=np.array(edge_endpoints, dtype=np.uint64))
    if new_root is None:
        return None

    # Return binary
    return app_utils.tobinary(new_root)
def handle_split():
    """Split a segment via mincut between the "sources" and "sinks" in the JSON body.

    Returns the new root IDs as binary, or None when any supervoxel cannot be
    resolved or the split fails.
    """
    payload = json.loads(request.data)
    user_id = str(request.remote_addr)

    # Call ChunkedGraph
    graph = app_utils.get_cg()

    endpoints = {}
    for side in ("sources", "sinks"):
        resolved = []
        for entry in payload[side]:
            parent = np.uint64(entry[0])
            x, y, z = entry[1:]
            supervoxel = graph.get_atomic_id_from_coord(x, y, z,
                                                        parent_id=parent)
            if supervoxel is None:
                return None
            resolved.append({"id": supervoxel,
                             "coord": np.array([x, y, z])})
        endpoints[side] = resolved

    new_roots = graph.remove_edges(
        user_id=user_id,
        source_id=endpoints["sources"][0]["id"],
        sink_id=endpoints["sinks"][0]["id"],
        source_coord=endpoints["sources"][0]["coord"],
        sink_coord=endpoints["sinks"][0]["coord"],
        mincut=True)
    if new_roots is None:
        return None

    # Return binary
    return app_utils.tobinary(new_roots)
def handle_children(parent_id):
    """Return descendants of *parent_id* down to a layer-dependent stop level, as binary.

    Higher-layer parents stop descending higher in the hierarchy, bounding the
    size of the returned subgraph.
    """
    # Call ChunkedGraph
    cg = app_utils.get_cg()
    parent_id = np.uint64(parent_id)
    layer = cg.get_chunk_layer(parent_id)

    # Map the parent's layer to the deepest layer to descend to.
    if layer > 4:
        stop_lvl = 4
    elif layer > 3:
        stop_lvl = 3
    elif layer == 3:
        stop_lvl = 2
    else:
        stop_lvl = 1

    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so only genuine errors fall back
    # to an empty result.
    try:
        children = cg.get_subgraph(parent_id, stop_lvl=stop_lvl)
    except Exception:
        children = np.array([])

    # Return binary
    return app_utils.tobinary(children)
def handle_merge(table_id):
    """Perform a merge via the common handler; return the new root IDs as binary."""
    result = common.handle_merge(table_id)
    return app_utils.tobinary(result.new_root_ids)
def handle_subgraph(table_id, root_id):
    """Fetch the subgraph of *root_id* via the common handler; return it as binary."""
    edges = common.handle_subgraph(table_id, root_id)
    return app_utils.tobinary(edges)
def handle_leaves_from_leave(table_id, atomic_id):
    """Resolve sibling leaves of *atomic_id* via the common handler; return binary."""
    ids = common.handle_leaves_from_leave(table_id, atomic_id)
    return app_utils.tobinary(ids)
def handle_leaves(table_id, root_id):
    """Fetch the leaf IDs of *root_id* via the common handler; return them as binary."""
    ids = common.handle_leaves(table_id, root_id)
    return app_utils.tobinary(ids)
def handle_children(table_id, parent_id):
    """Fetch the children of *parent_id* via the common handler; return binary."""
    ids = common.handle_children(table_id, parent_id)
    return app_utils.tobinary(ids)
def handle_root_2(table_id, atomic_id):
    """Resolve the root of *atomic_id* via the common handler; return it as binary."""
    resolved = common.handle_root(table_id, atomic_id)
    return app_utils.tobinary(resolved)
def handle_roots_binary(table_id):
    """Fetch root IDs via the common handler (binary mode) and serialize them."""
    ids = common.handle_roots(table_id, is_binary=True)
    return tobinary(ids)
def handle_merge(table_id):
    """Merge two supervoxels identified by (node_id, x, y, z) entries in the JSON body.

    Resolves each coordinate to a supervoxel, rejects long-range merges,
    performs the edge addition, kicks off background remeshing of the new
    level-2 nodes, and returns the new root ID as binary.

    Raises:
        cg_exceptions.BadRequest: supervoxel lookup failed, merge points too
            far apart, or a precondition of the merge was violated.
        cg_exceptions.InternalServerError: root lock unavailable or merge
            produced no new root.
    """
    current_app.request_type = "merge"

    nodes = json.loads(request.data)
    user_id = str(request.remote_addr)

    current_app.logger.debug(nodes)
    # Exactly one edge (two endpoints) per merge request.
    assert len(nodes) == 2

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    atomic_edge = []
    coords = []
    for node in nodes:
        node_id = node[0]
        x, y, z = node[1:]
        # Halve x/y: client coordinates appear to be at 2x the graph's
        # x/y resolution — TODO confirm against the calling client.
        x /= 2
        y /= 2

        coordinate = np.array([x, y, z])

        if not cg.is_in_bounds(coordinate):
            # Out-of-bounds: retry treating the input as nm, converting to
            # voxels and re-applying the x/y doubling — presumably a fallback
            # for a second client coordinate convention; verify with callers.
            coordinate /= cg.segmentation_resolution
            coordinate[0] *= 2
            coordinate[1] *= 2

        atomic_id = cg.get_atomic_id_from_coord(coordinate[0],
                                                coordinate[1],
                                                coordinate[2],
                                                parent_id=np.uint64(node_id))

        if atomic_id is None:
            raise cg_exceptions.BadRequest(
                f"Could not determine supervoxel ID for coordinates "
                f"{coordinate}."
            )

        coords.append(coordinate)
        atomic_edge.append(atomic_id)

    # Protection from long range mergers
    chunk_coord_delta = cg.get_chunk_coordinates(atomic_edge[0]) - \
        cg.get_chunk_coordinates(atomic_edge[1])

    if np.any(np.abs(chunk_coord_delta) > 3):
        raise cg_exceptions.BadRequest(
            "Chebyshev distance between merge points exceeded allowed maximum "
            "(3 chunks).")

    lvl2_nodes = []
    try:
        # add_edges may return (new_root, lvl2_nodes) or just new_root.
        ret = cg.add_edges(user_id=user_id,
                           atomic_edges=np.array(atomic_edge,
                                                 dtype=np.uint64),
                           source_coord=coords[:1],
                           sink_coord=coords[1:],
                           return_new_lvl2_nodes=True,
                           remesh_preview=False)

        if len(ret) == 2:
            new_root, lvl2_nodes = ret
        else:
            new_root = ret

    except cg_exceptions.LockingError as e:
        raise cg_exceptions.InternalServerError(
            "Could not acquire root lock for merge operation.")
    except cg_exceptions.PreconditionError as e:
        raise cg_exceptions.BadRequest(str(e))

    if new_root is None:
        raise cg_exceptions.InternalServerError(
            "Could not merge selected supervoxel.")

    # Remesh the affected level-2 nodes in the background so the HTTP
    # response is not blocked on meshing.
    t = threading.Thread(target=meshing_app_blueprint._mesh_lvl2_nodes,
                         args=(cg.get_serialized_info(), lvl2_nodes))
    t.start()

    # Return binary
    return app_utils.tobinary(new_root)
def handle_split(table_id):
    """Perform a split via the common handler; return the new root IDs as binary."""
    result = common.handle_split(table_id)
    return app_utils.tobinary(result.new_root_ids)
def handle_split(table_id):
    """Split a segment with a mincut between the "sources" and "sinks" in the JSON body.

    Resolves every (node_id, x, y, z) entry to a supervoxel, removes edges via
    mincut, kicks off background remeshing of the new level-2 nodes, and
    returns the new root IDs as binary.

    Raises:
        cg_exceptions.BadRequest: supervoxel lookup failed or a precondition
            of the split was violated.
        cg_exceptions.InternalServerError: root lock unavailable or the split
            produced no new roots.
    """
    current_app.request_type = "split"

    data = json.loads(request.data)
    user_id = str(request.remote_addr)

    current_app.logger.debug(data)

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    data_dict = {}
    for k in ["sources", "sinks"]:
        # Collect per-side "id" and "coord" lists keyed by name.
        data_dict[k] = collections.defaultdict(list)

        for node in data[k]:
            node_id = node[0]
            x, y, z = node[1:]
            # Halve x/y: client coordinates appear to be at 2x the graph's
            # x/y resolution — TODO confirm against the calling client.
            x /= 2
            y /= 2

            coordinate = np.array([x, y, z])

            current_app.logger.debug(("before", coordinate))

            if not cg.is_in_bounds(coordinate):
                # Out-of-bounds: retry treating the input as nm, converting
                # to voxels and re-applying the x/y doubling — presumably a
                # fallback for a second coordinate convention; verify with
                # callers.
                coordinate /= cg.segmentation_resolution
                coordinate[0] *= 2
                coordinate[1] *= 2

            current_app.logger.debug(("after", coordinate))

            atomic_id = cg.get_atomic_id_from_coord(coordinate[0],
                                                    coordinate[1],
                                                    coordinate[2],
                                                    parent_id=np.uint64(
                                                        node_id))

            if atomic_id is None:
                raise cg_exceptions.BadRequest(
                    f"Could not determine supervoxel ID for coordinates "
                    f"{coordinate}.")

            data_dict[k]["id"].append(atomic_id)
            data_dict[k]["coord"].append(coordinate)

    current_app.logger.debug(data_dict)

    lvl2_nodes = []
    try:
        # remove_edges may return (new_roots, lvl2_nodes) or just new_roots.
        ret = cg.remove_edges(user_id=user_id,
                              source_ids=data_dict["sources"]["id"],
                              sink_ids=data_dict["sinks"]["id"],
                              source_coords=data_dict["sources"]["coord"],
                              sink_coords=data_dict["sinks"]["coord"],
                              mincut=True,
                              return_new_lvl2_nodes=True,
                              remesh_preview=False)

        if len(ret) == 2:
            new_roots, lvl2_nodes = ret
        else:
            new_roots = ret

    except cg_exceptions.LockingError as e:
        raise cg_exceptions.InternalServerError(
            "Could not acquire root lock for split operation.")
    except cg_exceptions.PreconditionError as e:
        raise cg_exceptions.BadRequest(str(e))

    if new_roots is None:
        raise cg_exceptions.InternalServerError(
            "Could not split selected segment groups."
        )

    current_app.logger.debug(("after split:", new_roots))

    # Remesh the affected level-2 nodes in the background so the HTTP
    # response is not blocked on meshing.
    t = threading.Thread(target=meshing_app_blueprint._mesh_lvl2_nodes,
                         args=(cg.get_serialized_info(), lvl2_nodes))
    t.start()

    # Return binary
    return app_utils.tobinary(new_roots)
def handle_root_1(table_id):
    """Resolve the root for the atomic ID posted in the JSON body; return binary."""
    supervoxel_id = np.uint64(json.loads(request.data)[0])
    resolved = common.handle_root(table_id, supervoxel_id)
    return app_utils.tobinary(resolved)