Example #1
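The handlers in these examples are Flask view functions in the style of the PyChunkedGraph service. The excerpts omit their module preamble; a plausible reconstruction of the shared imports follows (a sketch: the module paths mirror the PyChunkedGraph layout but are assumptions, not taken from the source).

import collections
import json
import threading
import time
from datetime import datetime, UTC  # on Python < 3.11: from datetime import timezone; UTC = timezone.utc

import numpy as np
import pandas as pd
from flask import current_app, g, jsonify, request

# Module paths below are assumptions modeled on the PyChunkedGraph layout.
from pychunkedgraph.app import app_utils
from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions
from pychunkedgraph.backend.utils import column_keys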
def handle_root(table_id, atomic_id):
    current_app.table_id = table_id

    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    # Convert seconds since epoch to UTC datetime
    try:
        timestamp = float(request.args.get("timestamp", time.time()))
        timestamp = datetime.fromtimestamp(timestamp, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    stop_layer = request.args.get("stop_layer", None)
    if stop_layer is not None:
        try:
            stop_layer = int(stop_layer)
        except (TypeError, ValueError):
            raise cg_exceptions.BadRequest("stop_layer is not an integer")

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)
    root_id = cg.get_root(np.uint64(atomic_id),
                          stop_layer=stop_layer,
                          time_stamp=timestamp)

    # Return root ID
    return root_id
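The timestamp-parsing block above recurs in nearly every handler in this section. A small helper capturing the pattern might look like the sketch below; the helper name is hypothetical and not part of the source.

def parse_timestamp_arg(default=None):
    # Hypothetical helper: parse the optional "timestamp" query arg
    # (seconds since epoch) into a timezone-aware UTC datetime.
    if default is None:
        default = time.time()
    try:
        raw = float(request.args.get("timestamp", default))
        return datetime.fromtimestamp(raw, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")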
Example #2
def handle_split_preview(table_id):
    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    data = json.loads(request.data)
    current_app.logger.debug(data)

    cg = app_utils.get_cg(table_id)

    data_dict = {}
    for k in ["sources", "sinks"]:
        data_dict[k] = collections.defaultdict(list)

        for node in data[k]:
            node_id = node[0]
            x, y, z = node[1:]
            coordinate = np.array([x, y, z]) / cg.segmentation_resolution

            atomic_id = cg.get_atomic_id_from_coord(
                coordinate[0],
                coordinate[1],
                coordinate[2],
                parent_id=np.uint64(node_id))

            if atomic_id is None:
                raise cg_exceptions.BadRequest(
                    f"Could not determine supervoxel ID for coordinates "
                    f"{coordinate}.")

            data_dict[k]["id"].append(atomic_id)
            data_dict[k]["coord"].append(coordinate)

    current_app.logger.debug(data_dict)

    try:
        supervoxel_ccs, illegal_split = cg._run_multicut(
            source_ids=data_dict["sources"]["id"],
            sink_ids=data_dict["sinks"]["id"],
            source_coords=data_dict["sources"]["coord"],
            sink_coords=data_dict["sinks"]["coord"],
            bb_offset=(240, 240, 24),
            split_preview=True)

    except cg_exceptions.PreconditionError as e:
        raise cg_exceptions.BadRequest(str(e))

    resp = {
        "supervoxel_connected_components": supervoxel_ccs,
        "illegal_split": illegal_split
    }
    return resp
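For reference, the request body pairs every source and sink endpoint with its location. A hypothetical payload (IDs and coordinates invented for illustration; the endpoint URL is deployment-specific) could be built like this:

payload = {
    # Each entry is [node_id, x, y, z] with coordinates in world units
    "sources": [[864691135123456789, 104820, 51232, 2128]],
    "sinks":   [[864691135987654321, 104900, 51250, 2128]],
}
# e.g. requests.post(split_preview_url, data=json.dumps(payload))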
Example #3
def handle_redo(table_id):
    if table_id in ["fly_v26", "fly_v31"]:
        raise cg_exceptions.InternalServerError(
            "Redo not supported for this chunkedgraph table.")

    current_app.table_id = table_id

    data = json.loads(request.data)
    is_priority = request.args.get('priority', True, type=str2bool)
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    current_app.logger.debug(data)

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)
    operation_id = np.uint64(data["operation_id"])

    try:
        ret = cg.redo(user_id=user_id, operation_id=operation_id)
    except cg_exceptions.LockingError:
        raise cg_exceptions.InternalServerError(
            "Could not acquire root lock for redo operation.")
    except (cg_exceptions.PreconditionError,
            cg_exceptions.PostconditionError) as e:
        raise cg_exceptions.BadRequest(str(e))

    current_app.logger.debug(("after redo:", ret.new_root_ids))
    current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))

    if ret.new_lvl2_ids.size > 0:
        trigger_remesh(table_id, ret.new_lvl2_ids, is_priority=is_priority)

    return ret
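str2bool is used as a Flask type= converter above but is not defined in these excerpts; a plausible reconstruction (an assumption) is:

def str2bool(value):
    # Hypothetical reconstruction: map common truthy strings to True.
    return str(value).lower() in ("true", "1", "t", "y", "yes")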
Example #4
def handle_rollback(table_id):
    if table_id in ["fly_v26", "fly_v31"]:
        raise cg_exceptions.InternalServerError(
            "Rollback not supported for this chunkedgraph table.")

    current_app.table_id = table_id

    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id
    target_user_id = request.args["user_id"]

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)
    # Note: all_user_operations reads the target "user_id" from the same
    # request args (see Example #5 below)
    user_operations = all_user_operations(table_id)
    operation_ids = user_operations["operation_id"]
    timestamps = user_operations["timestamp"]
    operations = list(zip(operation_ids, timestamps))
    # Undo in chronological order (oldest operation first)
    operations.sort(key=lambda op: op[1])

    for operation in operations:
        operation_id = operation[0]
        try:
            ret = cg.undo_operation(user_id=target_user_id,
                                    operation_id=operation_id)
        except cg_exceptions.LockingError:
            raise cg_exceptions.InternalServerError(
                "Could not acquire root lock for undo operation.")
        except (cg_exceptions.PreconditionError,
                cg_exceptions.PostconditionError) as e:
            raise cg_exceptions.BadRequest(str(e))

        if ret.new_lvl2_ids.size > 0:
            trigger_remesh(table_id, ret.new_lvl2_ids, is_priority=False)

    return user_operations
Example #5
def all_user_operations(table_id):
    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id
    target_user_id = request.args["user_id"]

    try:
        start_time = float(request.args.get("start_time", 0))
        start_time = datetime.fromtimestamp(start_time, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "start_time parameter is not a valid unix timestamp")

    # Call ChunkedGraph
    cg_instance = app_utils.get_cg(table_id)

    log_rows = cg_instance.read_log_rows(start_time=start_time)

    valid_entry_ids = []
    timestamp_list = []

    entry_ids = np.sort(list(log_rows.keys()))
    for entry_id in entry_ids:
        entry = log_rows[entry_id]
        user_id = entry[column_keys.OperationLogs.UserID]

        if user_id == target_user_id:
            valid_entry_ids.append(entry_id)
            timestamp = entry["timestamp"]
            timestamp_list.append(timestamp)

    return {"operation_id": valid_entry_ids, "timestamp": timestamp_list}
Example #6
def tabular_change_log_recent(table_id):
    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    try:
        start_time = float(request.args.get("start_time", 0))
        start_time = datetime.fromtimestamp(start_time, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "start_time parameter is not a valid unix timestamp")

    # Call ChunkedGraph
    cg_instance = app_utils.get_cg(table_id)

    log_rows = cg_instance.read_log_rows(start_time=start_time)

    timestamp_list = []
    user_list = []

    entry_ids = np.sort(list(log_rows.keys()))
    for entry_id in entry_ids:
        entry = log_rows[entry_id]

        timestamp = entry["timestamp"]
        timestamp_list.append(timestamp)

        user_id = entry[column_keys.OperationLogs.UserID]
        user_list.append(user_id)

    return pd.DataFrame.from_dict({
        "operation_id": entry_ids,
        "timestamp": timestamp_list,
        "user_id": user_list
    })
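The handler returns a pandas DataFrame rather than a serialized HTTP response. A route wrapper would typically convert it; a minimal sketch (the wrapper name is hypothetical) using pandas' standard to_json:

def tabular_change_log_recent_json(table_id):
    # Hypothetical wrapper: serialize the DataFrame for an HTTP response
    df = tabular_change_log_recent(table_id)
    return df.to_json(orient="records")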
Example #7
def handle_roots(table_id, is_binary=False):
    current_app.request_type = "roots"
    current_app.table_id = table_id

    if is_binary:
        node_ids = np.frombuffer(request.data, np.uint64)
    else:
        node_ids = np.array(json.loads(request.data)["node_ids"],
                            dtype=np.uint64)
    # Convert seconds since epoch to UTC datetime
    try:
        timestamp = float(request.args.get("timestamp", time.time()))
        timestamp = datetime.fromtimestamp(timestamp, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    stop_layer = request.args.get("stop_layer", None)
    if stop_layer is not None:
        stop_layer = int(stop_layer)

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)
    root_ids = cg.get_roots(node_ids,
                            stop_layer=stop_layer,
                            time_stamp=timestamp)

    return root_ids
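When is_binary is set, the request body is read as a raw uint64 buffer. A client can produce a payload that np.frombuffer will round-trip, for example:

node_ids = np.array([864691135123456789, 864691135123456790], dtype=np.uint64)
payload = node_ids.tobytes()  # matches np.frombuffer(request.data, np.uint64)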
Example #8
def handle_root_2(table_id, atomic_id):

    # Convert seconds since epoch to UTC datetime
    try:
        timestamp = float(request.args.get('timestamp', time.time()))
        timestamp = datetime.fromtimestamp(timestamp, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    return handle_root_main(table_id, np.uint64(atomic_id), timestamp)
Example #9
    def _get_supervoxel_id_from_node(node):
        # Nested helper: `cg` is captured from the enclosing handler's scope
        node_id = node[0]
        x, y, z = node[1:]
        coordinate = np.array([x, y, z]) / cg.segmentation_resolution

        supervoxel_id = cg.get_atomic_id_from_coord(
            coordinate[0],
            coordinate[1],
            coordinate[2],
            parent_id=np.uint64(node_id))
        if supervoxel_id is None:
            raise cg_exceptions.BadRequest(
                f"Could not determine supervoxel ID for coordinates "
                f"{coordinate}.")

        return supervoxel_id
Example #10
def merge_log(table_id, root_id):
    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    try:
        time_stamp_past = float(request.args.get("timestamp", 0))
        time_stamp_past = datetime.fromtimestamp(time_stamp_past, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    segment_history = cg_history.SegmentHistory(cg, int(root_id))
    return segment_history.merge_log(correct_for_wrong_coord_type=True)
Example #11
def change_log(table_id, root_id):
    current_app.request_type = "change_log"

    try:
        time_stamp_past = float(request.args.get('timestamp', 0))
        time_stamp_past = datetime.fromtimestamp(time_stamp_past, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    change_log = cg.get_change_log(root_id=np.uint64(root_id),
                                   correct_for_wrong_coord_type=True,
                                   time_stamp_past=time_stamp_past)

    return jsonify(change_log)
Example #12
def change_log(table_id, root_id=None):
    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    try:
        time_stamp_past = float(request.args.get("timestamp", 0))
        time_stamp_past = datetime.fromtimestamp(time_stamp_past, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)
    if not root_id:
        return cg_history.get_all_log_entries(cg)

    segment_history = cg_history.SegmentHistory(cg, int(root_id))

    return segment_history.change_log()
Example #13
def handle_l2_chunk_children(table_id, chunk_id, as_array):
    current_app.request_type = "l2_chunk_children"
    current_app.table_id = table_id

    # Convert seconds since epoch to UTC datetime
    try:
        timestamp = float(request.args.get("timestamp", time.time()))
        timestamp = datetime.fromtimestamp(timestamp, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    chunk_layer = cg.get_chunk_layer(chunk_id)
    if chunk_layer != 2:
        raise cg_exceptions.PreconditionError(
            f"This function only accepts level 2 chunks; the requested chunk "
            f"is a level {chunk_layer} chunk.")

    rr_chunk = cg.range_read_chunk(chunk_id=np.uint64(chunk_id),
                                   columns=column_keys.Hierarchy.Child,
                                   time_stamp=timestamp)

    if as_array:
        l2_chunk_array = []

        for l2 in rr_chunk:
            svs = rr_chunk[l2][0].value
            for sv in svs:
                l2_chunk_array.extend([l2, sv])

        return np.array(l2_chunk_array)
    else:
        # store in dict of keys to arrays to remove reliance on bigtable
        l2_chunk_dict = {}
        for k in rr_chunk:
            l2_chunk_dict[k] = rr_chunk[k][0].value

        return l2_chunk_dict
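In the as_array branch the result interleaves (level-2 ID, supervoxel ID) pairs into one flat array; a consumer can recover the pairs with a reshape:

flat = handle_l2_chunk_children(table_id, chunk_id, as_array=True)
pairs = flat.reshape(-1, 2)  # column 0: level-2 IDs, column 1: supervoxel IDs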
Example #14
def handle_pairwise_contact_sites(table_id, first_node_id, second_node_id):
    current_app.request_type = "pairwise_contact_sites"
    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    try:
        timestamp = float(request.args.get("timestamp", time.time()))
        timestamp = datetime.fromtimestamp(timestamp, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")
    exact_location = request.args.get("exact_location",
                                      True,
                                      type=app_utils.toboolean)
    cg = app_utils.get_cg(table_id)
    contact_sites_list, cs_metadata = contact_sites.get_contact_sites_pairwise(
        cg,
        np.uint64(first_node_id),
        np.uint64(second_node_id),
        end_time=timestamp,
        exact_location=exact_location,
    )
    return contact_sites_list, cs_metadata
Example #15
def handle_contact_sites(table_id, root_id):
    partners = request.args.get("partners", True, type=app_utils.toboolean)
    as_list = request.args.get("as_list", True, type=app_utils.toboolean)
    areas_only = request.args.get("areas_only", True, type=app_utils.toboolean)

    current_app.table_id = table_id
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    try:
        timestamp = float(request.args.get("timestamp", time.time()))
        timestamp = datetime.fromtimestamp(timestamp, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")

    if "bounds" in request.args:
        bounds = request.args["bounds"]
        bounding_box = np.array([b.split("-") for b in bounds.split("_")],
                                dtype=int).T  # np.int was removed in NumPy 1.24
    else:
        bounding_box = None

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    cs_list, cs_metadata = contact_sites.get_contact_sites(
        cg,
        np.uint64(root_id),
        bounding_box=bounding_box,
        compute_partner=partners,
        end_time=timestamp,
        as_list=as_list,
        areas_only=areas_only)

    return cs_list, cs_metadata
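The bounds query arg encodes an axis-aligned bounding box as "xmin-xmax_ymin-ymax_zmin-zmax"; after the transpose the first row holds the minima and the second the maxima (values hypothetical):

# "100-200_300-400_10-20" parses to:
# np.array([[100, 300, 10],
#           [200, 400, 20]])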
Example #16
def handle_is_latest_roots(table_id, is_binary):
    current_app.request_type = "is_latest_roots"
    current_app.table_id = table_id

    if is_binary:
        node_ids = np.frombuffer(request.data, np.uint64)
    else:
        node_ids = np.array(json.loads(request.data)["node_ids"],
                            dtype=np.uint64)
    # Convert seconds since epoch to UTC datetime
    try:
        timestamp = float(request.args.get("timestamp", time.time()))
        timestamp = datetime.fromtimestamp(timestamp, UTC)
    except (TypeError, ValueError):
        raise cg_exceptions.BadRequest(
            "Timestamp parameter is not a valid unix timestamp")
    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    # Nodes with a NewParent entry were superseded by a later edit; any node
    # absent from the result is still a latest root.
    row_dict = cg.read_node_id_rows(node_ids=node_ids,
                                    columns=column_keys.Hierarchy.NewParent)
    is_latest = ~np.isin(node_ids, list(row_dict.keys()))

    return is_latest
Example #17
def handle_split(table_id):
    current_app.table_id = table_id

    data = json.loads(request.data)
    is_priority = request.args.get('priority', True, type=str2bool)
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    current_app.logger.debug(data)

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    data_dict = {}
    for k in ["sources", "sinks"]:
        data_dict[k] = collections.defaultdict(list)

        for node in data[k]:
            node_id = node[0]
            x, y, z = node[1:]
            coordinate = np.array([x, y, z]) / cg.segmentation_resolution

            atomic_id = cg.get_atomic_id_from_coord(
                coordinate[0],
                coordinate[1],
                coordinate[2],
                parent_id=np.uint64(node_id),
            )

            if atomic_id is None:
                raise cg_exceptions.BadRequest(
                    f"Could not determine supervoxel ID for coordinates "
                    f"{coordinate}.")

            data_dict[k]["id"].append(atomic_id)
            data_dict[k]["coord"].append(coordinate)

    current_app.logger.debug(data_dict)

    try:
        ret = cg.remove_edges(
            user_id=user_id,
            source_ids=data_dict["sources"]["id"],
            sink_ids=data_dict["sinks"]["id"],
            source_coords=data_dict["sources"]["coord"],
            sink_coords=data_dict["sinks"]["coord"],
            mincut=True,
        )

    except cg_exceptions.LockingError:
        raise cg_exceptions.InternalServerError(
            "Could not acquire root lock for split operation.")
    except cg_exceptions.PreconditionError as e:
        raise cg_exceptions.BadRequest(str(e))

    if ret.new_root_ids is None:
        raise cg_exceptions.InternalServerError(
            "Could not split selected segment groups.")

    current_app.logger.debug(("after split:", ret.new_root_ids))
    current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))

    if len(ret.new_lvl2_ids) > 0:
        trigger_remesh(table_id, ret.new_lvl2_ids, is_priority=is_priority)

    return ret
Example #18
def handle_split(table_id):
    current_app.request_type = "split"

    data = json.loads(request.data)
    user_id = str(request.remote_addr)

    current_app.logger.debug(data)

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    data_dict = {}
    for k in ["sources", "sinks"]:
        data_dict[k] = collections.defaultdict(list)

        for node in data[k]:
            node_id = node[0]
            x, y, z = node[1:]

            x /= 2
            y /= 2

            coordinate = np.array([x, y, z])

            current_app.logger.debug(("before", coordinate))

            if not cg.is_in_bounds(coordinate):
                coordinate /= cg.segmentation_resolution

                coordinate[0] *= 2
                coordinate[1] *= 2

            current_app.logger.debug(("after", coordinate))

            atomic_id = cg.get_atomic_id_from_coord(coordinate[0],
                                                    coordinate[1],
                                                    coordinate[2],
                                                    parent_id=np.uint64(
                                                        node_id))

            if atomic_id is None:
                raise cg_exceptions.BadRequest(
                    f"Could not determine supervoxel ID for coordinates "
                    f"{coordinate}.")

            data_dict[k]["id"].append(atomic_id)
            data_dict[k]["coord"].append(coordinate)

    current_app.logger.debug(data_dict)

    lvl2_nodes = []
    try:
        ret = cg.remove_edges(user_id=user_id,
                              source_ids=data_dict["sources"]["id"],
                              sink_ids=data_dict["sinks"]["id"],
                              source_coords=data_dict["sources"]["coord"],
                              sink_coords=data_dict["sinks"]["coord"],
                              mincut=True,
                              return_new_lvl2_nodes=True,
                              remesh_preview=False)

        if len(ret) == 2:
            new_roots, lvl2_nodes = ret
        else:
            new_roots = ret

    except cg_exceptions.LockingError:
        raise cg_exceptions.InternalServerError(
            "Could not acquire root lock for split operation.")
    except cg_exceptions.PreconditionError as e:
        raise cg_exceptions.BadRequest(str(e))

    if new_roots is None:
        raise cg_exceptions.InternalServerError(
            "Could not split selected segment groups."
        )

    current_app.logger.debug(("after split:", new_roots))

    t = threading.Thread(target=meshing_app_blueprint._mesh_lvl2_nodes,
                         args=(cg.get_serialized_info(), lvl2_nodes))
    t.start()

    # Return binary
    return app_utils.tobinary(new_roots)
Example #19
def handle_merge(table_id):
    current_app.request_type = "merge"

    nodes = json.loads(request.data)
    user_id = str(request.remote_addr)

    current_app.logger.debug(nodes)
    assert len(nodes) == 2

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    atomic_edge = []
    coords = []
    for node in nodes:
        node_id = node[0]
        x, y, z = node[1:]

        x /= 2
        y /= 2

        coordinate = np.array([x, y, z])

        if not cg.is_in_bounds(coordinate):
            coordinate /= cg.segmentation_resolution

            coordinate[0] *= 2
            coordinate[1] *= 2

        atomic_id = cg.get_atomic_id_from_coord(coordinate[0],
                                                coordinate[1],
                                                coordinate[2],
                                                parent_id=np.uint64(node_id))

        if atomic_id is None:
            raise cg_exceptions.BadRequest(
                f"Could not determine supervoxel ID for coordinates "
                f"{coordinate}."
            )

        coords.append(coordinate)
        atomic_edge.append(atomic_id)

    # Protection from long range mergers
    chunk_coord_delta = (cg.get_chunk_coordinates(atomic_edge[0]) -
                         cg.get_chunk_coordinates(atomic_edge[1]))

    if np.any(np.abs(chunk_coord_delta) > 3):
        raise cg_exceptions.BadRequest(
            "Chebyshev distance between merge points exceeded allowed maximum "
            "(3 chunks).")

    lvl2_nodes = []

    try:
        ret = cg.add_edges(user_id=user_id,
                           atomic_edges=np.array(atomic_edge,
                                                 dtype=np.uint64),
                           source_coord=coords[:1],
                           sink_coord=coords[1:],
                           return_new_lvl2_nodes=True,
                           remesh_preview=False)

        if len(ret) == 2:
            new_root, lvl2_nodes = ret
        else:
            new_root = ret

    except cg_exceptions.LockingError:
        raise cg_exceptions.InternalServerError(
            "Could not acquire root lock for merge operation.")
    except cg_exceptions.PreconditionError as e:
        raise cg_exceptions.BadRequest(str(e))

    if new_root is None:
        raise cg_exceptions.InternalServerError(
            "Could not merge selected supervoxel.")

    t = threading.Thread(target=meshing_app_blueprint._mesh_lvl2_nodes,
                         args=(cg.get_serialized_info(), lvl2_nodes))
    t.start()

    # Return binary
    return app_utils.tobinary(new_root)
Example #20
def handle_merge(table_id):
    current_app.table_id = table_id

    nodes = json.loads(request.data)
    is_priority = request.args.get('priority', True, type=str2bool)
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id

    current_app.logger.debug(nodes)
    # A merge takes exactly two endpoints, each encoded as [node_id, x, y, z]
    assert len(nodes) == 2

    # Call ChunkedGraph
    cg = app_utils.get_cg(table_id)

    atomic_edge = []
    coords = []
    for node in nodes:
        node_id = node[0]
        x, y, z = node[1:]
        coordinate = np.array([x, y, z]) / cg.segmentation_resolution

        atomic_id = cg.get_atomic_id_from_coord(coordinate[0],
                                                coordinate[1],
                                                coordinate[2],
                                                parent_id=np.uint64(node_id))

        if atomic_id is None:
            raise cg_exceptions.BadRequest(
                f"Could not determine supervoxel ID for coordinates "
                f"{coordinate}.")

        coords.append(coordinate)
        atomic_edge.append(atomic_id)

    # Protection from long range mergers
    chunk_coord_delta = cg.get_chunk_coordinates(
        atomic_edge[0]) - cg.get_chunk_coordinates(atomic_edge[1])

    if np.any(np.abs(chunk_coord_delta) > 3):
        raise cg_exceptions.BadRequest(
            "Chebyshev distance between merge points exceeded allowed maximum "
            "(3 chunks).")

    try:
        ret = cg.add_edges(
            user_id=user_id,
            atomic_edges=np.array(atomic_edge, dtype=np.uint64),
            source_coord=coords[:1],
            sink_coord=coords[1:],
        )

    except cg_exceptions.LockingError:
        raise cg_exceptions.InternalServerError(
            "Could not acquire root lock for merge operation.")
    except cg_exceptions.PreconditionError as e:
        raise cg_exceptions.BadRequest(str(e))

    if ret.new_root_ids is None:
        raise cg_exceptions.InternalServerError("Could not merge selected "
                                                "supervoxel.")

    current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))

    if len(ret.new_lvl2_ids) > 0:
        trigger_remesh(table_id, ret.new_lvl2_ids, is_priority=is_priority)

    return ret
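A merge request body is a two-element list of [node_id, x, y, z] endpoints. A hypothetical example (IDs and coordinates invented):

nodes = [[864691135123456789, 104820, 51232, 2128],
         [864691135987654321, 104860, 51240, 2128]]
# e.g. requests.post(merge_url, data=json.dumps(nodes)); the two supervoxels
# must lie within 3 chunks of each other (Chebyshev distance) or the request
# is rejected.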