def test_read_and_write_and_read():
    for i in range(0, len(INPUT_FILES)):
        input_file = INPUT_FILES[i]
        output_file = OUTPUT_FILES[i]
        first = wknml.parse_nml(input_file)
        with open(output_file, 'wb') as f:
            wknml.write_nml(f, first)
        second = wknml.parse_nml(output_file)
        assert first == second
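
The round-trip tests on this page rely on module-level path lists that are not shown here. A minimal sketch of what such fixtures could look like (the imports are the ones the tests use; the file names are placeholders, not the repository's actual test data):

import filecmp
import pickle

import wknml

# Hypothetical fixture lists; the snapshot paths are used with a '.snapshot' or '.pickle' suffix.
INPUT_FILES = ["testdata/example_a.nml", "testdata/example_b.nml"]
OUTPUT_FILES = ["testoutput/example_a.nml", "testoutput/example_b.nml"]
SNAPSHOT_FILES = ["testdata/snapshots/example_a", "testdata/snapshots/example_b"]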
Example #2
def save_snapshot_pickle_read_and_write():
    for i in range(0, len(INPUT_FILES)):
        input_file = INPUT_FILES[i]
        output_file = SNAPSHOT_FILES[i]
        parsed = wknml.parse_nml(input_file)
        with open(output_file + '.pickle', 'wb') as f:
            pickle.dump(parsed, f)
Example #3
def save_snapshot_nml_read_and_write():
    for i in range(0, len(INPUT_FILES)):
        input_file = INPUT_FILES[i]
        output_file = OUTPUT_FILES[i]
        parsed = wknml.parse_nml(input_file)
        with open(output_file + '.snapshot', 'wb') as f:
            wknml.write_nml(f, parsed)
Example #4
def calc_supervoxel_eftpl(nml_filepath, h5_filepath, dset_vals=None,
                          dset_folder='with_background', dset_suffix='labels',
                          size=None, dset_start=np.zeros(3, dtype=int)):

    with open(nml_filepath, "rb") as f:
        nml = wknml.parse_nml(f)

    if dset_vals is None:
        with h5py.File(h5_filepath, 'r') as h5file:
            dset = h5file[dset_folder]
            dset_vals = [str(k) for k in dset.keys()]

    eftpl = np.zeros(len(dset_vals))

    # indices of the x, y and z axes within the HDF5 dataset's dimension order
    h5_dim_idcs = [HDF5_DIM_ORDER.index(ax) for ax in 'xyz']

    selection = None
    if size is not None:
        selection = tuple(slice(dset_start[ax], dset_start[ax] + size[ax]) for ax in h5_dim_idcs)

    for i, dset_val in enumerate(dset_vals):
        with h5py.File(h5_filepath, 'r') as h5file:
            dset = h5file['/'.join([dset_folder, dset_val, dset_suffix])]
            if size is None:
                Vlbls = dset[:]
            else:
                Vlbls = np.zeros(tuple(size[ax] for ax in h5_dim_idcs), dtype=dset.dtype)
                dset.read_direct(Vlbls, selection)
        eftpl[i] = calc_eftpl(nml, Vlbls, dset_start)

    return eftpl, dset_vals
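
A possible call to calc_supervoxel_eftpl, assuming HDF5_DIM_ORDER covers exactly the x, y and z axes; the file names and the crop size are placeholders chosen for illustration:

eftpl, dset_vals = calc_supervoxel_eftpl(
    'skeleton.nml',                        # hypothetical skeleton annotation
    'supervoxels.h5',                      # hypothetical HDF5 volume
    dset_folder='with_background',
    dset_suffix='labels',
    size=np.array([128, 128, 128]),        # crop size per HDF5 axis
    dset_start=np.zeros(3, dtype=int),     # read offset per HDF5 axis
)
for name, score in zip(dset_vals, eftpl):
    print(name, score)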
Example #5
    def __init__(self,
                 nml_path: str = None,
                 parameters: Parameters = None,
                 strict=True):
        """ The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments

        Args:
            nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the
                trees and additional properties specified in the .nml file
            parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties
                 of the skeleton.
            strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with
                webKnossos. Default: True

        Examples:
            Using nml_path:
                nml_path = '/path/to/example.nml'
                skel = Skeleton(nml_path)

            Using parameters:
                parameters = Skeleton.define_parameters(name="2017-01-12_FD0156-2", scale=(11.24, 11.24, 32))
                skel = Skeleton(parameters=parameters)
        """

        assert (nml_path is not None) ^ (parameters is not None), \
            'To construct a skeleton object, either a path to an nml file or a skeleton Parameters object needs to be passed'

        self.nodes = list()
        self.edges = list()
        self.names = list()
        self.colors = list()
        self.tree_ids = list()
        self.group_ids = list()
        self.groups = list()
        self.branchpoints = list()
        self.parameters = Parameters()
        self.nml_path = str()

        self.strict = strict
        self.defaults = self.DEFAULTS

        # Construct from nml file
        if nml_path is not None:
            assert os.path.exists(nml_path), \
                'not a valid path: {}'.format(nml_path)
            try:
                with open(nml_path, "rb") as f:
                    nml = wknml.parse_nml(f)
            except IOError:
                print('not a valid nml file: {}'.format(nml_path))
                raise

            self._nml_to_skeleton(nml)

        # Construct from parameters
        else:
            assert type(parameters) is Parameters, \
                'provided parameters must be of type wkskel.types.Parameters'

            self._parameters_to_skeleton(parameters)
Example #6
def test_snapshot_read_and_compare_nml():
    for i in range(0, len(INPUT_FILES)):
        input_file = INPUT_FILES[i]
        snapshot_file = SNAPSHOT_FILES[i]
        output_file = OUTPUT_FILES[i]
        parsed = wknml.parse_nml(input_file)
        with open(output_file, 'wb') as f:
            wknml.write_nml(f, parsed)
        assert filecmp.cmp(snapshot_file + '.snapshot', output_file)
Example #7
def test_snapshot_read_and_compare_pickle():
    for i in range(0, len(INPUT_FILES)):
        input_file = INPUT_FILES[i]
        snapshot_file = SNAPSHOT_FILES[i]
        output_file = OUTPUT_FILES[i]
        parsed = wknml.parse_nml(input_file)
        with open(output_file + '.pickle.cmp', 'wb') as f:
            pickle.dump(parsed, f)
        assert filecmp.cmp(snapshot_file + '.pickle', output_file + '.pickle.cmp')
Example #8
def test_generate_nml():
    with open("testdata/nml_with_invalid_ids.nml", "r") as file:
        test_nml = parse_nml(file)

    (graph, parameter_dict) = generate_graph(test_nml)
    test_result_nml = generate_nml(tree_dict=graph, parameters=parameter_dict)

    with open("testdata/expected_result.nml", "r") as file:
        expected_nml = parse_nml(file)

    # the generated nml has to be written and re-read because parsing applies default values;
    # only then can it be compared to the expected nml
    with open("testoutput/temp.nml", "wb") as file:
        write_nml(file=file, nml=test_result_nml)

    with open("testoutput/temp.nml", "r") as file:
        test_result_nml = parse_nml(file)

    assert test_result_nml == expected_nml
Example #9
def test_no_default_values_written():
    input_file_name = "testdata/nml_without_default_values.nml"
    output_file_name = "testoutput/nml_without_default_values.nml"
    # read and write the test file
    with open(input_file_name, "r") as file:
        test_nml = parse_nml(file)
        with open(output_file_name, "wb") as output_file:
            write_nml(file=output_file, nml=test_nml)

    # read the written testfile and compare the content
    with open(input_file_name, "r") as file:
        test_nml = parse_nml(file)
        with open(output_file_name, "r") as output_file:
            test_result_nml = parse_nml(output_file)

            assert test_nml == test_result_nml, "The testdata file and the testoutput file do not have the same content."

    # test if both files have the same content
    assert filecmp.cmp(
        input_file_name, output_file_name
    ), "The testdata and the testoutput file do not have the same content."
Example #10
def load_and_save_merged_nml_trees(source: str, destination: str, scale):

  assert os.path.isfile(source), "The source path does not point to an existing file."
  assert os.path.exists(os.path.dirname(os.path.realpath(destination))), "The destination directory does not exist."

  logger.info("Reading data")
  with open(source, "rb") as f:
    nml = parse_nml(f)

  logger.info("Starting to merge tree groups")
  merged_nml = create_merged_nml(nml, scale)

  logger.info("Writing data")
  with open(destination, "wb") as f:
    write_nml(f, merged_nml)

  logger.info("Done")
Example #11
def test_ensure_max_edge_length():
    with open("testdata/nml_with_too_long_edges.nml", "r") as file:
        test_nml = parse_nml(file)
    max_length = 2.0
    scale = np.array(test_nml.parameters.scale)

    test_nml_graph = generate_graph(test_nml)
    # test if the loaded graph violates the max_length
    assert not is_max_length_violated(test_nml_graph[0], max_length, scale)

    # test the graph version
    test_result_nml_graph, _ = ensure_max_edge_length(test_nml_graph,
                                                      max_length)
    assert is_max_length_violated(test_result_nml_graph, max_length, scale)

    # test the nml version
    test_result_nml = ensure_max_edge_length(test_nml, max_length)
    test_result_nml, _ = generate_graph(test_result_nml)
    assert is_max_length_violated(test_result_nml, max_length, scale)
Example #12
def test_approximate_minimal_edge_length():
    with open("testdata/nml_with_small_distance_nodes.nml", "r") as file:
        test_nml = parse_nml(file)
    max_length = 2.0
    max_angle = 0.2
    scale = np.array(test_nml.parameters.scale)

    test_nml_graph = generate_graph(test_nml)
    # test if the loaded graph violates the max_length
    assert not is_minimal_edge_length_violated(test_nml_graph[0], max_length,
                                               max_angle, scale)

    # test the graph interface
    test_result_nml_graph, _ = approximate_minimal_edge_length(
        test_nml_graph, max_length, max_angle)
    assert is_minimal_edge_length_violated(test_result_nml_graph, max_length,
                                           max_angle, scale)

    # test the nml interface
    test_result_nml = approximate_minimal_edge_length(test_nml, max_length,
                                                      max_angle)
    test_result_nml, _ = generate_graph(test_result_nml)
    assert is_minimal_edge_length_violated(test_result_nml, max_length,
                                           max_angle, scale)
Example #13
parser.add_argument(
    "--set_zero",
    action="store_true",
    help="Set non-marked segments to zero.",
)
parser.add_argument("input", help="Path to input WKW dataset")
parser.add_argument("--layer_name", "-l", help="Segmentation layer name", default="segmentation")
parser.add_argument("nml", help="Path to NML file")
parser.add_argument("output", help="Path to output tiff files")
args = parser.parse_args()

print("Merging merger mode annotations from {} and {}".format(args.input, args.nml))

# Collect equivalence classes from NML
with open(args.nml, "rb") as f:
  nml = wknml.parse_nml(f)

ds_in = wkw.Dataset.open(path.join(args.input, args.layer_name, "1"))
cube_size = ds_in.header.block_len * ds_in.header.file_len

equiv_classes = [
  set(ds_in.read(node.position, (1,1,1))[0,0,0,0] for node in tree.nodes)
    for tree in nml.trees
]

equiv_map = {}
for klass in equiv_classes:
  base = next(iter(klass))
  for segment_id in klass:
    equiv_map[segment_id] = base
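
The excerpt stops after building the id lookup table. One way the mapping could then be applied to a cube of the segmentation, sketched here under the assumption that a full script would loop over all cubes of the layer (cube_offset is a placeholder):

import numpy as np

cube_offset = (0, 0, 0)  # placeholder origin of a single cube
cube = ds_in.read(cube_offset, (cube_size,) * 3)[0]

# Start from zeros when --set_zero is given, otherwise keep unmapped segments unchanged.
relabeled = np.zeros_like(cube) if args.set_zero else cube.copy()
for segment_id, base_id in equiv_map.items():
    relabeled[cube == segment_id] = base_id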
Example #14
def flatten(l):
    return [x for y in l for x in y]


def find(pred, l):
    return next(x for x in l if pred(x))


parser = ArgumentParser(
    description="Splits trees in order to fix unlinked nodes.")
parser.add_argument("source", help="Source NML file")
parser.add_argument("target", help="Target NML file")
args = parser.parse_args()

file = wknml.parse_nml(ET.parse(args.source).getroot())

all_nodes = flatten([t.nodes for t in file.trees])
all_edges = flatten([t.edges for t in file.trees])

all_node_ids = [n.id for n in all_nodes]
max_node_id = max(all_node_ids) + 1
print("trees={} nodes={} edges={} max_node={}".format(len(file.trees),
                                                      len(all_nodes),
                                                      len(all_edges),
                                                      max_node_id))

mat = scipy.sparse.lil_matrix((max_node_id, max_node_id))
for edge in all_edges:
    mat[edge.source, edge.target] = 1
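
The adjacency matrix is presumably fed into a connected-components pass so that every properly linked subtree ends up in its own component; a sketch of that step with scipy (the grouping into new output trees is an assumption, the excerpt does not show it):

import scipy.sparse.csgraph

n_components, labels = scipy.sparse.csgraph.connected_components(
    mat.tocsr(), directed=False)

# Group node ids by component; each group would become a separate output tree.
components = {}
for node in all_nodes:
    components.setdefault(labels[node.id], []).append(node.id)
print("found {} connected components".format(n_components))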