Example #1
def forwards(apps, schema_editor):
    """Make sure all required classes and relations exist for all
    projects. We can't use the regular model classes, but have to get
    them through the migration system.
    """
    from catmaid.control.project import validate_project_setup

    Class = apps.get_model('catmaid', 'Class')
    Project = apps.get_model('catmaid', 'Project')
    Relation = apps.get_model('catmaid', 'Relation')
    User = apps.get_model('auth', 'User')
    ClientDatastore = apps.get_model('catmaid', 'ClientDatastore')
    Volume = apps.get_model('catmaid', 'Volume')

    projects = Project.objects.all()
    # If there are no projects, don't continue, because there is nothing to
    # migrate.
    if 0 == len(projects) or 0 == User.objects.count():
        return

    try:
        system_user = get_system_user(User)
        for p in projects:
            validate_project_setup(p.id, system_user.id, True, Class, Relation, ClientDatastore)
    except ImproperlyConfigured:
        # If volumes exist, this is not a fresh database and the error should
        # propagate. During an initial setup there is nothing to update yet.
        if Volume.objects.count() > 0:
            raise
        logger.warning("Couldn't find a configured system user and will therefore "
                "skip a configuration update of all existing projects. This is "
                "okay during the initial setup of a CATMAID database. In this "
                "case nothing needs to be done. Otherwise, please run "
                "`manage.py catmaid_update_project_configuration` manually "
                "after this migration call is finished and rerun this migration.")
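
A forwards function like this is typically wired into its migration module via migrations.RunPython; a minimal sketch (the dependency entry is a placeholder, not CATMAID's actual migration graph):

from django.db import migrations

class Migration(migrations.Migration):

    # Placeholder dependency; a real migration names its actual predecessor.
    dependencies = [
        ('catmaid', '0001_initial'),
    ]

    operations = [
        # Apply forwards() on migrate; the no-op reverse keeps the migration
        # reversible.
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]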
Example #2
def setup_tracing(project_id, user=None):
    """ Tests which of the needed classes and relations is missing
    from the project's semantic space and adds those.
    """
    if not user:
        user = get_system_user()
    # Remember available classes
    available_classes = {}

    # Add missing classes
    for c in needed_classes:
        class_object, _ = Class.objects.get_or_create(class_name=c,
                                                      project_id=project_id,
                                                      defaults={
                                                          'user': user,
                                                          'description': needed_classes[c],
                                                      })
        available_classes[c] = class_object
    # Add missing relations
    for r in needed_relations:
        defaults = {
            'user': user,
        }
        if isinstance(needed_relations[r], six.string_types):
            defaults['description'] = needed_relations[r]
        else:
            defaults.update(needed_relations[r])

        Relation.objects.get_or_create(relation_name=r,
                                       project_id=project_id,
                                       defaults=defaults)
    # Add missing sampler states
    for sn, sd in six.iteritems(needed_sampler_states):
        SamplerState.objects.get_or_create(name=sn,
                                           defaults={'description': sd})
    # Add missing sampler interval states
    for sn, sd in six.iteritems(needed_sampler_interval_states):
        SamplerIntervalState.objects.get_or_create(
            name=sn, defaults={'description': sd})
    # Add missing sampler domain types
    for sn, sd in six.iteritems(needed_sampler_domain_types):
        SamplerDomainType.objects.get_or_create(name=sn,
                                                defaults={'description': sd})
    # Add missing sampler connector states
    for sn, sd in six.iteritems(needed_sampler_connector_states):
        SamplerConnectorState.objects.get_or_create(
            name=sn, defaults={'description': sd})

    # Add root class instance
    ClassInstance.objects.get_or_create(class_column=available_classes['root'],
                                        project_id=project_id,
                                        defaults={
                                            'user': user,
                                            'name': 'neuropile'
                                        })
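
The lookup tables iterated above map names either to a description string or to a dict of extra field defaults, which is what the isinstance() branch distinguishes. A hypothetical illustration (the real tables are defined at module level in CATMAID and differ in content):

# Hypothetical entries, for illustration only. A string value becomes the
# description; a dict value is merged into the get_or_create() defaults.
needed_classes = {
    'root': 'The root node of a tracing space',
    'skeleton': 'The representation of a skeleton',
}
needed_relations = {
    'element_of': 'A generic part-of relationship',
    'presynaptic_to': {'description': 'Presynaptic connection'},
}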
Example #4
    def handle(self, *args, **options):
        if options['user_id'] is None:
            user = get_system_user(User)
        else:
            user = User.objects.get(pk=options['user_id'])

        if options['project_id'] is None:
            projects = Project.objects.all()
        else:
            projects = Project.objects.filter(pk=options['project_id'])

        for p in projects:
            validate_project_setup(p.id, user.id, True)
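
For options['user_id'] and options['project_id'] to be available in handle(), the command needs a matching add_arguments(); a plausible sketch, with option names assumed from the keys read above:

    def add_arguments(self, parser):
        # argparse maps '--user-id' onto options['user_id'] automatically.
        parser.add_argument('--user-id', dest='user_id', type=int, default=None,
                help='User to act as; defaults to the configured system user')
        parser.add_argument('--project-id', dest='project_id', type=int, default=None,
                help='Limit the update to one project; defaults to all projects')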
Example #5
    def handle(self, *args, **options):
        user = None
        user_id = options['user_id']
        if user_id is not None:
            user = User.objects.get(pk=user_id)

        if not user:
            from catmaid.apps import get_system_user
            user = get_system_user()
            logger.info("Using system user account {} (ID: {})".format(user, user.id))

        # Set up tracing for the requested project
        setup_tracing(options['project_id'], user)
Example #6
    def handle(self, *args, **options):
        set_log_level(logger, options.get('verbosity', 1))
        user = None
        user_id = options['user_id']
        if user_id is not None:
            user = User.objects.get(pk=user_id)

        if not user:
            from catmaid.apps import get_system_user
            user = get_system_user()
            logger.info(f"Using system user account {user} (ID: {user.id})")

        # Set up tracing for the requested project
        setup_tracing(options['project_id'], user)
Example #7
def init_consistent_data():
    """Reset sequence counters and make sure all existing projects have all
    needed classes and relations.
    """
    cursor = connection.cursor()
    # Make sure all counters are set correctly
    cursor.execute("""
        SELECT setval('concept_id_seq', coalesce(max("id"), 1), max("id") IS NOT null) FROM concept;
    """)
    cursor.execute("""
        SELECT setval('location_id_seq', coalesce(max("id"), 1), max("id") IS NOT null) FROM location;
    """)

    user = get_system_user()
    for p in Project.objects.all():
        validate_project_setup(p.id, user.id, True)
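
The third argument of PostgreSQL's setval() is the is_called flag: when it is true, the next nextval() returns value + 1; when false (the empty-table case above, where max("id") IS NOT null evaluates to false), nextval() returns the value itself, so IDs start at 1 in a fresh database. A quick way to verify the result (a sketch reusing the same cursor, PostgreSQL 10+):

    # Inspect the sequence state after the reset above.
    cursor.execute("SELECT last_value, is_called FROM concept_id_seq")
    last_value, is_called = cursor.fetchone()
    # Empty concept table: last_value == 1 and is_called is False, so the
    # next nextval('concept_id_seq') returns 1 rather than 2.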
Example #8
    def handle(self, *args, **options):
        if options['user_id'] is None:
            user = get_system_user(User)
        else:
            user = User.objects.get(pk=options['user_id'])

        if options['project_id'] is None:
            projects = Project.objects.all()
        else:
            projects = Project.objects.filter(pk=options['project_id'])

        for p in projects:
            try:
                validate_project_setup(p.id, user.id, True)
                logger.info("Validated project {} (ID: {})".format(p, p.id))
            except Exception as e:
                logger.error("Could not validate project setup of project " +
                             "{} (ID: {}): {}".format(p, p.id, e))
Example #10
    def handle(self, *args, **options):
        set_log_level(logger, options.get('verbosity', 1))
        if options['user_id'] is None:
            user = get_system_user(User)
        else:
            user = User.objects.get(pk=options['user_id'])

        if options['project_id'] is None:
            projects = Project.objects.all()
        else:
            projects = Project.objects.filter(pk=options['project_id'])

        for p in projects:
            try:
                validate_project_setup(p.id, user.id, True)
                logger.info(f"Validated project {p} (ID: {p.id})")
            except Exception as e:
                logger.error("Could not validate project setup of project " + \
                             f"{p} (ID: {p.id}): {e}")
Example #12
def forwards(apps, schema_editor):
    """Make sure all required class and relations are existing for all
    projects.  We can't use the regular model classes, but have to get
    them through the migration system.
    """
    from catmaid.control.project import validate_project_setup

    Class = apps.get_model('catmaid', 'Class')
    Project = apps.get_model('catmaid', 'Project')
    Relation = apps.get_model('catmaid', 'Relation')
    User = apps.get_model('auth', 'User')

    projects = Project.objects.all()
    # If there are no projects, don't continue, because there is nothing to
    # migrate.
    if 0 == len(projects) or 0 == User.objects.count():
        return

    system_user = get_system_user(User)
    for p in projects:
        validate_project_setup(p.id, system_user.id, True, Class, Relation)
Example #14
    def handle(self, *args, **options):
        ignore_same_name_projects = options['ignore_same_name_projects']
        ignore_same_name_stacks = options['ignore_same_name_stacks']
        ignore_empty_projects = options['ignore_empty_projects']
        project_id = options['project_id']
        default_tile_width = options['default_tile_width']
        default_tile_height = options['default_tile_height']
        default_tile_source_type = options['default_tile_source_type']
        remove_unref_stack_data = options['remove_unref_stack_data']
        image_base = options['image_base']

        if ignore_same_name_projects:
            logger.info("Ignoring all loaded projects that have the same name "
                    "as existing projects")

        if ignore_same_name_stacks:
            logger.info("Ignoring all loaded stacks that have the same name "
                    "as existing stacks")

        # Parse permissions
        permissions: List = []
        for p in map(lambda x: x.split(':'), options['permissions']):
            if len(p) != 3:
                raise CommandError('Invalid permission format, expected: type:name:permission')
            p_type, obj_name, p_name = p[0].lower(), p[1], p[2]

            if p_type == 'user':
                target = User.objects.get(username=obj_name)
            elif p_type == 'group':
                target = Group.objects.get(name=obj_name)
            else:
                raise CommandError(f'Unknown permission target type: {p_type}')

            logger.info(f'Setting {p_name} permissions for {p_type} {obj_name}')
            permissions.append((target, p_name))

        # This will read from either stdin or a provided text file
        if options['input'].isatty():
            raise CommandError('Please provide either the --input argument '
                    'with a file path or provide data on stdin.')
        input_data = options['input'].read()
        options['input'].close()

        project_configs = json.loads(input_data)

        pre_projects: List = []
        for project_config in project_configs:
            title = project_config['project']['title']
            if ignore_same_name_projects and \
                    Project.objects.filter(title=title).count() > 0:
                logger.info(f"Skipping project {title}, a project with the same name exists alrady")
                continue
            logger.info(f"Parsing project {title}")
            pre_project = PreProject(project_config, image_base, None)
            stacks_to_remove = []
            for pre_stack in pre_project.stacks:
                if Stack.objects.filter(title=pre_stack.title).count() > 0:
                    stacks_to_remove.append(pre_stack)
            if stacks_to_remove:
                stack_titles = ', '.join(map(lambda x: x.title, stacks_to_remove))
                logger.info(f"Skipping stacks {stack_titles} in project {title}, "
                        "because stacks with these names exist alrady")
                for stack_to_remove in stacks_to_remove:
                    pre_project.stacks.remove(stack_to_remove)

            if ignore_empty_projects and not pre_project.stacks:
                logger.info(f"Skipping project {title}, because it has no stacks")
                continue

            pre_projects.append(pre_project)

        tags: List = []
        cls_graph_ids_to_link: List = []
        user = get_system_user()

        logger.info(f'Importing {len(pre_projects)} projects')
        imported, not_imported = import_projects(user,
            pre_projects, tags, permissions, default_tile_width,
            default_tile_height, default_tile_source_type,
            cls_graph_ids_to_link, remove_unref_stack_data)
        logger.info(f'Imported {len(imported)} projects')

        if not_imported:
            logger.info("Encountered the following problems during import:\n" +
                    '\n'.join(map(lambda x: f'{x[0]}: {x[1]}', not_imported)))
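
The importer reads a JSON list of project configurations from --input or stdin, and permissions as type:name:permission triples, e.g. user:alice:can_browse (hypothetical names). Only project['title'] is dereferenced directly in this handler; the rest, including the assumed 'stacks' key, is parsed by PreProject:

# A minimal, hypothetical input document for the importer above.
project_configs = [
    {
        'project': {
            'title': 'Example project',
            'stacks': [],
        }
    }
]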
Example #15
def import_meshes(apps, schema_editor):
    """Look for HDF5 meshes in the HDF5 folder specified in the settings. If
    any are found, import them as PostGIS volumes.
    """

    Project = apps.get_model("catmaid", "Project")
    Stack = apps.get_model("catmaid", "Stack")

    user = None

    for project in Project.objects.all():
        for stack in Stack.objects.all():
            meshes = get_mesh(project.id, stack.id)
            if not meshes:
                continue

            if not user:
                # Lazy load system user
                user = get_system_user()

            # The returned dictionary maps mesh names to a mesh representation
            for mesh_name, mesh_data in meshes.items():
                vertices = []
                input_vertices = mesh_data['vertices']
                i = 0
                while i < len(input_vertices):
                    vertices.append([
                        input_vertices[i], input_vertices[i + 1],
                        input_vertices[i + 2]
                    ])
                    i = i + 3
                triangles = []
                input_faces = mesh_data['faces']
                i = 0
                while i < len(input_faces):
                    face_type = input_faces[i]
                    if 0 == face_type:
                        triangles.append([
                            input_faces[i + 1], input_faces[i + 2],
                            input_faces[i + 3]
                        ])
                        i = i + 4
                    elif 1 == face_type:
                        triangles.append([
                            input_faces[i + 1], input_faces[i + 2],
                            input_faces[i + 3]
                        ])
                        triangles.append([
                            input_faces[i + 3], input_faces[i + 4],
                            input_faces[i + 1]
                        ])
                        i = i + 5
                    else:
                        raise ValueError(
                            "Can't migrate HDF5 mesh {}_{}.hdf, "
                            "because it contains faces different from "
                            "triangles or quads (type {}).".format(
                                project.id, stack.id, face_type))
                params = {
                    'type': 'trimesh',
                    'title': mesh_name,
                    'comment': 'Imported HDF5 mesh ' +
                        '(project: {} stack: {})'.format(project.id, stack.id),
                    'mesh': [vertices, triangles]
                }
                instance = get_volume_instance(project.id, user.id, params)
                volume_id = instance.save()
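
The faces array is a flat stream of records, each starting with a type tag: 0 introduces a triangle (three vertex indices follow), 1 a quad (four indices follow, fan-split into two triangles). A small worked example of the same decoding rules, on made-up data:

# Hypothetical flat face stream: one triangle record (0, a, b, c) followed
# by one quad record (1, a, b, c, d), decoded as in import_meshes() above.
faces = [0, 10, 11, 12, 1, 20, 21, 22, 23]
triangles, i = [], 0
while i < len(faces):
    if faces[i] == 0:    # triangle record: 4 entries
        triangles.append(faces[i + 1:i + 4])
        i += 4
    elif faces[i] == 1:  # quad record: 5 entries, split into two triangles
        a, b, c, d = faces[i + 1:i + 5]
        triangles.extend([[a, b, c], [c, d, a]])
        i += 5
print(triangles)  # [[10, 11, 12], [20, 21, 22], [22, 23, 20]]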
Example #16
    def handle(self, *args, **options):
        try:
            project = Project.objects.get(id=options['project_id'])
        except Exception as e:
            raise CommandError(e)

        global log
        log = self.stdout.write

        self.check_env(options)

        # Set JVM options
        if options.get('java_heap'):
            import jnius_config
            jnius_config.add_options(f'-Xmx{options.get("java_heap")}')
            log(f'Setting JVM heap to {options.get("java_heap")}')

        if options.get('imagej'):
            ij = imagej.init(options['imagej'])
        else:
            ij = imagej.init()
        log(f'ImageJ version: {ij.getInfo(True)}')

        # Needs to happen after imagej.init()
        Java.init()
        MPICBG.init()

        editor_username = options['user']
        if editor_username:
            editor = User.objects.get(username=editor_username)
        else:
            editor = get_system_user()
        log(f'Making edits with user {editor}')

        if options['skeleton_ids']:
            skeleton_ids = list(map(lambda x: int(x.strip()), options['skeleton_ids'].split(',')))
        else:
            skeleton_ids = []

        if options['layers']:
            layers = list(map(lambda x: int(x.strip()), options['layers'].split(',')))
        else:
            layers = []

        post_mapping = []
        if options['post_mapping']:
            for mapping in options['post_mapping'].replace(' ','').split('),('):
                point_tokens = mapping.split(')=(')
                if len(point_tokens) != 2:
                    raise CommandError(f'Need exactly two points to a point match: {point_tokens}')
                point_a = tuple(map(float, point_tokens[0].replace('(', '').replace(')', '').split(',')))
                point_b = tuple(map(float, point_tokens[1].replace('(', '').replace(')', '').split(',')))
                post_mapping.append((point_a, point_b))

            log(f'Found {len(post_mapping)} post mapping point matches')

        transformer = CoordTransformer(options['project_id'], options['xml'],
                res_x=options['res_x'], res_y=options['res_y'], res_z=options['res_z'], editor=editor,
                review_reset_distance=options['review_reset_distance'], offset_x=options['offset_x'],
                offset_y=options['offset_y'], skeleton_ids=skeleton_ids, layers=layers,
                post_mapping=post_mapping, offset_min_z=options['offset_min_z'],
                offset_max_z=options['offset_max_z'])

        self.stdout.write("Found the following layers:")
        for layer in transformer.layers:
            self.stdout.write(str(layer))

        try:
            with transaction.atomic():
                log('Starting transformation')
                transformer.transform()
                # Add log entry
                add_log_entry(editor.id, 'admin.transform_node_location', transformer.project_id)
        except Exception:
            traceback.print_exc()
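
The post-mapping option parsed above expects a comma-separated list of point matches of the form (x1,y1)=(x2,y2). A standalone sketch of the same parsing on a made-up value:

# Hypothetical value for options['post_mapping']:
value = '(0,0)=(10,5),(100,0)=(110,5)'

def parse_point(token):
    return tuple(map(float, token.replace('(', '').replace(')', '').split(',')))

post_mapping = []
for mapping in value.replace(' ', '').split('),('):
    a, b = mapping.split(')=(')
    post_mapping.append((parse_point(a), parse_point(b)))

print(post_mapping)
# [((0.0, 0.0), (10.0, 5.0)), ((100.0, 0.0), (110.0, 5.0))]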
Example #17
    def reset_ids(self, target_classes, import_objects,
            import_objects_by_type_and_id, existing_classes,
            map_treenodes=True, save=True):
        """Reset the ID of each import object to None so that a new object will
        be created when the object is saved. At the same time an index is
        created that allows per-type lookups of foreign key fields.
        """
        logger.info("Building foreign key update index")
        # Build index for foreign key fields in models. For each type, map
        # each foreign key name to a model class.
        fk_index = defaultdict(dict) # type: DefaultDict[Any, Dict]
        for c in target_classes:
            class_index = fk_index[c]
            foreign_key_fields = [
                    f for f in c._meta.get_fields()
                    if f.is_relation
                    and f.many_to_one # ForeignKey instances
                    and f.related_model in target_classes
            ]

            for field in foreign_key_fields:
                # Get the database column name for this field
                class_index[field.attname] = field.related_model

        logger.info("Updating foreign keys to imported objects with new IDs")
        all_classes = dict() # type: Dict
        all_classes.update(existing_classes)
        updated_fk_ids = 0
        unchanged_fk_ids = 0
        explicitly_created_summaries = 0
        other_tasks = set(import_objects.keys()) - set(ordered_save_tasks)
        # Iterate objects to import and respect dependency order
        for object_type in ordered_save_tasks + list(other_tasks):
            objects = import_objects.get(object_type)
            if not objects:
                # No objects of this object type are imported
                continue
            fk_fields = fk_index[object_type]
            # No need to do rest if there are no foreign keys to change to begin
            # with.
            if len(fk_fields) == 0:
                continue

            imported_parent_nodes = []

            bar_prefix = "- {}: ".format(object_type.__name__)
            for deserialized_object in progressbar.progressbar(objects,
                    max_value=len(objects), redirect_stdout=True,
                    prefix=bar_prefix):
                obj = deserialized_object.object
                obj_type = type(obj)
                for fk_field, fk_type in fk_fields.items():
                    # Get import object with the former ID referenced in
                    # this field.
                    current_ref = getattr(obj, fk_field)

                    # Only attempt a mapping if the foreign key isn't NULL
                    if current_ref:
                        # Get updated model objects of the referenced type
                        imported_objects_by_id = import_objects_by_type_and_id[fk_type]
                        ref_obj = imported_objects_by_id.get(current_ref)

                        if ref_obj:
                            # Update foreign key reference to the ID of the newly
                            # saved object. Only for treenodes is this expected to
                            # reference data that isn't available yet.
                            if object_type == Treenode and fk_type == Treenode:
                                imported_parent_nodes.append((obj, current_ref))
                            elif ref_obj.id is None:
                                raise ValueError("The referenced {} object '{}' with import ID {} wasn't stored yet".format(
                                        fk_type, str(ref_obj), current_ref))
                            setattr(obj, fk_field, ref_obj.id)
                            updated_fk_ids += 1
                        else:
                            unchanged_fk_ids += 1

                # Save objects if they should either be imported or had foreign
                # key fields changed.
                if save and (updated_fk_ids or obj.id is None):
                    obj.save()

            # Treenodes are special, because they can reference themselves. They
            # need therefore a second iteration of reference updates after all
            # treenodes have been saved and new IDs are available.
            if map_treenodes and object_type == Treenode:
                logger.info('Mapping parent IDs of treenodes to imported data')
                imported_objects_by_id = import_objects_by_type_and_id[Treenode]
                for obj, parent_id in progressbar.progressbar(imported_parent_nodes,
                        max_value=len(imported_parent_nodes),
                        redirect_stdout=True, prefix="- Mapping parent treenodes: "):
                    new_parent = imported_objects_by_id.get(parent_id)
                    if not new_parent:
                        raise ValueError("Could not find imported treenode {}".format(parent_id))
                    obj.parent_id = new_parent.id
                    if save:
                        obj.save()

            # Update list of known classes after new classes have been saved
            if object_type == Class:
                for deserialized_object in objects:
                    obj = deserialized_object.object
                    all_classes[obj.class_name] = obj.id

            # If skeleton class instances are created, make sure the skeleton
            # summary table entries for the respective skeletons are there.
            # Otherwise the ON CONFLICT clause of the summary updates can be
            # triggered multiple times. The alternative is to disable the
            # trigger during import.
            pre_create_summaries = False
            if object_type == ClassInstance and pre_create_summaries:
                last_editor = get_system_user()
                skeleton_class_id = all_classes.get('skeleton')
                for deserialized_object in objects:
                    obj = deserialized_object.object
                    if obj.class_column_id == skeleton_class_id:
                        r = SkeletonSummary.objects.get_or_create(project=self.target,
                                skeleton_id=obj.id, defaults={'last_editor': last_editor})
                        explicitly_created_summaries += 1

        logger.info("".join(["{} foreign key references updated, {} did not ",
                "require change, {} skeleton summaries were created"]).format(
                updated_fk_ids, unchanged_fk_ids, explicitly_created_summaries))
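
The index built at the top of this method leans on Django's Model._meta API: f.many_to_one is true exactly for ForeignKey fields, and f.attname is the raw ID attribute (e.g. parent_id rather than parent). A self-contained demonstration against a stock Django model:

import django
from django.conf import settings

# Minimal standalone setup so the _meta API works outside a project.
settings.configure(INSTALLED_APPS=[
    'django.contrib.contenttypes',
    'django.contrib.auth',
])
django.setup()

from collections import defaultdict
from django.contrib.auth.models import Permission

fk_index = defaultdict(dict)
for f in Permission._meta.get_fields():
    if f.is_relation and f.many_to_one:  # plain ForeignKey fields only
        # attname is the database column attribute, e.g. 'content_type_id'
        fk_index[Permission][f.attname] = f.related_model

print(fk_index[Permission])
# {'content_type_id': <class 'django.contrib.contenttypes.models.ContentType'>}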
Example #18
    def __init__(self, project_id, target_xml, res_x, res_y, res_z, editor=None,
            review_reset_distance=None, offset_x=0.0, offset_y=0.0,
            skeleton_ids=None, layers=None, post_mapping=None, offset_min_z=None,
            offset_max_z=None):
        log('Initializing coordinate transformer')
        self.project_id = project_id
        self.xml = target_xml
        self.res_x = res_x
        self.res_y = res_y
        self.res_z = res_z
        self.last_editor = editor or get_system_user()
        self.review_reset_distance = review_reset_distance
        self.offset_x = offset_x
        self.offset_y = offset_y
        self.offset_min_z = offset_min_z
        self.offset_max_z = offset_max_z
        self.skeleton_ids = skeleton_ids or []
        self.layers_to_transform = set(layers or [])
        self.post_mapping_point_matches = post_mapping or []

        # If we wanted to use TrakEM2 data structures directly (inconvenient for
        # debugging and seems slower overall):
        # self.project = TrakEM2.Project.openFSProject(Java.String(self.xml), False) # type: ignore
        # log(f'Opened project from file ({self.xml}):')
        # log(self.project.getInfo())

        # Parse target XML file to find transformation for each section.
        target_data = xml.etree.ElementTree.parse(self.xml)
        if not target_data:
            raise ValueError("Could not parse target XML")
        target_data_root = target_data.getroot()
        if target_data_root.tag != 'trakem2':
            raise ValueError("This doesn't look like a TrakEM2 XML file, could not find trakem2 root")

        # Get first available layer set
        self.layers = []
        target_data_layerset = target_data_root.find('t2_layer_set')
        if target_data_layerset is not None:
            if self.layers_to_transform:
                log(f'Loading {len(self.layers_to_transform)} layer(s)')
            else:
                log('Loading all layers')
            for n, layer_data in enumerate(progressbar(target_data_layerset.findall('t2_layer'))):
                if self.layers_to_transform and n not in self.layers_to_transform:
                    continue

                layer_z = float(layer_data.attrib['z'])
                offset_disabled = (offset_min_z is not None and (layer_z - offset_min_z) < -0.00001) \
                    or (offset_max_z is not None and (layer_z - offset_max_z) > 0.00001)

                if offset_disabled:
                    offset_x, offset_y = 0.0, 0.0
                else:
                    offset_x, offset_y = self.offset_x, self.offset_y

                layer = TrakEM2Layer(layer_data, res_x, res_y, res_z, offset_x, offset_y)
                self.layers.append(layer)

        # Sort layers by Z
        self.layers.sort(key=lambda x: x.z_start)

        # If there are post processing point matches, precompute transformer.
        self.post_transformer = None
        if self.post_mapping_point_matches:
            self.post_transformer = Transformer(
                    list(map(lambda x: x[0], self.post_mapping_point_matches)),
                    list(map(lambda x: x[1], self.post_mapping_point_matches)),
            )
            log(f'Setting up post-transformer: {self.post_transformer}, test '
                f'transform of (0,0): {self.post_transformer.transform([[0,0]])[0]}')