Exemple #1
0
def fork(request:HttpRequest, project_id) -> JsonResponse:
    """Attempt to create a new project based on the passed in project ID.
    ---
    parameters:
    - name: name
      description: Name of new project
      required: true
      type: string
    """
    name = request.POST.get('name')
    if not name:
        raise ValueError('Need new project name')

    source_project = get_object_or_404(Project, pk=project_id)
    # Fetch a second copy of the row and clear its primary key so that
    # save() performs an INSERT, effectively cloning the source project.
    forked_project = get_object_or_404(Project, pk=project_id)
    forked_project.id = None
    forked_project.title = name
    forked_project.save()

    # Duplicate every project-stack link of the source project for the fork.
    stack_links = ProjectStack.objects.filter(project=source_project)
    for stack_link in stack_links:
        stack_link.id = None
        stack_link.project = forked_project
        stack_link.save()

    # Grant the requesting user read, write and import access on the fork.
    for permission in ('can_browse', 'can_annotate', 'can_import'):
        assign_perm(permission, request.user, forked_project)

    # Create basic classes and relations
    validate_project_setup(forked_project.id, request.user.id, fix=True)

    # If the source project is a tracing project, make the clone as well one.
    # A local import is used here to avoid a high potential for circular imports.
    from catmaid.control.tracing import check_tracing_setup, setup_tracing
    if check_tracing_setup(project_id):
        setup_tracing(forked_project.id)

    return JsonResponse({
        'new_project_id': forked_project.id,
        'n_copied_stack_links': len(stack_links),
    })
Exemple #2
0
    def collect_data(self):
        """Collect all database objects to serialize into self.to_serialize.

        Exports classes, relations, neurons, skeletons and their links, and
        optionally treenodes, connectors and annotations. If
        self.required_annotations is set, the export is restricted to neurons
        carrying one of those annotations.

        Raises:
            ValueError: If the project is not set up for tracing.
        """
        self.to_serialize = []

        # Map class and relation names to their IDs for this project.
        classes = dict(Class.objects.filter(
                project=self.project).values_list('class_name', 'id'))
        relations = dict(Relation.objects.filter(
                project=self.project).values_list('relation_name', 'id'))

        if not check_tracing_setup(self.project.id, classes, relations):
            raise ValueError("Project with ID %s is no tracing project." % self.project.id)

        skeleton_id_constraints = None
        entities = ClassInstance.objects.filter(project=self.project,
                class_column__in=[classes['neuron']])
        skeleton_links = ClassInstanceClassInstance.objects.filter(
                project_id=self.project.id, relation=relations['model_of'],
                class_instance_a__class_column=classes['skeleton'])
        skeletons = ClassInstance.objects.filter(project=self.project,
                class_column__in=[classes['skeleton']])

        if self.required_annotations:
            # Get mapping from annotations to IDs
            a_to_id = dict(ClassInstance.objects.filter(
                    project=self.project, class_column=classes['annotation'],
                    name__in=self.required_annotations).values_list('name', 'id'))
            print("Found entities with the following annotations: %s" % \
                  ", ".join(a_to_id.keys()))

            b_ids = list(six.itervalues(a_to_id))
            entities = ClassInstance.objects.filter(project=self.project,
                class_column=classes['neuron'],
                cici_via_a__relation_id=relations['annotated_with'],
                cici_via_a__class_instance_b_id__in=b_ids)

            # Get the corresponding skeleton IDs
            skeleton_links = ClassInstanceClassInstance.objects.filter(
                    project_id=self.project.id, relation=relations['model_of'],
                    class_instance_a__class_column=classes['skeleton'],
                    class_instance_b__in=entities)
            skeleton_id_constraints = set(skeleton_links.values_list(
                    'class_instance_a', flat=True))
            skeletons = ClassInstance.objects.filter(project=self.project,
                    id__in=skeleton_id_constraints)

        print("Will export %s entities" % entities.count())

        # Export classes and relations
        self.to_serialize.append(Class.objects.filter(project=self.project))
        self.to_serialize.append(Relation.objects.filter(project=self.project))

        # Export skeleton-neuron links
        self.to_serialize.append(entities)
        self.to_serialize.append(skeleton_links)
        self.to_serialize.append(skeletons)

        if skeleton_id_constraints:
            # IDs of all exported treenodes. Initialized here so the
            # connector branch below can use it even when treenode export is
            # disabled (previously a NameError in that configuration).
            exported_tids = set()

            # Export treenodes
            if self.export_treenodes:
                treenodes = Treenode.objects.filter(
                        project=self.project,
                        skeleton_id__in=skeleton_id_constraints)
                self.to_serialize.append(treenodes)

                exported_tids = set(treenodes.values_list('id', flat=True))
                print("Exporting %s treenodes" % len(exported_tids))

            # Export connectors and connector links
            if self.export_connectors:
                connector_links = TreenodeConnector.objects.filter(
                        project=self.project, skeleton_id__in=skeleton_id_constraints).values_list('id', 'connector', 'treenode')

                # Add matching connectors
                connector_ids = set(c for _,c,_ in connector_links)
                self.to_serialize.append(Connector.objects.filter(
                        id__in=connector_ids))
                print("Exporting %s connectors" % len(connector_ids))

                # Add matching connector links
                self.to_serialize.append(TreenodeConnector.objects.filter(
                        id__in=[l for l,_,_ in connector_links]))

                # Add additional placeholder treenodes: nodes linked to an
                # exported connector, but not part of an exported skeleton.
                connector_tids = set(TreenodeConnector.objects \
                    .filter(project=self.project, connector__in=connector_ids) \
                    .exclude(skeleton_id__in=skeleton_id_constraints) \
                    .values_list('treenode', flat=True))
                extra_tids = connector_tids - exported_tids
                print("Exporting %s placeholder nodes" % len(extra_tids))
                self.to_serialize.append(Treenode.objects.filter(id__in=extra_tids))

                # Add additional skeletons and neuron-skeleton links
                extra_skids = set(Treenode.objects.filter(id__in=extra_tids,
                        project=self.project).values_list('skeleton_id', flat=True))
                self.to_serialize.append(ClassInstance.objects.filter(id__in=extra_skids))

                extra_links = ClassInstanceClassInstance.objects \
                        .filter(project=self.project,
                                class_instance_a__in=extra_skids,
                                relation=relations['model_of'])
                self.to_serialize.append(extra_links)

                extra_nids = extra_links.values_list('class_instance_b', flat=True)
                self.to_serialize.append(ClassInstance.objects.filter(
                    project=self.project, id__in=extra_nids))

            # Export annotations and annotation-neuron links, linked to
            # selected entities.
            if self.export_annotations and 'annotated_with' in relations:
                annotation_links = ClassInstanceClassInstance.objects.filter(
                    project_id=self.project.id, relation=relations['annotated_with'],
                    class_instance_a__in=entities)
                annotations = ClassInstance.objects.filter(project_id=self.project.id,
                                                           cici_via_b__in=annotation_links)
                self.to_serialize.append(annotations)
                self.to_serialize.append(annotation_links)

            # TODO: Export reviews
        else:
            # Export treenodes. No skeleton constraint is in effect in this
            # branch (the previous nested skeleton_id_constraints check was
            # always false here and has been removed), so export all
            # treenodes of the project.
            if self.export_treenodes:
                self.to_serialize.append(Treenode.objects.filter(
                        project=self.project))

            # Export connectors and connector links
            if self.export_connectors:
                self.to_serialize.append(Connector.objects.filter(
                        project=self.project))
                self.to_serialize.append(TreenodeConnector.objects.filter(
                        project=self.project))

            # Export annotations and annotation-neuron links
            if self.export_annotations and 'annotated_with' in relations:
                annotation_links = ClassInstanceClassInstance.objects.filter(
                    project_id=self.project.id, relation=relations['annotated_with'],
                    class_instance_a__in=entities)
                annotations = ClassInstance.objects.filter(project_id=self.project.id,
                                                           cici_via_b__in=annotation_links)
                self.to_serialize.append(annotations)
                self.to_serialize.append(annotation_links)
Exemple #3
0
    def collect_data(self):
        """Collect all database objects to serialize into self.to_serialize.

        Honors required and excluded annotation constraints, optional export
        of treenodes, connectors, annotations, tags and volumes, creates
        placeholder skeletons/neurons for connector-linked nodes outside the
        exported skeleton set, and collects referenced users (complete or in
        reduced form with random passwords).

        Raises:
            CommandError: If the project is not a tracing project, if
                requested annotations cannot be found, if no neurons match,
                or if the user cancels the export.
        """
        self.to_serialize = []

        # Map class and relation names to their IDs for this project.
        classes = dict(Class.objects.filter(
                project=self.project).values_list('class_name', 'id'))
        relations = dict(Relation.objects.filter(
                project=self.project).values_list('relation_name', 'id'))

        if not check_tracing_setup(self.project.id, classes, relations):
            raise CommandError("Project with ID %s is no tracing project." % self.project.id)

        exclude_skeleton_id_constraints = set() # type: Set
        exclude_neuron_id_constraint = set() # type: Set
        exclude_annotation_map = dict() # type: Dict
        exclude_annotation_ids = list() # type: List
        if self.excluded_annotations:
            exclude_annotation_map = get_annotation_to_id_map(self.project.id,
                    self.excluded_annotations, relations, classes)
            exclude_annotation_ids = list(map(str, exclude_annotation_map.values()))
            if not exclude_annotation_ids:
                missing_annotations = set(self.excluded_annotations) - set(exclude_annotation_map.keys())
                raise CommandError("Could not find the following annotations: " +
                        ", ".join(missing_annotations))

            query_params = {
                'annotated_with': ",".join(exclude_annotation_ids),
                'sub_annotated_with': ",".join(exclude_annotation_ids)
            }
            neuron_info, num_total_records = get_annotated_entities(self.project.id,
                    query_params, relations, classes, ['neuron'], with_skeletons=True)

            logger.info("Found {} neurons with the following exclusion annotations: {}".format(
                    num_total_records, ", ".join(self.excluded_annotations)))

            exclude_skeleton_id_constraints = set(chain.from_iterable(
                    [n['skeleton_ids'] for n in neuron_info]))
            exclude_neuron_id_constraint = set(n['id'] for n in neuron_info)

        if self.required_annotations:
            annotation_map = get_annotation_to_id_map(self.project.id,
                    self.required_annotations, relations, classes)
            annotation_ids = list(map(str, annotation_map.values()))
            if not annotation_ids:
                missing_annotations = set(self.required_annotations) - set(annotation_map.keys())
                raise CommandError("Could not find the following annotations: " +
                        ", ".join(missing_annotations))

            query_params = {
                'annotated_with': ",".join(annotation_ids),
                'sub_annotated_with': ",".join(annotation_ids)
            }
            neuron_info, num_total_records = get_annotated_entities(self.project.id,
                    query_params, relations, classes, ['neuron'], with_skeletons=True)

            logger.info("Found {} neurons with the following annotations: {}".format(
                    num_total_records, ", ".join(self.required_annotations)))

            skeleton_id_constraints = list(chain.from_iterable([n['skeleton_ids'] for n in neuron_info])) # type: Optional[List]
            neuron_ids = [n['id'] for n in neuron_info]

            # Remove excluded skeletons if either a) exclusion_is_final is set
            # or b) the annotation target is *not* annotated with a required
            # annotation or one of its sub-annotations.
            if exclude_skeleton_id_constraints:
                if self.exclusion_is_final:
                    skeleton_id_constraints = [skid for skid in skeleton_id_constraints
                                            if skid not in exclude_skeleton_id_constraints]
                    neuron_ids = [nid for nid in neuron_ids
                                if nid not in exclude_neuron_id_constraint]
                else:
                    # Remove all skeletons that are marked as excluded *and* are
                    # not annotated with at least one *other* annotation that
                    # is part of the required annotation set or its
                    # sub-annotation hierarchy. To do this, get first all
                    # sub-annotations of the set of required annotations and
                    # remove the exclusion annotations. Then check all excluded
                    # skeleton IDs if they are annotated with any of
                    # those annotations. If not, they are removed from the
                    # exported set.
                    keeping_ids = set(map(int, annotation_ids))
                    annotation_sets_to_expand = set([frozenset(keeping_ids)])
                    sub_annotation_map = get_sub_annotation_ids(self.project.id,
                            annotation_sets_to_expand, relations, classes)
                    sub_annotation_ids = set(chain.from_iterable(sub_annotation_map.values())) - \
                            set(exclude_annotation_map.values())

                    # Get all skeletons annotated *directly* with one of the sub
                    # annotations or the expanded annotations themselves.
                    keep_query_params = {
                        'annotated_with': ','.join(str(a) for a in sub_annotation_ids),
                    }
                    keep_neuron_info, keep_num_total_records = get_annotated_entities(self.project.id,
                            keep_query_params, relations, classes, ['neuron'], with_skeletons=True)
                    # Exclude all skeletons that are not in this result set
                    skeleton_id_constraints = list(chain.from_iterable([n['skeleton_ids'] for n in keep_neuron_info]))
                    neuron_ids = [n['id'] for n in keep_neuron_info]

            entities = ClassInstance.objects.filter(pk__in=neuron_ids)

            skeletons = ClassInstance.objects.filter(project=self.project,
                    id__in=skeleton_id_constraints)
            skeleton_links = ClassInstanceClassInstance.objects.filter(
                    project_id=self.project.id, relation=relations['model_of'],
                    class_instance_a__in=skeletons, class_instance_b__in=entities)
        else:
            skeleton_id_constraints = None
            entities = ClassInstance.objects.filter(project=self.project,
                    class_column__in=[classes['neuron']])
            skeleton_links = ClassInstanceClassInstance.objects.filter(
                    project_id=self.project.id, relation=relations['model_of'],
                    class_instance_a__class_column=classes['skeleton'])
            skeletons = ClassInstance.objects.filter(project=self.project,
                    class_column__in=[classes['skeleton']])

            if exclude_skeleton_id_constraints:
                entities = entities.exclude(id__in=exclude_neuron_id_constraint)
                skeleton_links = skeleton_links.exclude(class_instance_a__in=exclude_skeleton_id_constraints)
                skeletons = skeletons.exclude(id__in=exclude_skeleton_id_constraints)

        if entities.count() == 0:
            raise CommandError("No matching neurons found")

        print("Will export %s neurons" % entities.count())
        start_export = ask_to_continue()
        if not start_export:
            raise CommandError("Canceled by user")

        # Export classes and relations
        self.to_serialize.append(Class.objects.filter(project=self.project))
        self.to_serialize.append(Relation.objects.filter(project=self.project))

        # Export skeleton-neuron links
        self.to_serialize.append(entities)
        self.to_serialize.append(skeleton_links)
        self.to_serialize.append(skeletons)

        treenodes = None
        connector_ids = None
        if skeleton_id_constraints:
            # Export treenodes along with their skeletons and neurons
            if self.export_treenodes:
                treenodes = Treenode.objects.filter(
                        project=self.project,
                        skeleton_id__in=skeleton_id_constraints)
                self.to_serialize.append(treenodes)

            # Export connectors and connector links
            if self.export_connectors:
                connector_links = TreenodeConnector.objects.filter(
                        project=self.project, skeleton_id__in=skeleton_id_constraints).values_list('id', 'connector', 'treenode')

                # Add matching connectors
                connector_ids = set(c for _,c,_ in connector_links)
                self.to_serialize.append(Connector.objects.filter(
                        id__in=connector_ids))
                logger.info("Exporting %s connectors" % len(connector_ids))

                # Add matching connector links
                self.to_serialize.append(TreenodeConnector.objects.filter(
                        id__in=[l for l,_,_ in connector_links]))

            # Export annotations and annotation-neuron links. Include meta
            # annotations.
            if self.export_annotations and 'annotated_with' in relations:
                annotated_with = relations['annotated_with']
                all_annotations = set() # type: Set
                all_annotation_links = set() # type: Set
                working_set = [e for e in entities]
                while working_set:
                    annotation_links = ClassInstanceClassInstance.objects.filter(
                            project_id=self.project.id, relation=annotated_with,
                            class_instance_a__in=working_set)
                    annotations = ClassInstance.objects.filter(project_id=self.project.id,
                            cici_via_b__in=annotation_links)

                    # Reset working set to add next entries
                    working_set = []

                    for al in annotation_links:
                        if al not in all_annotation_links:
                            all_annotation_links.add(al)

                    for a in annotations:
                        if a not in all_annotations:
                            all_annotations.add(a)
                            working_set.append(a)

                if all_annotations:
                    self.to_serialize.append(all_annotations)
                if all_annotation_links:
                    self.to_serialize.append(all_annotation_links)

                logger.info("Exporting {} annotations and {} annotation links: {}".format(
                        len(all_annotations), len(all_annotation_links),
                        ", ".join([a.name for a in all_annotations])))

            # Export tags
            if self.export_tags and 'labeled_as' in relations:
                tag_links = TreenodeClassInstance.objects.select_related('class_instance').filter(
                        project=self.project,
                        class_instance__class_column=classes['label'],
                        relation_id=relations['labeled_as'],
                        treenode__skeleton_id__in=skeleton_id_constraints)
                tags = [t.class_instance for t in tag_links]
                tag_names = sorted(set([t.name for t in tags]))

                self.to_serialize.append(tags)
                self.to_serialize.append(tag_links)

                logger.info("Exporting {n_tags} tags, part of {n_links} links: {tags}".format(
                    n_tags=len(tags), n_links=tag_links.count(), tags=', '.join(tag_names)))

            # TODO: Export reviews
        else:
            # Export treenodes
            if self.export_treenodes:
                treenodes = Treenode.objects.filter(project=self.project)
                if exclude_skeleton_id_constraints:
                    # Use the __in lookup: exclude_skeleton_id_constraints is
                    # a set, an exact-match lookup against it is invalid.
                    treenodes = treenodes.exclude(skeleton_id__in=exclude_skeleton_id_constraints)
                self.to_serialize.append(treenodes)

            # Export connectors and connector links
            if self.export_connectors:
                self.to_serialize.append(Connector.objects.filter(
                        project=self.project))
                self.to_serialize.append(TreenodeConnector.objects.filter(
                        project=self.project))

            # Export all tags
            if self.export_tags:
                tags = ClassInstance.objects.filter(project=self.project,
                        class_column=classes['label'])
                tag_links = TreenodeClassInstance.objects.filter(project=self.project,
                        class_instance__class_column=classes['label'],
                        relation_id=relations['labeled_as'])
                if exclude_skeleton_id_constraints:
                    # Follow the treenode relation like the constrained tag
                    # query above does, and use __in for the set of IDs.
                    tag_links = tag_links.exclude(treenode__skeleton_id__in=exclude_skeleton_id_constraints)

                self.to_serialize.append(tags)
                self.to_serialize.append(tag_links)

            # TODO: Export reviews


        # Export referenced neurons and skeletons
        exported_tids = set() # type: Set
        if treenodes:
            treenode_skeleton_ids = set(t.skeleton_id for t in treenodes)
            n_skeletons = ClassInstance.objects.filter(
                    project=self.project,
                    id__in=treenode_skeleton_ids).count()
            neuron_links = ClassInstanceClassInstance.objects \
                    .filter(project=self.project, class_instance_a__in=treenode_skeleton_ids, \
                           relation=relations.get('model_of'))
            n_neuron_links = len(neuron_links)
            neurons = set([l.class_instance_b_id for l in neuron_links])

            exported_tids = set(treenodes.values_list('id', flat=True))
            logger.info("Exporting {} treenodes in {} skeletons and {} neurons".format(
                    len(exported_tids), n_skeletons, len(neurons)))

        # Get current maximum concept ID
        cursor = connection.cursor()
        cursor.execute("""
            SELECT MAX(id) FROM concept
        """)
        new_skeleton_id = cursor.fetchone()[0] + 1
        new_neuron_id = new_skeleton_id + 1
        new_model_of_id = new_skeleton_id + 2
        new_concept_offset = 3
        new_neuron_name_id = 1
        if skeleton_id_constraints:
            if connector_ids:
                # Add additional placeholder treenodes
                connector_links = list(TreenodeConnector.objects \
                    .filter(project=self.project, connector__in=connector_ids) \
                    .exclude(skeleton_id__in=skeleton_id_constraints))
                connector_tids = set(c.treenode_id for c in connector_links)
                extra_tids = connector_tids - exported_tids
                if self.original_placeholder_context:
                    logger.info("Exporting %s placeholder nodes" % len(extra_tids))
                else:
                    logger.info("Exporting %s placeholder nodes with first new class instance ID %s" % (len(extra_tids), new_skeleton_id))

                placeholder_treenodes = Treenode.objects.prefetch_related(
                        'treenodeconnector_set').filter(id__in=extra_tids)
                # Placeholder nodes will be transformed into root nodes of new
                # skeletons.
                new_skeleton_cis = []
                new_neuron_cis = []
                new_model_of_links = []
                new_tc_links = []
                for pt in placeholder_treenodes:
                    pt.parent_id = None

                    if not self.original_placeholder_context:
                        original_skeleton_id = pt.skeleton_id
                        pt.skeleton_id = new_skeleton_id

                        # Add class instances for both the skeleton and neuron for
                        # the placeholder node skeleton
                        new_skeleton_ci = ClassInstance(
                                id = new_skeleton_id,
                                user_id=pt.user_id,
                                creation_time=pt.creation_time,
                                edition_time=pt.edition_time,
                                project_id=pt.project_id,
                                class_column_id=classes['skeleton'],
                                name='Placeholder Skeleton ' + str(new_neuron_name_id))

                        new_neuron_ci = ClassInstance(
                                id = new_neuron_id,
                                user_id=pt.user_id,
                                creation_time=pt.creation_time,
                                edition_time=pt.edition_time,
                                project_id=pt.project_id,
                                class_column_id=classes['neuron'],
                                name='Placeholder Neuron ' + str(new_neuron_name_id))

                        new_model_of_link = ClassInstanceClassInstance(
                                id=new_model_of_id,
                                user_id=pt.user_id,
                                creation_time=pt.creation_time,
                                edition_time=pt.edition_time,
                                project_id=pt.project_id,
                                relation_id=relations['model_of'],
                                class_instance_a_id=new_skeleton_id,
                                class_instance_b_id=new_neuron_id)

                        tc_offset = 0
                        for tc in pt.treenodeconnector_set.all():
                            # Only export treenode connector links to connectors
                            # that are exported.
                            if tc.skeleton_id != original_skeleton_id or \
                                    tc.connector_id not in connector_ids:
                                continue
                            # Assign a unique ID per link: the first three
                            # offsets are taken by the skeleton, neuron and
                            # model_of concepts. The previous constant "+ 1"
                            # gave every link of this node the same ID and
                            # collided with the next node's ID block.
                            new_tc_id = new_skeleton_id + new_concept_offset + tc_offset
                            tc_offset += 1
                            new_treenode_connector = TreenodeConnector(
                                    id=new_tc_id,
                                    user_id=tc.user_id,
                                    creation_time=tc.creation_time,
                                    edition_time=tc.edition_time,
                                    project_id=tc.project_id,
                                    relation_id=tc.relation_id,
                                    treenode_id=pt.id,
                                    skeleton_id = new_skeleton_id,
                                    connector_id=tc.connector_id)
                            new_tc_links.append(new_treenode_connector)

                        effective_offset = new_concept_offset + tc_offset
                        new_skeleton_id += effective_offset
                        new_neuron_id += effective_offset
                        new_model_of_id += effective_offset
                        new_neuron_name_id += 1

                        new_skeleton_cis.append(new_skeleton_ci)
                        new_neuron_cis.append(new_neuron_ci)
                        new_model_of_links.append(new_model_of_link)

                if placeholder_treenodes and not self.original_placeholder_context:
                    self.to_serialize.append(new_skeleton_cis)
                    self.to_serialize.append(new_neuron_cis)
                    self.to_serialize.append(new_model_of_links)
                    if new_tc_links:
                        self.to_serialize.append(new_tc_links)

                self.to_serialize.append(placeholder_treenodes)

                # Add additional skeletons and neuron-skeleton links
                if self.original_placeholder_context:
                    # Original skeletons
                    extra_skids = set(Treenode.objects.filter(id__in=extra_tids,
                            project=self.project).values_list('skeleton_id', flat=True))
                    self.to_serialize.append(ClassInstance.objects.filter(id__in=extra_skids))

                    # Original skeleton model-of neuron links
                    extra_links = ClassInstanceClassInstance.objects \
                            .filter(project=self.project,
                                    class_instance_a__in=extra_skids,
                                    relation=relations['model_of'])
                    self.to_serialize.append(extra_links)

                    # Original neurons
                    extra_nids = extra_links.values_list('class_instance_b', flat=True)
                    self.to_serialize.append(ClassInstance.objects.filter(
                        project=self.project, id__in=extra_nids))

                    # Connector links
                    self.to_serialize.append(connector_links)

        # Volumes
        if self.export_volumes:
            volumes = find_volumes(self.project.id, self.volume_annotations,
                    True, True)
            volume_ids = [v['id'] for v in volumes]
            if volume_ids:
                volumes = Volume.objects.filter(pk__in=volume_ids,
                        project_id=self.project.id)
                logger.info("Exporting {} volumes: {}".format(
                        len(volumes), ', '.join(v.name for v in volumes)))
                self.to_serialize.append(volumes)
            else:
                logger.info("No volumes found to export")

        # Export users, either completely or in a reduced form
        seen_user_ids = set()
        # Find users involved in exported data
        for group in self.to_serialize:
            for o in group:
                if hasattr(o, 'user_id'):
                    seen_user_ids.add(o.user_id)
                if hasattr(o, 'reviewer_id'):
                    seen_user_ids.add(o.reviewer_id)
                if hasattr(o, 'editor_id'):
                    seen_user_ids.add(o.editor_id)
        users = [ExportUser(id=u.id, username=u.username, password=u.password,
                first_name=u.first_name, last_name=u.last_name, email=u.email,
                date_joined=u.date_joined) \
                for u in User.objects.filter(pk__in=seen_user_ids)]
        if self.export_users:
            logger.info("Exporting {} users: {}".format(len(users),
                    ", ".join([u.username for u in users])))
            self.to_serialize.append(users)
        else:
            # Export in reduced form
            reduced_users = []
            for u in users:
                reduced_user = ReducedInfoUser(id=u.id, username=u.username,
                        password=make_password(User.objects.make_random_password()))
                reduced_users.append(reduced_user)
            logger.info("Exporting {} users in reduced form with random passwords: {}".format(len(reduced_users),
                    ", ".join([u.username for u in reduced_users])))
            self.to_serialize.append(reduced_users)
Exemple #4
0
def fork(request:HttpRequest, project_id) -> JsonResponse:
    """Attempt to create a new project based on the passed in project ID.
    ---
    parameters:
    - name: name
      description: Name of new project
      required: true
      type: string
    - name: copy_volumes
      description: Whether volumes will be copied to the new project
      required: false
      type: boolean
      defaultValue: false
    """
    name = request.POST.get('name')
    if not name:
        raise ValueError('Need new project name')

    copy_volumes = get_request_bool(request.POST, 'copy_volumes', False)

    # Load the source project once. Assigning a primary key of None makes
    # save() INSERT a copy of it as a new row, which becomes the fork.
    new_p = get_object_or_404(Project, pk=project_id)
    new_p.id = None
    new_p.title = name
    new_p.save()

    # Copy all project-stack links of the source project to the fork.
    ps_links = ProjectStack.objects.filter(project_id=project_id)
    for ps in ps_links:
        ps.id = None
        ps.project = new_p
        ps.save()

    # Assign read/write/import permissions for new fork
    assign_perm('can_browse', request.user, new_p)
    assign_perm('can_annotate', request.user, new_p)
    assign_perm('can_import', request.user, new_p)
    assign_perm('can_fork', request.user, new_p)
    assign_perm('can_administer', request.user, new_p)
    assign_perm('delete_project', request.user, new_p)

    # Create basic classes and relations
    validate_project_setup(new_p.id, request.user.id, fix=True)

    # If the source project is a tracing project, make the clone as well one.
    # A local import is used here to avoid a high potential for circular imports.
    from catmaid.control.tracing import check_tracing_setup, setup_tracing
    if check_tracing_setup(project_id):
        setup_tracing(new_p.id)

    # Optionally duplicate all volumes of the source project into the fork
    # with a single server-side INSERT ... SELECT.
    if copy_volumes:
        cursor = connection.cursor()
        cursor.execute("""
            INSERT INTO catmaid_volume (user_id, project_id, creation_time,
                    edition_time, editor_id, name, comment, geometry, area,
                    volume, watertight, meta_computed)
            SELECT user_id, %(new_project_id)s, creation_time, edition_time,
                    editor_id, name, comment, geometry, area, volume, watertight,
                    meta_computed
            FROM catmaid_volume
            WHERE project_id = %(project_id)s;
        """, {
            'project_id': project_id,
            'new_project_id': new_p.id
        })

    return JsonResponse({
        'new_project_id': new_p.id,
        'n_copied_stack_links': len(ps_links),
    })
Exemple #5
0
    def collect_data(self):
        """Collect all database objects of this export into
        ``self.to_serialize``, a list of object groups that is serialized
        later on.

        Neurons annotated with any of ``self.required_annotations`` are
        included; neurons annotated with any of
        ``self.excluded_annotations`` are removed. Depending on the export
        flags, treenodes, connectors, annotations, tags and users are added
        as well. Placeholder treenodes (nodes of unexported skeletons that
        are linked to exported connectors) are either exported in their
        original context or rewritten into new single-node skeletons.

        Raises:
            CommandError: if the project is no tracing project, if
                configured annotations can't be found, if no neurons match,
                or if the user cancels the export.
        """
        self.to_serialize = []

        # Map class and relation names to their IDs in this project.
        classes = dict(Class.objects.filter(
                project=self.project).values_list('class_name', 'id'))
        relations = dict(Relation.objects.filter(
                project=self.project).values_list('relation_name', 'id'))

        if not check_tracing_setup(self.project.id, classes, relations):
            raise CommandError("Project with ID %s is no tracing project." % self.project.id)

        # Skeletons and neurons that are explicitly excluded from the export.
        exclude_skeleton_id_constraints = set()
        exclude_neuron_id_constraint = set()
        if self.excluded_annotations:
            annotation_map = get_annotation_to_id_map(self.project.id,
                    self.excluded_annotations, relations, classes)
            annotation_ids = list(map(str, annotation_map.values()))
            if not annotation_ids:
                missing_annotations = set(self.excluded_annotations) - set(annotation_map.keys())
                raise CommandError("Could not find the following annotations: " +
                        ", ".join(missing_annotations))

            query_params = {
                'annotated_with': ",".join(annotation_ids),
                'sub_annotated_with': ",".join(annotation_ids)
            }
            neuron_info, num_total_records = get_annotated_entities(self.project,
                    query_params, relations, classes, ['neuron'], with_skeletons=True)

            logger.info("Found {} neurons with the following exclusion annotations: {}".format(
                    num_total_records, ", ".join(self.excluded_annotations)))

            exclude_skeleton_id_constraints = set(chain.from_iterable(
                    [n['skeleton_ids'] for n in neuron_info]))
            exclude_neuron_id_constraint = set(n['id'] for n in neuron_info)

        if self.required_annotations:
            annotation_map = get_annotation_to_id_map(self.project.id,
                    self.required_annotations, relations, classes)
            annotation_ids = list(map(str, annotation_map.values()))
            if not annotation_ids:
                missing_annotations = set(self.required_annotations) - set(annotation_map.keys())
                raise CommandError("Could not find the following annotations: " +
                        ", ".join(missing_annotations))

            query_params = {
                'annotated_with': ",".join(annotation_ids),
                'sub_annotated_with': ",".join(annotation_ids)
            }
            neuron_info, num_total_records = get_annotated_entities(self.project,
                    query_params, relations, classes, ['neuron'], with_skeletons=True)

            logger.info("Found {} neurons with the following annotations: {}".format(
                    num_total_records, ", ".join(self.required_annotations)))

            skeleton_id_constraints = list(chain.from_iterable([n['skeleton_ids'] for n in neuron_info]))
            neuron_ids = [n['id'] for n in neuron_info]

            # Remove excluded skeletons
            if exclude_skeleton_id_constraints:
                skeleton_id_constraints = [skid for skid in skeleton_id_constraints
                                           if skid not in exclude_skeleton_id_constraints]
                neuron_ids = [nid for nid in neuron_ids
                              if nid not in exclude_neuron_id_constraint]

            entities = ClassInstance.objects.filter(pk__in=neuron_ids)

            skeletons = ClassInstance.objects.filter(project=self.project,
                    id__in=skeleton_id_constraints)
            skeleton_links = ClassInstanceClassInstance.objects.filter(
                    project_id=self.project.id, relation=relations['model_of'],
                    class_instance_a__in=skeletons, class_instance_b__in=entities)
        else:
            # No annotation constraints: export all neurons and skeletons of
            # the project, minus the explicit exclusions.
            skeleton_id_constraints = None
            entities = ClassInstance.objects.filter(project=self.project,
                    class_column__in=[classes['neuron']])
            skeleton_links = ClassInstanceClassInstance.objects.filter(
                    project_id=self.project.id, relation=relations['model_of'],
                    class_instance_a__class_column=classes['skeleton'])
            skeletons = ClassInstance.objects.filter(project=self.project,
                    class_column__in=[classes['skeleton']])

            if exclude_skeleton_id_constraints:
                entities = entities.exclude(id__in=exclude_neuron_id_constraint)
                skeleton_links = skeleton_links.exclude(class_instance_a__in=exclude_skeleton_id_constraints)
                skeletons = skeletons.exclude(id__in=exclude_skeleton_id_constraints)

        if entities.count() == 0:
            raise CommandError("No matching neurons found")

        print("Will export %s neurons" % entities.count())
        start_export = ask_to_continue()
        if not start_export:
            raise CommandError("Canceled by user")

        # Export classes and relations
        self.to_serialize.append(Class.objects.filter(project=self.project))
        self.to_serialize.append(Relation.objects.filter(project=self.project))

        # Export skeleton-neuron links
        self.to_serialize.append(entities)
        self.to_serialize.append(skeleton_links)
        self.to_serialize.append(skeletons)

        treenodes = None
        connector_ids = None
        if skeleton_id_constraints:
            # Export treenodes along with their skeletons and neurons
            if self.export_treenodes:
                treenodes = Treenode.objects.filter(
                        project=self.project,
                        skeleton_id__in=skeleton_id_constraints)
                self.to_serialize.append(treenodes)

            # Export connectors and connector links
            if self.export_connectors:
                connector_links = TreenodeConnector.objects.filter(
                        project=self.project, skeleton_id__in=skeleton_id_constraints).values_list('id', 'connector', 'treenode')

                # Add matching connectors
                connector_ids = set(c for _,c,_ in connector_links)
                self.to_serialize.append(Connector.objects.filter(
                        id__in=connector_ids))
                logger.info("Exporting %s connectors" % len(connector_ids))

                # Add matching connector links
                self.to_serialize.append(TreenodeConnector.objects.filter(
                        id__in=[l for l,_,_ in connector_links]))

            # Export annotations and annotation-neuron links. Include meta
            # annotations.
            if self.export_annotations and 'annotated_with' in relations:
                annotated_with = relations['annotated_with']
                all_annotations = set()
                all_annotation_links = set()
                # Breadth-first walk over the annotation graph so that meta
                # annotations (annotations on annotations) are included too.
                working_set = [e for e in entities]
                while working_set:
                    annotation_links = ClassInstanceClassInstance.objects.filter(
                            project_id=self.project.id, relation=annotated_with,
                            class_instance_a__in=working_set)
                    annotations = ClassInstance.objects.filter(project_id=self.project.id,
                            cici_via_b__in=annotation_links)

                    # Reset working set to add next entries
                    working_set = []

                    for al in annotation_links:
                        if al not in all_annotation_links:
                            all_annotation_links.add(al)

                    for a in annotations:
                        if a not in all_annotations:
                            all_annotations.add(a)
                            working_set.append(a)

                if all_annotations:
                    self.to_serialize.append(all_annotations)
                if all_annotation_links:
                    self.to_serialize.append(all_annotation_links)

                logger.info("Exporting {} annotations and {} annotation links: {}".format(
                        len(all_annotations), len(all_annotation_links),
                        ", ".join([a.name for a in all_annotations])))

            # Export tags
            if self.export_tags and 'labeled_as' in relations:
                tag_links = TreenodeClassInstance.objects.select_related('class_instance').filter(
                        project=self.project,
                        class_instance__class_column=classes['label'],
                        relation_id=relations['labeled_as'],
                        treenode__skeleton_id__in=skeleton_id_constraints)
                tags = [t.class_instance for t in tag_links]
                tag_names = sorted(set([t.name for t in tags]))

                self.to_serialize.append(tags)
                self.to_serialize.append(tag_links)

                logger.info("Exporting {n_tags} tags, part of {n_links} links: {tags}".format(
                    n_tags=len(tags), n_links=tag_links.count(), tags=', '.join(tag_names)))

            # TODO: Export reviews
        else:
            # Export treenodes
            if self.export_treenodes:
                treenodes = Treenode.objects.filter(project=self.project)
                if exclude_skeleton_id_constraints:
                    # Use an __in lookup: exclude_skeleton_id_constraints is
                    # a set, an exact-match lookup against it would fail.
                    treenodes = treenodes.exclude(skeleton_id__in=exclude_skeleton_id_constraints)
                self.to_serialize.append(treenodes)

            # Export connectors and connector links
            if self.export_connectors:
                self.to_serialize.append(Connector.objects.filter(
                        project=self.project))
                self.to_serialize.append(TreenodeConnector.objects.filter(
                        project=self.project))

            # Export all tags
            if self.export_tags:
                tags = ClassInstance.objects.filter(project=self.project,
                        class_column=classes['label'])
                tag_links = TreenodeClassInstance.objects.filter(project=self.project,
                        class_instance__class_column=classes['label'],
                        relation_id=relations['labeled_as'])
                if exclude_skeleton_id_constraints:
                    # Same as above: the exclusion constraint is a set.
                    tag_links = tag_links.exclude(skeleton_id__in=exclude_skeleton_id_constraints)

                self.to_serialize.append(tags)
                self.to_serialize.append(tag_links)

            # TODO: Export reviews


        # Export referenced neurons and skeletons
        exported_tids = set()
        if treenodes:
            treenode_skeleton_ids = set(t.skeleton_id for t in treenodes)
            skeletons = ClassInstance.objects.filter(
                    project=self.project,
                    id__in=treenode_skeleton_ids)
            self.to_serialize.append(skeletons)

            neuron_links = ClassInstanceClassInstance.objects \
                    .select_related('class_instance_b').filter(
                            project=self.project,
                            class_instance_a__in=treenode_skeleton_ids,
                            relation=relations.get('model_of'))
            self.to_serialize.append(neuron_links)

            neurons = [l.class_instance_b for l in neuron_links]
            self.to_serialize.append(neurons)

            exported_tids = set(treenodes.values_list('id', flat=True))
            logger.info("Exporting {} treenodes in {} skeletons and {} neurons".format(
                    len(exported_tids), len(skeletons), len(neurons)))

        # Get current maximum concept ID, so that newly created class
        # instances for placeholder nodes don't collide with existing IDs.
        cursor = connection.cursor()
        cursor.execute("""
            SELECT MAX(id) FROM concept
        """)
        # MAX(id) is NULL on an empty concept table; start at 1 then.
        new_skeleton_id = (cursor.fetchone()[0] or 0) + 1
        new_neuron_id = new_skeleton_id + 1
        new_model_of_id = new_skeleton_id + 2
        new_concept_offset = 3
        new_neuron_name_id = 1
        if skeleton_id_constraints:
            if connector_ids:
                # Add additional placeholder treenodes: nodes of unexported
                # skeletons that are linked to exported connectors.
                connector_tids = set(TreenodeConnector.objects \
                    .filter(project=self.project, connector__in=connector_ids) \
                    .exclude(skeleton_id__in=skeleton_id_constraints) \
                    .values_list('treenode', flat=True))
                extra_tids = connector_tids - exported_tids
                if self.original_placeholder_context:
                    logger.info("Exporting %s placeholder nodes" % len(extra_tids))
                else:
                    logger.info("Exporting %s placeholder nodes with first new class instance ID %s" % (len(extra_tids), new_skeleton_id))

                placeholder_treenodes = Treenode.objects.prefetch_related(
                        'treenodeconnector_set').filter(id__in=extra_tids)
                # Placeholder nodes will be transformed into root nodes of new
                # skeletons.
                new_skeleton_cis = []
                new_neuron_cis = []
                new_model_of_links = []
                new_tc_links = []
                for pt in placeholder_treenodes:
                    pt.parent_id = None

                    if not self.original_placeholder_context:
                        original_skeleton_id = pt.skeleton_id
                        pt.skeleton_id = new_skeleton_id

                        # Add class instances for both the skeleton and neuron for
                        # the placeholder node skeleton
                        new_skeleton_ci = ClassInstance(
                                id=new_skeleton_id,
                                user_id=pt.user_id,
                                creation_time=pt.creation_time,
                                edition_time=pt.edition_time,
                                project_id=pt.project_id,
                                class_column_id=classes['skeleton'],
                                name='Placeholder Skeleton ' + str(new_neuron_name_id))

                        new_neuron_ci = ClassInstance(
                                id=new_neuron_id,
                                user_id=pt.user_id,
                                creation_time=pt.creation_time,
                                edition_time=pt.edition_time,
                                project_id=pt.project_id,
                                class_column_id=classes['neuron'],
                                name='Placeholder Neuron ' + str(new_neuron_name_id))

                        new_model_of_link = ClassInstanceClassInstance(
                                id=new_model_of_id,
                                user_id=pt.user_id,
                                creation_time=pt.creation_time,
                                edition_time=pt.edition_time,
                                project_id=pt.project_id,
                                relation_id=relations['model_of'],
                                class_instance_a_id=new_skeleton_id,
                                class_instance_b_id=new_neuron_id)

                        tc_offset = 0
                        for tc in pt.treenodeconnector_set.all():
                            # Only export treenode connector links to connectors
                            # that are exported.
                            if tc.skeleton_id != original_skeleton_id or \
                                    tc.connector_id not in connector_ids:
                                continue
                            # Reserve IDs after skeleton, neuron and model_of
                            # (new_concept_offset slots): using tc_offset
                            # here gives every link its own unique ID.
                            new_tc_id = new_skeleton_id + new_concept_offset + tc_offset
                            tc_offset += 1
                            new_treenode_connector = TreenodeConnector(
                                    id=new_tc_id,
                                    user_id=tc.user_id,
                                    creation_time=tc.creation_time,
                                    edition_time=tc.edition_time,
                                    project_id=tc.project_id,
                                    relation_id=tc.relation_id,
                                    treenode_id=pt.id,
                                    skeleton_id=new_skeleton_id,
                                    connector_id=tc.connector_id)
                            new_tc_links.append(new_treenode_connector)

                        # Advance all ID counters past everything just created.
                        effective_offset = new_concept_offset + tc_offset
                        new_skeleton_id += effective_offset
                        new_neuron_id += effective_offset
                        new_model_of_id += effective_offset
                        new_neuron_name_id += 1

                        new_skeleton_cis.append(new_skeleton_ci)
                        new_neuron_cis.append(new_neuron_ci)
                        new_model_of_links.append(new_model_of_link)

                if placeholder_treenodes and not self.original_placeholder_context:
                    self.to_serialize.append(new_skeleton_cis)
                    self.to_serialize.append(new_neuron_cis)
                    self.to_serialize.append(new_model_of_links)
                    if new_tc_links:
                        self.to_serialize.append(new_tc_links)

                self.to_serialize.append(placeholder_treenodes)

                # Add additional skeletons and neuron-skeleton links
                if self.original_placeholder_context:
                    extra_skids = set(Treenode.objects.filter(id__in=extra_tids,
                            project=self.project).values_list('skeleton_id', flat=True))
                    self.to_serialize.append(ClassInstance.objects.filter(id__in=extra_skids))

                    extra_links = ClassInstanceClassInstance.objects \
                            .filter(project=self.project,
                                    class_instance_a__in=extra_skids,
                                    relation=relations['model_of'])
                    self.to_serialize.append(extra_links)

                    extra_nids = extra_links.values_list('class_instance_b', flat=True)
                    self.to_serialize.append(ClassInstance.objects.filter(
                        project=self.project, id__in=extra_nids))

        # Export users, either completely or in a reduced form
        seen_user_ids = set()
        # Find users involved in exported data
        for group in self.to_serialize:
            for o in group:
                if hasattr(o, 'user_id'):
                    seen_user_ids.add(o.user_id)
                if hasattr(o, 'reviewer_id'):
                    seen_user_ids.add(o.reviewer_id)
                if hasattr(o, 'editor_id'):
                    seen_user_ids.add(o.editor_id)
        users = [ExportUser(id=u.id, username=u.username, password=u.password,
                first_name=u.first_name, last_name=u.last_name, email=u.email,
                date_joined=u.date_joined) \
                for u in User.objects.filter(pk__in=seen_user_ids)]
        if self.export_users:
            logger.info("Exporting {} users: {}".format(len(users),
                    ", ".join([u.username for u in users])))
            self.to_serialize.append(users)
        else:
            # Export in reduced form with random passwords so that no real
            # credentials leave the source instance.
            reduced_users = []
            for u in users:
                reduced_user = ReducedInfoUser(id=u.id, username=u.username,
                        password=make_password(User.objects.make_random_password()))
                reduced_users.append(reduced_user)
            logger.info("Exporting {} users in reduced form with random passwords: {}".format(len(reduced_users),
                    ", ".join([u.username for u in reduced_users])))
            self.to_serialize.append(reduced_users)
    def collect_data(self):
        """Collect all database objects of this export into
        ``self.to_serialize``, a list of object groups that is serialized
        later on.

        If ``self.required_annotations`` is set, only neurons carrying one
        of those annotations (directly or via sub-annotations) are
        exported; otherwise all neurons of the project are. Depending on
        the export flags, treenodes, connectors, annotations and users are
        added as well.

        Raises:
            CommandError: if the project is no tracing project, if required
                annotations can't be found, if no neurons match, or if the
                user cancels the export.
        """
        self.to_serialize = []

        # Map class and relation names to their IDs in this project.
        classes = dict(
            Class.objects.filter(project=self.project).values_list(
                'class_name', 'id'))
        relations = dict(
            Relation.objects.filter(project=self.project).values_list(
                'relation_name', 'id'))

        if not check_tracing_setup(self.project.id, classes, relations):
            raise CommandError("Project with ID %s is no tracing project." %
                               self.project.id)

        if self.required_annotations:
            annotation_map = get_annotation_to_id_map(
                self.project.id, self.required_annotations, relations, classes)
            # Materialize into a list: a bare map() object is always truthy
            # (so the emptiness check below would never fire) and would be
            # exhausted by the first join() call, leaving the
            # 'sub_annotated_with' parameter empty.
            annotation_ids = list(map(str, annotation_map.values()))
            if not annotation_ids:
                missing_annotations = set(self.required_annotations) - set(
                    annotation_map.keys())
                raise CommandError(
                    "Could not find the following annotations: " +
                    ", ".join(missing_annotations))

            query_params = {
                'annotated_with': ",".join(annotation_ids),
                'sub_annotated_with': ",".join(annotation_ids)
            }
            neuron_info, num_total_records = get_annotated_entities(
                self.project,
                query_params,
                relations,
                classes, ['neuron'],
                with_skeletons=True)

            logger.info(
                "Found {} neurons with the following annotations: {}".format(
                    num_total_records, ", ".join(self.required_annotations)))

            skeleton_id_constraints = list(
                chain.from_iterable([n['skeleton_ids'] for n in neuron_info]))

            neuron_ids = [n['id'] for n in neuron_info]
            entities = ClassInstance.objects.filter(pk__in=neuron_ids)

            skeletons = ClassInstance.objects.filter(
                project=self.project, id__in=skeleton_id_constraints)
            skeleton_links = ClassInstanceClassInstance.objects.filter(
                project_id=self.project.id,
                relation=relations['model_of'],
                class_instance_a__in=skeletons,
                class_instance_b__in=entities)
        else:
            # No annotation constraints: export all neurons and skeletons.
            skeleton_id_constraints = None
            entities = ClassInstance.objects.filter(
                project=self.project, class_column__in=[classes['neuron']])
            skeleton_links = ClassInstanceClassInstance.objects.filter(
                project_id=self.project.id,
                relation=relations['model_of'],
                class_instance_a__class_column=classes['skeleton'])
            skeletons = ClassInstance.objects.filter(
                project=self.project, class_column__in=[classes['skeleton']])

        if entities.count() == 0:
            raise CommandError("No matching neurons found")

        print("Will export %s neurons" % entities.count())
        start_export = ask_to_continue()
        if not start_export:
            raise CommandError("Canceled by user")

        # Export classes and relations
        self.to_serialize.append(Class.objects.filter(project=self.project))
        self.to_serialize.append(Relation.objects.filter(project=self.project))

        # Export skeleton-neuron links
        self.to_serialize.append(entities)
        self.to_serialize.append(skeleton_links)
        self.to_serialize.append(skeletons)

        # Treenode IDs that are part of the export. Initialized here so the
        # connector branch below doesn't fail when treenode export is off.
        exported_tids = set()
        if skeleton_id_constraints:
            # Export treenodes
            if self.export_treenodes:
                treenodes = Treenode.objects.filter(
                    project=self.project,
                    skeleton_id__in=skeleton_id_constraints)
                self.to_serialize.append(treenodes)

                exported_tids = set(treenodes.values_list('id', flat=True))
                logger.info("Exporting %s treenodes" % len(exported_tids))

            # Export connectors and connector links
            if self.export_connectors:
                connector_links = TreenodeConnector.objects.filter(
                    project=self.project,
                    skeleton_id__in=skeleton_id_constraints).values_list(
                        'id', 'connector', 'treenode')

                # Add matching connectors
                connector_ids = set(c for _, c, _ in connector_links)
                self.to_serialize.append(
                    Connector.objects.filter(id__in=connector_ids))
                logger.info("Exporting %s connectors" % len(connector_ids))

                # Add matching connector links
                self.to_serialize.append(
                    TreenodeConnector.objects.filter(
                        id__in=[l for l, _, _ in connector_links]))

                # Add additional placeholder treenodes: nodes of unexported
                # skeletons that are linked to exported connectors.
                connector_tids = set(TreenodeConnector.objects \
                    .filter(project=self.project, connector__in=connector_ids) \
                    .exclude(skeleton_id__in=skeleton_id_constraints) \
                    .values_list('treenode', flat=True))
                extra_tids = connector_tids - exported_tids
                logger.info("Exporting %s placeholder nodes" % len(extra_tids))
                self.to_serialize.append(
                    Treenode.objects.filter(id__in=extra_tids))

                # Add additional skeletons and neuron-skeleton links
                extra_skids = set(
                    Treenode.objects.filter(id__in=extra_tids,
                                            project=self.project).values_list(
                                                'skeleton_id', flat=True))
                self.to_serialize.append(
                    ClassInstance.objects.filter(id__in=extra_skids))

                extra_links = ClassInstanceClassInstance.objects \
                        .filter(project=self.project,
                                class_instance_a__in=extra_skids,
                                relation=relations['model_of'])
                self.to_serialize.append(extra_links)

                extra_nids = extra_links.values_list('class_instance_b',
                                                     flat=True)
                self.to_serialize.append(
                    ClassInstance.objects.filter(project=self.project,
                                                 id__in=extra_nids))

            # Export annotations and annotation-neuron links. Include meta
            # annotations.
            if self.export_annotations and 'annotated_with' in relations:
                annotated_with = relations['annotated_with']
                all_annotations = set()
                all_annotation_links = set()
                # Breadth-first walk over the annotation graph so that meta
                # annotations (annotations on annotations) are included too.
                working_set = [e for e in entities]
                while working_set:
                    annotation_links = ClassInstanceClassInstance.objects.filter(
                        project_id=self.project.id,
                        relation=annotated_with,
                        class_instance_a__in=working_set)
                    annotations = ClassInstance.objects.filter(
                        project_id=self.project.id,
                        cici_via_b__in=annotation_links)

                    # Reset working set to add next entries
                    working_set = []

                    for al in annotation_links:
                        if al not in all_annotation_links:
                            all_annotation_links.add(al)

                    for a in annotations:
                        if a not in all_annotations:
                            all_annotations.add(a)
                            working_set.append(a)

                if all_annotations:
                    self.to_serialize.append(all_annotations)
                if all_annotation_links:
                    self.to_serialize.append(all_annotation_links)

                logger.info(
                    "Exporting {} annotations and {} annotation links: {}".
                    format(len(all_annotations), len(all_annotation_links),
                           ", ".join([a.name for a in all_annotations])))

            # TODO: Export reviews
        else:
            # Export treenodes. skeleton_id_constraints is known to be None
            # in this branch, so all project treenodes are exported.
            if self.export_treenodes:
                self.to_serialize.append(
                    Treenode.objects.filter(project=self.project))

            # Export connectors and connector links
            if self.export_connectors:
                self.to_serialize.append(
                    Connector.objects.filter(project=self.project))
                self.to_serialize.append(
                    TreenodeConnector.objects.filter(project=self.project))

            # TODO: Export reviews

        # Export users
        if self.export_users:
            seen_user_ids = set()
            # Find users involved in exported data
            for group in self.to_serialize:
                for o in group:
                    if hasattr(o, 'user_id'):
                        seen_user_ids.add(o.user_id)
                    if hasattr(o, 'reviewer_id'):
                        seen_user_ids.add(o.reviewer_id)
                    if hasattr(o, 'editor_id'):
                        seen_user_ids.add(o.editor_id)
            users = User.objects.filter(pk__in=seen_user_ids)
            logger.info("Exporting {} users: {}".format(
                len(users), ", ".join([u.username for u in users])))
            self.to_serialize.append(users)
Exemple #7
0
def fork(request: HttpRequest, project_id) -> JsonResponse:
    """Attempt to create a new project based on the passed in project ID.
    ---
    parameters:
    - name: name
      description: Name of new project
      required: true
      type: string
    - name: description
      description: Description of new project
      required: false
      type: string
    - name: copy_volumes
      description: Whether volumes will be copied to the new project
      required: false
      type: boolean
      defaultValue: false
    - name: project_token
      description: Whether or not a new project token should be generated
      required: false
      type: boolean
      defaultValue: false
    - name: project_token_approval_needed
      description: Whether users joining through the new token need approval
      required: false
      type: boolean
      defaultValue: false
    - name: project_token_default_permissions
      description: Default permissions granted through the new project token
      required: false
      type: array
    - name: add_to_favorites
      description: Whether or not the new project is marked as a favorite
      required: false
      type: boolean
      defaultValue: true
    """
    name = request.POST.get('name')
    if not name:
        raise ValueError('Need new project name')

    description = request.POST.get('description')
    copy_volumes = get_request_bool(request.POST, 'copy_volumes', False)
    create_project_token = get_request_bool(request.POST, 'project_token',
                                            False)

    project_token_approval_needed = get_request_bool(
        request.POST, 'project_token_approval_needed', False)
    project_token_default_perms = get_request_list(
        request.POST, 'project_token_default_permissions', [])
    add_to_favorites = get_request_bool(request.POST, 'add_to_favorites', True)

    # Validate requested token permissions *before* creating anything, so an
    # invalid request does not leave a half-initialized fork behind.
    if create_project_token:
        allowed_permissions = set(
            get_perms_for_model(Project).values_list('codename', flat=True))
        unknown_permissions = set(
            project_token_default_perms) - allowed_permissions
        if unknown_permissions:
            raise ValueError(
                f'Unknown permissions: {", ".join(unknown_permissions)}')

    # A single fetch is enough: clearing the primary key below and saving
    # inserts a copy, while the original row remains untouched.
    new_p = get_object_or_404(Project, pk=project_id)

    new_p.id = None
    new_p.title = name
    new_p.comment = description
    new_p.save()

    # Copy all project-stack links of the source project to the fork.
    ps_links = list(ProjectStack.objects.filter(project_id=project_id))
    for ps in ps_links:
        ps.id = None
        ps.project = new_p
        ps.save()

    # Assign read/write/import/admin permissions on the new fork to the
    # requesting user.
    assign_perm('can_browse', request.user, new_p)
    assign_perm('can_annotate', request.user, new_p)
    assign_perm('can_import', request.user, new_p)
    assign_perm('can_fork', request.user, new_p)
    assign_perm('can_administer', request.user, new_p)
    assign_perm('delete_project', request.user, new_p)

    # Create basic classes and relations
    validate_project_setup(new_p.id, request.user.id, fix=True)

    # If the source project is a tracing project, make the clone as well one.
    # A local import is used here to avoid a high potential for circular imports.
    from catmaid.control.tracing import check_tracing_setup, setup_tracing
    if check_tracing_setup(project_id):
        setup_tracing(new_p.id)

    # Optionally duplicate all volumes of the source project with a single
    # server-side INSERT ... SELECT, avoiding a per-volume round trip.
    if copy_volumes:
        cursor = connection.cursor()
        cursor.execute(
            """
            INSERT INTO catmaid_volume (user_id, project_id, creation_time,
                    edition_time, editor_id, name, comment, geometry, area,
                    volume, watertight, meta_computed)
            SELECT user_id, %(new_project_id)s, creation_time, edition_time,
                    editor_id, name, comment, geometry, area, volume, watertight,
                    meta_computed
            FROM catmaid_volume
            WHERE project_id = %(project_id)s;
        """, {
                'project_id': project_id,
                'new_project_id': new_p.id
            })

    project_token_str = None
    if create_project_token:
        # Permissions were validated above; create the token and give it a
        # readable name derived from its generated ID.
        project_token = ProjectToken.objects.create(
            **{
                'user_id': request.user.id,
                'project_id': new_p.id,
                'name': "",
                'needs_approval': project_token_approval_needed,
                'default_permissions': project_token_default_perms,
            })
        project_token.name = f'Project token #{project_token.id}'
        project_token.save()

        project_token_str = project_token.token

    if add_to_favorites:
        FavoriteProject.objects.create(**{
            'project_id': new_p.id,
            'user_id': request.user.id,
        })

    return JsonResponse({
        'new_project_id': new_p.id,
        'n_copied_stack_links': len(ps_links),
        'project_token': project_token_str,
    })