def mkcici(ci1, rel, ci2, project, user, save=True):
    """Build a ClassInstanceClassInstance link between ``ci1`` and ``ci2``.

    The link is persisted immediately unless ``save`` is False, in which
    case the unsaved instance is returned for the caller to save later.
    """
    link = ClassInstanceClassInstance(
        user=user,
        project=project,
        class_instance_a=ci1,
        relation=rel,
        class_instance_b=ci2)
    if save:
        link.save()
    return link
def _create_relation(user, project_id, relation_id, instance_a_id, instance_b_id):
    """Create, save and return a class_instance_class_instance row linking
    instance_a to instance_b via the given relation, owned by ``user``.
    """
    link = ClassInstanceClassInstance(
        user=user,
        project_id=project_id,
        relation_id=relation_id,
        class_instance_a_id=instance_a_id,
        class_instance_b_id=instance_b_id)
    link.save()
    return link
def lines_add(request, project_id=None):
    """Link a driver line (created on demand) to a neuron via the
    'expresses_in' relation, then redirect to the neuron's view page.
    """
    p = Project.objects.get(pk=project_id)
    # FIXME: for the moment, just hardcode the user ID:
    user = User.objects.get(pk=3)
    neuron = get_object_or_404(ClassInstance,
                               pk=request.POST['neuron_id'],
                               project=p)
    # There's a race condition here, if two people try to add a line
    # with the same name at the same time. The normal way to deal
    # with this would be to make the `name` column unique in the
    # table, but since the class_instance table isn't just for driver
    # lines, we can't do that. (FIXME)
    line_name = request.POST['line_name']
    try:
        line = ClassInstance.objects.get(name=line_name)
    except ClassInstance.DoesNotExist:
        # No line with that name yet - create one in this project.
        line = ClassInstance()
        line.name = line_name
        line.project = p
        line.user = user
        line.class_column = Class.objects.get(class_name='driver_line', project=p)
        line.save()
    expresses_in = Relation.objects.get(relation_name='expresses_in', project=p)
    link = ClassInstanceClassInstance()
    link.class_instance_a = line
    link.class_instance_b = neuron
    link.relation = expresses_in
    link.user = user
    link.project = p
    link.save()
    return HttpResponseRedirect(
        reverse('vncbrowser.views.view',
                kwargs={'neuron_id': neuron.id, 'project_id': p.id}))
def create_node():
    """ Creates a new node. The class ID, name and parent are read from the
    enclosing scope's ``params`` dict; the new instance is created in the
    ``workspace_pid`` project and linked to its parent. Returns the new
    class instance ID as JSON.
    """
    # TODO: Test if class and parent class instance exist
    # if params['classid'] not in class_map:
    #    raise CatmaidException('Failed to select class.')
    classification_instance_operation.res_on_err = 'Failed to insert instance of class.'
    node = ClassInstance(
            user=request.user,
            name=params['objname'])
    node.project_id = workspace_pid
    node.class_column_id = params['classid']
    node.save()
    class_name = node.class_column.class_name
    # Log the ID of the node that was just created. (``params`` carries no
    # 'id' key for a create operation - the ID only exists after save().)
    insert_into_log(project_id, request.user.id, "create_%s" % class_name,
            None, "Created %s with ID %s" % (class_name, node.id))

    # We need to connect the node to its parent, or to root if no valid parent is given.
    node_parent_id = params['parentid']
    # TODO: Test if tis parent exists
    #if 0 == params['parentid']:
    #    # Find root element
    #    classification_instance_operation.res_on_err = 'Failed to select classification root.'
    #    node_parent_id = ClassInstance.objects.filter(
    #            project=workspace_pid,
    #            class_column=class_map['classification_root'])[0].id

    #Relation.objects.filter(id=params['relationid'])
    #if params['relationname'] not in relation_map:
    #    raise CatmaidException('Failed to select relation %s' % params['relationname'])

    classification_instance_operation.res_on_err = 'Failed to insert CICI-link.'
    cici = ClassInstanceClassInstance()
    cici.user = request.user
    cici.project_id = workspace_pid
    cici.relation_id = params['relationid']
    cici.class_instance_a_id = node.id
    cici.class_instance_b_id = node_parent_id
    cici.save()

    return HttpResponse(json.dumps({'class_instance_id': node.id}))
def create_node():
    """Create a new ontology class instance under the given parent node.

    Parameters come from the enclosing scope (``params``, ``request``,
    ``project_id``, ``class_map``, ``relation_map``). Returns the new
    class instance ID as JSON.
    """
    # Can only create a node if the parent node is owned by the user
    # or the user is a superuser
    # Given that the parentid is 0 to signal root (but root has a non-zero id),
    # this implies that regular non-superusers cannot create nodes under root,
    # but only in their staging area.
    can_edit_class_instance_or_fail(request.user, params['parentid'])

    if params['classname'] not in class_map:
        raise Exception('Failed to select class.')
    instance_operation.res_on_err = 'Failed to insert instance of class.'
    node = ClassInstance(
            user=request.user,
            name=params['objname'])
    node.project_id = project_id
    node.class_column_id = class_map[params['classname']]
    node.save()
    # Log the ID of the node that was just created. (``params`` carries no
    # 'id' key for a create operation - the ID only exists after save().)
    insert_into_log(project_id, request.user.id, "create_%s" % params['classname'],
            None, "Created %s with ID %s" % (params['classname'], node.id))

    # We need to connect the node to its parent, or to root if no valid parent is given.
    node_parent_id = params['parentid']
    if 0 == params['parentid']:
        # Find root element
        instance_operation.res_on_err = 'Failed to select root.'
        node_parent_id = ClassInstance.objects.filter(
                project=project_id,
                class_column=class_map['root'])[0].id

    if params['relationname'] not in relation_map:
        instance_operation.res_on_err = ''
        raise Exception('Failed to select relation %s' % params['relationname'])

    instance_operation.res_on_err = 'Failed to insert relation.'
    cici = ClassInstanceClassInstance()
    cici.user = request.user
    cici.project_id = project_id
    cici.relation_id = relation_map[params['relationname']]
    cici.class_instance_a_id = node.id
    cici.class_instance_b_id = node_parent_id
    cici.save()

    return HttpResponse(json.dumps({'class_instance_id': node.id}))
def lines_add(request, project_id=None):
    """Attach a driver line to a neuron through 'expresses_in', creating
    the line class instance first if no instance with that name exists.
    Redirects to the neuron view afterwards.
    """
    project = Project.objects.get(pk=project_id)
    # FIXME: for the moment, just hardcode the user ID:
    acting_user = User.objects.get(pk=3)
    neuron = get_object_or_404(ClassInstance,
                               pk=request.POST['neuron_id'],
                               project=project)
    # There's a race condition here, if two people try to add a line
    # with the same name at the same time. The normal way to deal
    # with this would be to make the `name` column unique in the
    # table, but since the class_instance table isn't just for driver
    # lines, we can't do that. (FIXME)
    try:
        line = ClassInstance.objects.get(name=request.POST['line_name'])
    except ClassInstance.DoesNotExist:
        line = ClassInstance()
        line.name = request.POST['line_name']
        line.project = project
        line.user = acting_user
        line.class_column = Class.objects.get(class_name='driver_line',
                                              project=project)
        line.save()
    relation = Relation.objects.get(relation_name='expresses_in',
                                    project=project)
    cici = ClassInstanceClassInstance()
    cici.class_instance_a = line
    cici.class_instance_b = neuron
    cici.relation = relation
    cici.user = acting_user
    cici.project = project
    cici.save()
    target = reverse('vncbrowser.views.view',
                     kwargs={'neuron_id': neuron.id, 'project_id': project.id})
    return HttpResponseRedirect(target)
def collect_data(self):
    """Collect every database object that should be exported into
    ``self.to_serialize``, honoring the configured annotation filters.

    Raises CommandError if the project is not a tracing project, if a
    referenced annotation cannot be found, if no neurons match, or if
    the user cancels the export at the confirmation prompt.
    """
    self.to_serialize = []

    # Map class/relation names to their IDs for this project.
    classes = dict(Class.objects.filter(
            project=self.project).values_list('class_name', 'id'))
    relations = dict(Relation.objects.filter(
            project=self.project).values_list('relation_name', 'id'))

    if not check_tracing_setup(self.project.id, classes, relations):
        raise CommandError("Project with ID %s is no tracing project." % self.project.id)

    exclude_skeleton_id_constraints = set() # type: Set
    exclude_neuron_id_constraint = set() # type: Set
    exclude_annotation_map = dict() # type: Dict
    exclude_annotation_ids = list() # type: List
    if self.excluded_annotations:
        # Resolve exclusion annotation names to IDs and find all neurons
        # (and their skeletons) tagged with them, including sub-annotations.
        exclude_annotation_map = get_annotation_to_id_map(self.project.id,
                self.excluded_annotations, relations, classes)
        exclude_annotation_ids = list(map(str, exclude_annotation_map.values()))
        if not exclude_annotation_ids:
            missing_annotations = set(self.excluded_annotations) - \
                    set(exclude_annotation_map.keys())
            raise CommandError("Could not find the following annotations: " +
                    ", ".join(missing_annotations))

        query_params = {
            'annotated_with': ",".join(exclude_annotation_ids),
            'sub_annotated_with': ",".join(exclude_annotation_ids)
        }
        neuron_info, num_total_records = get_annotated_entities(self.project.id,
                query_params, relations, classes, ['neuron'], with_skeletons=True)
        logger.info("Found {} neurons with the following exclusion annotations: {}".format(
                num_total_records, ", ".join(self.excluded_annotations)))

        exclude_skeleton_id_constraints = set(chain.from_iterable(
                [n['skeleton_ids'] for n in neuron_info]))
        exclude_neuron_id_constraint = set(n['id'] for n in neuron_info)

    if self.required_annotations:
        # Resolve required annotation names and select the matching neurons.
        annotation_map = get_annotation_to_id_map(self.project.id,
                self.required_annotations, relations, classes)
        annotation_ids = list(map(str, annotation_map.values()))
        if not annotation_ids:
            missing_annotations = set(self.required_annotations) - \
                    set(annotation_map.keys())
            raise CommandError("Could not find the following annotations: " +
                    ", ".join(missing_annotations))

        query_params = {
            'annotated_with': ",".join(annotation_ids),
            'sub_annotated_with': ",".join(annotation_ids)
        }
        neuron_info, num_total_records = get_annotated_entities(self.project.id,
                query_params, relations, classes, ['neuron'], with_skeletons=True)
        logger.info("Found {} neurons with the following annotations: {}".format(
                num_total_records, ", ".join(self.required_annotations)))

        skeleton_id_constraints = list(chain.from_iterable([n['skeleton_ids'] for n in neuron_info])) # type: Optional[List]
        neuron_ids = [n['id'] for n in neuron_info]

        # Remove excluded skeletons if either a) exclusion_is_final is set
        # or b) the annotation target is *not* annotated with a required
        # annotation or one of its sub-annotations.
        if exclude_skeleton_id_constraints:
            if self.exclusion_is_final:
                skeleton_id_constraints = [skid for skid in skeleton_id_constraints
                        if skid not in exclude_skeleton_id_constraints]
                neuron_ids = [nid for nid in neuron_ids
                        if nid not in exclude_neuron_id_constraint]
            else:
                # Remove all skeletons that are marked as excluded *and* are
                # not annotatead with at least one *other* annotation that
                # is part of the required annotation set or its
                # sub-annotation hierarchy. To do this, get first all
                # sub-annotations of the set of required annotations and
                # remove the exclusion annotations. Then check all excluded
                # skeleton IDs if they are annotatead with any of the
                # those annotations. If not, they are removed from the
                # exported set.
                keeping_ids = set(map(int, annotation_ids))
                annotation_sets_to_expand = set([frozenset(keeping_ids)])
                sub_annotation_map = get_sub_annotation_ids(self.project.id,
                        annotation_sets_to_expand, relations, classes)
                sub_annotation_ids = set(chain.from_iterable(sub_annotation_map.values())) - \
                        set(exclude_annotation_map.values())

                # Get all skeletons annotated *directly* with one of the sub
                # annotations or the expanded annotations themselves.
                keep_query_params = {
                    'annotated_with': ','.join(str(a) for a in sub_annotation_ids),
                }
                keep_neuron_info, keep_num_total_records = get_annotated_entities(self.project.id,
                        keep_query_params, relations, classes, ['neuron'], with_skeletons=True)

                # Exclude all skeletons that are not in this result set
                skeleton_id_constraints = list(chain.from_iterable([n['skeleton_ids'] for n in keep_neuron_info]))
                neuron_ids = [n['id'] for n in keep_neuron_info]

        entities = ClassInstance.objects.filter(pk__in=neuron_ids)
        skeletons = ClassInstance.objects.filter(project=self.project,
                id__in=skeleton_id_constraints)
        skeleton_links = ClassInstanceClassInstance.objects.filter(
                project_id=self.project.id, relation=relations['model_of'],
                class_instance_a__in=skeletons, class_instance_b__in=entities)
    else:
        # No required annotations: export all neurons/skeletons of the
        # project, minus the explicit exclusions.
        skeleton_id_constraints = None
        entities = ClassInstance.objects.filter(project=self.project,
                class_column__in=[classes['neuron']])
        skeleton_links = ClassInstanceClassInstance.objects.filter(
                project_id=self.project.id, relation=relations['model_of'],
                class_instance_a__class_column=classes['skeleton'])
        skeletons = ClassInstance.objects.filter(project=self.project,
                class_column__in=[classes['skeleton']])

        if exclude_skeleton_id_constraints:
            entities = entities.exclude(id__in=exclude_neuron_id_constraint)
            skeleton_links = skeleton_links.exclude(class_instance_a__in=exclude_skeleton_id_constraints)
            skeletons = skeletons.exclude(id__in=exclude_skeleton_id_constraints)

    if entities.count() == 0:
        raise CommandError("No matching neurons found")

    print("Will export %s neurons" % entities.count())
    start_export = ask_to_continue()
    if not start_export:
        raise CommandError("Canceled by user")

    # Export classes and relations
    self.to_serialize.append(Class.objects.filter(project=self.project))
    self.to_serialize.append(Relation.objects.filter(project=self.project))

    # Export skeleton-neuron links
    self.to_serialize.append(entities)
    self.to_serialize.append(skeleton_links)
    self.to_serialize.append(skeletons)

    treenodes = None
    connector_ids = None
    if skeleton_id_constraints:
        # Export treenodes along with their skeletons and neurons
        if self.export_treenodes:
            treenodes = Treenode.objects.filter(
                    project=self.project,
                    skeleton_id__in=skeleton_id_constraints)
            self.to_serialize.append(treenodes)

        # Export connectors and connector links
        if self.export_connectors:
            connector_links = TreenodeConnector.objects.filter(
                    project=self.project, skeleton_id__in=skeleton_id_constraints).values_list('id', 'connector', 'treenode')

            # Add matching connectors
            connector_ids = set(c for _,c,_ in connector_links)
            self.to_serialize.append(Connector.objects.filter(
                    id__in=connector_ids))
            logger.info("Exporting %s connectors" % len(connector_ids))

            # Add matching connector links
            self.to_serialize.append(TreenodeConnector.objects.filter(
                    id__in=[l for l,_,_ in connector_links]))

        # Export annotations and annotation-neuron links. Include meta
        # annotations.
        if self.export_annotations and 'annotated_with' in relations:
            annotated_with = relations['annotated_with']
            all_annotations = set() # type: Set
            all_annotation_links = set() # type: Set
            # Breadth-first walk over the annotation graph so that
            # meta-annotations of annotations are included as well.
            working_set = [e for e in entities]
            while working_set:
                annotation_links = ClassInstanceClassInstance.objects.filter(
                        project_id=self.project.id, relation=annotated_with,
                        class_instance_a__in=working_set)
                annotations = ClassInstance.objects.filter(project_id=self.project.id,
                        cici_via_b__in=annotation_links)

                # Reset working set to add next entries
                working_set = []

                for al in annotation_links:
                    if al not in all_annotation_links:
                        all_annotation_links.add(al)

                for a in annotations:
                    if a not in all_annotations:
                        all_annotations.add(a)
                        working_set.append(a)

            if all_annotations:
                self.to_serialize.append(all_annotations)
            if all_annotation_links:
                self.to_serialize.append(all_annotation_links)

            logger.info("Exporting {} annotations and {} annotation links: {}".format(
                    len(all_annotations), len(all_annotation_links),
                    ", ".join([a.name for a in all_annotations])))

        # Export tags
        if self.export_tags and 'labeled_as' in relations:
            tag_links = TreenodeClassInstance.objects.select_related('class_instance').filter(
                    project=self.project,
                    class_instance__class_column=classes['label'],
                    relation_id=relations['labeled_as'],
                    treenode__skeleton_id__in=skeleton_id_constraints)
            tags = [t.class_instance for t in tag_links]
            tag_names = sorted(set([t.name for t in tags]))

            self.to_serialize.append(tags)
            self.to_serialize.append(tag_links)

            logger.info("Exporting {n_tags} tags, part of {n_links} links: {tags}".format(
                    n_tags=len(tags), n_links=tag_links.count(), tags=', '.join(tag_names)))

        # TODO: Export reviews
    else:
        # Export treenodes
        if self.export_treenodes:
            treenodes = Treenode.objects.filter(project=self.project)
            if exclude_skeleton_id_constraints:
                # NOTE(review): this excludes on equality against a *set*;
                # presumably skeleton_id__in was intended - confirm.
                treenodes = treenodes.exclude(skeleton_id=exclude_skeleton_id_constraints)
            self.to_serialize.append(treenodes)

        # Export connectors and connector links
        if self.export_connectors:
            self.to_serialize.append(Connector.objects.filter(
                    project=self.project))
            self.to_serialize.append(TreenodeConnector.objects.filter(
                    project=self.project))

        # Export all tags
        if self.export_tags:
            tags = ClassInstance.objects.filter(project=self.project,
                    class_column=classes['label'])
            tag_links = TreenodeClassInstance.objects.filter(project=self.project,
                    class_instance__class_column=classes['label'],
                    relation_id=relations['labeled_as'])
            if exclude_skeleton_id_constraints:
                # NOTE(review): same equality-vs-__in question as above.
                tag_links = tag_links.exclude(skeleton_id=exclude_skeleton_id_constraints)
            self.to_serialize.append(tags)
            self.to_serialize.append(tag_links)

        # TODO: Export reviews

    # Export referenced neurons and skeletons
    exported_tids = set() # type: Set
    if treenodes:
        treenode_skeleton_ids = set(t.skeleton_id for t in treenodes)
        n_skeletons = ClassInstance.objects.filter(
                project=self.project,
                id__in=treenode_skeleton_ids).count()
        neuron_links = ClassInstanceClassInstance.objects \
                .filter(project=self.project, class_instance_a__in=treenode_skeleton_ids, \
                        relation=relations.get('model_of'))
        n_neuron_links = len(neuron_links)
        neurons = set([l.class_instance_b_id for l in neuron_links])

        exported_tids = set(treenodes.values_list('id', flat=True))
        logger.info("Exporting {} treenodes in {} skeletons and {} neurons".format(
                len(exported_tids), n_skeletons, len(neurons)))

    # Get current maximum concept ID
    cursor = connection.cursor()
    cursor.execute("""
        SELECT MAX(id) FROM concept
    """)
    # New concept IDs for placeholder skeletons/neurons/links are allocated
    # past the current maximum, in groups of new_concept_offset per node.
    new_skeleton_id = cursor.fetchone()[0] + 1
    new_neuron_id = new_skeleton_id + 1
    new_model_of_id = new_skeleton_id + 2
    new_concept_offset = 3
    new_neuron_name_id = 1
    if skeleton_id_constraints:
        if connector_ids:
            # Add addition placeholder treenodes
            connector_links = list(TreenodeConnector.objects \
                    .filter(project=self.project, connector__in=connector_ids) \
                    .exclude(skeleton_id__in=skeleton_id_constraints))
            connector_tids = set(c.treenode_id for c in connector_links)
            extra_tids = connector_tids - exported_tids
            if self.original_placeholder_context:
                logger.info("Exporting %s placeholder nodes" % len(extra_tids))
            else:
                logger.info("Exporting %s placeholder nodes with first new class instance ID %s" %
                        (len(extra_tids), new_skeleton_id))

            placeholder_treenodes = Treenode.objects.prefetch_related(
                    'treenodeconnector_set').filter(id__in=extra_tids)
            # Placeholder nodes will be transformed into root nodes of new
            # skeletons.
            new_skeleton_cis = []
            new_neuron_cis = []
            new_model_of_links = []
            new_tc_links = []
            for pt in placeholder_treenodes:
                pt.parent_id = None

                if not self.original_placeholder_context:
                    original_skeleton_id = pt.skeleton_id
                    pt.skeleton_id = new_skeleton_id

                    # Add class instances for both the skeleton and neuron for
                    # the placeholder node skeleton
                    new_skeleton_ci = ClassInstance(
                            id = new_skeleton_id,
                            user_id=pt.user_id,
                            creation_time=pt.creation_time,
                            edition_time=pt.edition_time,
                            project_id=pt.project_id,
                            class_column_id=classes['skeleton'],
                            name='Placeholder Skeleton ' + str(new_neuron_name_id))
                    new_neuron_ci = ClassInstance(
                            id = new_neuron_id,
                            user_id=pt.user_id,
                            creation_time=pt.creation_time,
                            edition_time=pt.edition_time,
                            project_id=pt.project_id,
                            class_column_id=classes['neuron'],
                            name='Placeholder Neuron ' + str(new_neuron_name_id))
                    new_model_of_link = ClassInstanceClassInstance(
                            id=new_model_of_id,
                            user_id=pt.user_id,
                            creation_time=pt.creation_time,
                            edition_time=pt.edition_time,
                            project_id=pt.project_id,
                            relation_id=relations['model_of'],
                            class_instance_a_id=new_skeleton_id,
                            class_instance_b_id=new_neuron_id)

                    tc_offset = 0
                    for tc in pt.treenodeconnector_set.all():
                        # Only export treenode connector links to connectors
                        # that are exported.
                        if tc.skeleton_id != original_skeleton_id or \
                                tc.connector_id not in connector_ids:
                            continue
                        # NOTE(review): new_tc_id does not incorporate
                        # tc_offset, so multiple qualifying links on one
                        # placeholder node would share an ID - confirm.
                        new_tc_id = new_skeleton_id + new_concept_offset + 1
                        tc_offset += 1
                        new_treenode_connector = TreenodeConnector(
                                id=new_tc_id,
                                user_id=tc.user_id,
                                creation_time=tc.creation_time,
                                edition_time=tc.edition_time,
                                project_id=tc.project_id,
                                relation_id=tc.relation_id,
                                treenode_id=pt.id,
                                skeleton_id = new_skeleton_id,
                                connector_id=tc.connector_id)
                        new_tc_links.append(new_treenode_connector)

                    # Advance the ID cursors past everything allocated for
                    # this placeholder node.
                    effective_offset = new_concept_offset + tc_offset
                    new_skeleton_id += effective_offset
                    new_neuron_id += effective_offset
                    new_model_of_id += effective_offset
                    new_neuron_name_id += 1

                    new_skeleton_cis.append(new_skeleton_ci)
                    new_neuron_cis.append(new_neuron_ci)
                    new_model_of_links.append(new_model_of_link)

            if placeholder_treenodes and not self.original_placeholder_context:
                self.to_serialize.append(new_skeleton_cis)
                self.to_serialize.append(new_neuron_cis)
                self.to_serialize.append(new_model_of_links)
                if new_tc_links:
                    self.to_serialize.append(new_tc_links)

            self.to_serialize.append(placeholder_treenodes)

            # Add additional skeletons and neuron-skeleton links
            if self.original_placeholder_context:
                # Original skeletons
                extra_skids = set(Treenode.objects.filter(id__in=extra_tids,
                        project=self.project).values_list('skeleton_id', flat=True))
                self.to_serialize.append(ClassInstance.objects.filter(id__in=extra_skids))

                # Original skeleton model-of neuron links
                extra_links = ClassInstanceClassInstance.objects \
                        .filter(project=self.project,
                                class_instance_a__in=extra_skids,
                                relation=relations['model_of'])
                self.to_serialize.append(extra_links)

                # Original neurons
                extra_nids = extra_links.values_list('class_instance_b', flat=True)
                self.to_serialize.append(ClassInstance.objects.filter(
                        project=self.project, id__in=extra_nids))

            # Connector links
            self.to_serialize.append(connector_links)

    # Volumes
    if self.export_volumes:
        volumes = find_volumes(self.project.id, self.volume_annotations,
                True, True)
        volume_ids = [v['id'] for v in volumes]
        if volume_ids:
            volumes = Volume.objects.filter(pk__in=volume_ids,
                    project_id=self.project.id)
            logger.info("Exporting {} volumes: {}".format(
                    len(volumes), ', '.join(v.name for v in volumes)))
            self.to_serialize.append(volumes)
        else:
            logger.info("No volumes found to export")

    # Export users, either completely or in a reduced form
    seen_user_ids = set()
    # Find users involved in exported data
    for group in self.to_serialize:
        for o in group:
            if hasattr(o, 'user_id'):
                seen_user_ids.add(o.user_id)
            if hasattr(o, 'reviewer_id'):
                seen_user_ids.add(o.reviewer_id)
            if hasattr(o, 'editor_id'):
                seen_user_ids.add(o.editor_id)
    users = [ExportUser(id=u.id, username=u.username, password=u.password,
            first_name=u.first_name, last_name=u.last_name, email=u.email,
            date_joined=u.date_joined) \
            for u in User.objects.filter(pk__in=seen_user_ids)]
    if self.export_users:
        logger.info("Exporting {} users: {}".format(len(users),
                ", ".join([u.username for u in users])))
        self.to_serialize.append(users)
    else:
        # Export in reduced form
        reduced_users = []
        for u in users:
            reduced_user = ReducedInfoUser(id=u.id, username=u.username,
                    password=make_password(User.objects.make_random_password()))
            reduced_users.append(reduced_user)
        logger.info("Exporting {} users in reduced form with random passwords: {}".format(len(reduced_users),
                ", ".join([u.username for u in reduced_users])))
        self.to_serialize.append(reduced_users)
def split_skeleton(request, project_id=None):
    """ The split is only possible if the neuron is not locked or if it is
    locked by the current user or if the current user belongs to the group
    of the user who locked it. Of course, the split is also possible if the
    current user is a super-user. Also, all reviews of the treenodes in the
    new neuron are updated to refer to the new skeleton.

    POST parameters: 'treenode_id' (split point), 'upstream_annotation_map'
    and 'downstream_annotation_map' (JSON dicts distributing annotations
    between the two resulting neurons).
    """
    treenode_id = int(request.POST['treenode_id'])
    treenode = Treenode.objects.get(pk=treenode_id)
    skeleton_id = treenode.skeleton_id
    upstream_annotation_map = json.loads(request.POST.get('upstream_annotation_map'))
    downstream_annotation_map = json.loads(request.POST.get('downstream_annotation_map'))
    cursor = connection.cursor()

    # Check if the treenode is root!
    if not treenode.parent:
        return HttpResponse(json.dumps({'error': 'Can\'t split at the root node: it doesn\'t have a parent.'}))

    # Check if annotations are valid
    if not check_annotations_on_split(project_id, skeleton_id,
            frozenset(upstream_annotation_map.keys()),
            frozenset(downstream_annotation_map.keys())):
        raise Exception("Annotation distribution is not valid for splitting. " \
                "One part has to keep the whole set of annotations!")

    skeleton = ClassInstance.objects.select_related('user').get(pk=skeleton_id)
    project_id=int(project_id)

    # retrieve neuron of this skeleton
    neuron = ClassInstance.objects.get(
        cici_via_b__relation__relation_name='model_of',
        cici_via_b__class_instance_a_id=skeleton_id)

    # Make sure the user has permissions to edit
    can_edit_class_instance_or_fail(request.user, neuron.id, 'neuron')

    # retrieve the id, parent_id of all nodes in the skeleton
    # with minimal ceremony
    cursor.execute('''
    SELECT id, parent_id FROM treenode WHERE skeleton_id=%s
    ''' % skeleton_id) # no need to sanitize

    # build the networkx graph from it
    graph = nx.DiGraph()
    for row in cursor.fetchall():
        graph.add_node( row[0] )
        if row[1]:
            # edge from parent_id to id
            graph.add_edge( row[1], row[0] )

    # find downstream nodes starting from target treenode_id
    # and generate the list of IDs to change, starting at treenode_id (inclusive)
    change_list = nx.bfs_tree(graph, treenode_id).nodes()
    if not change_list:
        # When splitting an end node, the bfs_tree doesn't return any nodes,
        # which is surprising, because when the splitted tree has 2 or more nodes
        # the node at which the split is made is included in the list.
        change_list.append(treenode_id)

    # create a new skeleton
    new_skeleton = ClassInstance()
    new_skeleton.name = 'Skeleton'
    new_skeleton.project_id = project_id
    new_skeleton.user = skeleton.user # The same user that owned the skeleton to split
    new_skeleton.class_column = Class.objects.get(class_name='skeleton', project_id=project_id)
    new_skeleton.save()
    # Rename with the DB-assigned ID, which requires a second save.
    new_skeleton.name = 'Skeleton {0}'.format( new_skeleton.id ) # This could be done with a trigger in the database
    new_skeleton.save()

    # Create new neuron
    new_neuron = ClassInstance()
    new_neuron.name = 'Neuron'
    new_neuron.project_id = project_id
    new_neuron.user = skeleton.user
    new_neuron.class_column = Class.objects.get(class_name='neuron',
            project_id=project_id)
    new_neuron.save()
    new_neuron.name = 'Neuron %s' % str(new_neuron.id)
    new_neuron.save()

    # Assign the skeleton to new neuron
    cici = ClassInstanceClassInstance()
    cici.class_instance_a = new_skeleton
    cici.class_instance_b = new_neuron
    cici.relation = Relation.objects.get(relation_name='model_of', project_id=project_id)
    cici.user = skeleton.user # The same user that owned the skeleton to split
    cici.project_id = project_id
    cici.save()

    # update skeleton_id of list in treenode table
    # This creates a lazy QuerySet that, upon calling update, returns a new QuerySet
    # that is then executed. It does NOT create an update SQL query for every treenode.
    tns = Treenode.objects.filter(id__in=change_list).update(skeleton=new_skeleton)

    # update the skeleton_id value of the treenode_connector table
    tc = TreenodeConnector.objects.filter(
        relation__relation_name__endswith = 'synaptic_to',
        treenode__in=change_list,
    ).update(skeleton=new_skeleton)

    # setting new root treenode's parent to null
    Treenode.objects.filter(id=treenode_id).update(parent=None, editor=request.user)

    # Update annotations of existing neuron to have only over set
    _update_neuron_annotations(project_id, request.user, neuron.id,
            upstream_annotation_map)

    # Update all reviews of the treenodes that are moved to a new neuron to
    # refer to the new skeleton.
    Review.objects.filter(treenode_id__in=change_list).update(skeleton=new_skeleton)

    # Update annotations of under skeleton
    _annotate_entities(project_id, [new_neuron.id], downstream_annotation_map)

    # Log the location of the node at which the split was done
    location = (treenode.location_x, treenode.location_y, treenode.location_z)
    insert_into_log(project_id, request.user.id, "split_skeleton", location,
                    "Split skeleton with ID {0} (neuron: {1})".format( skeleton_id, neuron.name ) )

    return HttpResponse(json.dumps({}), content_type='text/json')