def test_annotation_creation(self):
    """Annotating several entities at once must attach one annotation per
    entity, and `{n}`/`{nX}` placeholders must expand to running numbers."""
    self.fake_authentication()

    neuron_ids = [2365, 2381]

    # Neither neuron carries any annotation yet.
    for neuron_id in neuron_ids:
        annotations = create_annotation_query(self.test_project_id,
                {'neuron_id': neuron_id})
        self.assertEqual(len(annotations), 0)

    # Attach one identical annotation to both neurons.
    _annotate_entities(self.test_project_id, neuron_ids,
            {'myannotation': {'user_id': self.test_user_id}})

    # Both neurons now carry exactly that one annotation.
    for neuron_id in neuron_ids:
        annotations = create_annotation_query(self.test_project_id,
                {'neuron_id': neuron_id})
        self.assertEqual(len(annotations), 1)
        self.assertEqual(annotations[0].name, 'myannotation')

    # Attach a pattern annotation with two counters.
    _annotate_entities(self.test_project_id, neuron_ids,
            {'pattern {n9} test-{n}-annotation': {'user_id': self.test_user_id}})

    # First neuron: first expansion of each counter ({n9} -> 9, {n} -> 1).
    annotations = create_annotation_query(self.test_project_id,
            {'neuron_id': 2365}).order_by('name')
    self.assertEqual(len(annotations), 2)
    self.assertEqual(annotations[0].name, 'myannotation')
    self.assertEqual(annotations[1].name, 'pattern 9 test-1-annotation')

    # Second neuron: both counters incremented by one.
    annotations = create_annotation_query(self.test_project_id,
            {'neuron_id': 2381}).order_by('name')
    self.assertEqual(len(annotations), 2)
    self.assertEqual(annotations[0].name, 'myannotation')
    self.assertEqual(annotations[1].name, 'pattern 10 test-2-annotation')
def test_rename_neuron_fail(self):
    """Renaming must be refused for a neuron locked by a different user;
    the name and the log must remain unchanged."""
    self.fake_authentication()
    neuron_id = 362

    # Lock this neuron on behalf of another user (user ID 1).
    _annotate_entities(self.test_project_id, [neuron_id],
            {'locked': {'user_id': 1}})

    def get_log_count():
        return Log.objects.all().count()

    logs_before = get_log_count()
    name_before = ClassInstance.objects.get(id=neuron_id).name
    new_name = 'newname'
    self.assertFalse(name_before == new_name)

    url = '/%d/neurons/%s/rename' % (self.test_project_id, neuron_id)
    response = self.client.post(url, {'name': new_name})
    self.assertEqual(response.status_code, 200)
    parsed_response = json.loads(response.content.decode('utf-8'))

    # An error is reported and nothing was modified.
    self.assertTrue('error' in parsed_response)
    self.assertTrue(parsed_response['error'])
    self.assertEqual(name_before,
            ClassInstance.objects.get(id=neuron_id).name)
    self.assertEqual(logs_before, get_log_count())
def test_annotation_creation(self):
    """Annotations attached to multiple neurons at once are created per
    neuron, and `{n}`/`{nX}` placeholders expand to incrementing numbers."""
    self.fake_authentication()

    neuron_ids = [2365, 2381]

    # Initially there should be no annotations on either neuron.
    for nid in neuron_ids:
        self.assertEqual(0, len(create_annotation_query(
                self.test_project_id, {'neuron_id': nid})))

    # Give both neurons the same plain annotation.
    _annotate_entities(self.test_project_id, neuron_ids,
            {'myannotation': self.test_user_id})

    for nid in neuron_ids:
        aq = create_annotation_query(self.test_project_id, {'neuron_id': nid})
        self.assertEqual(1, len(aq))
        self.assertEqual('myannotation', aq[0].name)

    # Give both neurons a pattern annotation with two counters.
    _annotate_entities(self.test_project_id, neuron_ids,
            {'pattern {n9} test-{n}-annotation': self.test_user_id})

    # Each neuron ends up with the plain annotation plus its own
    # expansion of the pattern.
    expected_patterns = {
        2365: 'pattern 9 test-1-annotation',
        2381: 'pattern 10 test-2-annotation',
    }
    for nid, pattern_name in expected_patterns.items():
        aq = create_annotation_query(self.test_project_id,
                {'neuron_id': nid}).order_by('name')
        self.assertEqual(2, len(aq))
        self.assertEqual('myannotation', aq[0].name)
        self.assertEqual(pattern_name, aq[1].name)
def test_export_compact_skeleton_with_annotations(self):
    """The compact-skeleton export must include annotation link IDs when
    with_annotations is requested."""
    self.fake_authentication()
    skeleton_id = 373
    neuron_id = 374

    # Create one annotation so the export has a link to report.
    _, new_annotations = _annotate_entities(self.test_project_id,
            [neuron_id], {'myannotation': {'user_id': self.test_user_id}})
    new_annotation_link_id = new_annotations.pop()

    url = '/%d/%d/1/1/compact-skeleton' % (self.test_project_id, skeleton_id)
    response = self.client.get(url, {'with_annotations': True})
    self.assertEqual(response.status_code, 200)
    parsed_response = json.loads(response.content.decode('utf-8'))

    expected_response = [
        [[377, None, 3, 7620.0, 2890.0, 0.0, -1.0, 5],
         [403, 377, 3, 7840.0, 2380.0, 0.0, -1.0, 5],
         [405, 377, 3, 7390.0, 3510.0, 0.0, -1.0, 5],
         [407, 405, 3, 7080.0, 3960.0, 0.0, -1.0, 5],
         [409, 407, 3, 6630.0, 4330.0, 0.0, -1.0, 5]],
        [[377, 356, 1, 6730.0, 2700.0, 0.0],
         [409, 421, 1, 6260.0, 3990.0, 0.0]],
        {"uncertain end": [403]},
        [],
        [[new_annotation_link_id]],
    ]

    self.assertEqual(len(parsed_response), len(expected_response))
    # The first two sub-lists may come back in any order.
    six.assertCountEqual(self, parsed_response[0], expected_response[0])
    six.assertCountEqual(self, parsed_response[1], expected_response[1])
    # The remaining entries must match exactly.
    for index in (2, 3, 4):
        self.assertEqual(parsed_response[index], expected_response[index])
def test_export_compact_skeleton_with_annotations(self):
    """Exporting a compact skeleton with annotations enabled must return
    the freshly created annotation link ID in the last list entry."""
    self.fake_authentication()
    skeleton_id = 373
    neuron_id = 374

    # Annotate the neuron so an annotation link exists for the export.
    _, new_annotations = _annotate_entities(
            self.test_project_id, [neuron_id],
            {'myannotation': {'user_id': self.test_user_id}})
    new_annotation_link_id = new_annotations.pop()

    url = '/%d/%d/1/1/compact-skeleton' % (self.test_project_id, skeleton_id)
    response = self.client.get(url, {'with_annotations': True})
    self.assertEqual(response.status_code, 200)
    parsed_response = json.loads(response.content.decode('utf-8'))

    expected_nodes = [
        [377, None, 3, 7620.0, 2890.0, 0.0, -1.0, 5],
        [403, 377, 3, 7840.0, 2380.0, 0.0, -1.0, 5],
        [405, 377, 3, 7390.0, 3510.0, 0.0, -1.0, 5],
        [407, 405, 3, 7080.0, 3960.0, 0.0, -1.0, 5],
        [409, 407, 3, 6630.0, 4330.0, 0.0, -1.0, 5]]
    expected_links = [
        [377, 356, 1, 6730.0, 2700.0, 0.0],
        [409, 421, 1, 6260.0, 3990.0, 0.0]]
    expected_response = [expected_nodes, expected_links,
            {"uncertain end": [403]}, [], [[new_annotation_link_id]]]

    self.assertEqual(len(parsed_response), len(expected_response))
    # Row order of the first two entries is not guaranteed.
    six.assertCountEqual(self, parsed_response[0], expected_response[0])
    six.assertCountEqual(self, parsed_response[1], expected_response[1])
    self.assertEqual(parsed_response[2], expected_response[2])
    self.assertEqual(parsed_response[3], expected_response[3])
    self.assertEqual(parsed_response[4], expected_response[4])
def test_rename_neuron_fail(self):
    """Renaming a neuron that is locked by another user must fail, leave
    the name untouched and create no log entry."""
    self.fake_authentication()
    neuron_id = 362

    # Lock this neuron for another user (user ID 1).
    _annotate_entities(self.test_project_id, [neuron_id], {'locked': 1})

    # PEP 8: a named def instead of assigning a lambda.
    def count_logs():
        return Log.objects.all().count()

    log_count = count_logs()
    old_name = ClassInstance.objects.get(id=neuron_id).name
    new_name = 'newname'
    self.assertFalse(old_name == new_name)

    url = '/%d/neurons/%s/rename' % (self.test_project_id, neuron_id)
    response = self.client.post(url, {'name': new_name})
    self.assertEqual(response.status_code, 200)
    # Decode explicitly: response.content is bytes under Python 3 and
    # json.loads() only accepts str on older interpreters. This also
    # matches how the other rename test parses the response.
    parsed_response = json.loads(response.content.decode('utf-8'))

    # The request must be rejected with an error ...
    self.assertTrue('error' in parsed_response)
    self.assertTrue(parsed_response['error'])
    # ... and neither the neuron name nor the log may have changed.
    self.assertEqual(old_name, ClassInstance.objects.get(id=neuron_id).name)
    self.assertEqual(log_count, count_logs())
def split_skeleton(request, project_id=None):
    """Split a skeleton in two at a given treenode.

    The split is only possible if the neuron is not locked or if it is
    locked by the current user or if the current user belongs to the group
    of the user who locked it. Of course, the split is also possible if
    the current user is a super-user.

    POST parameters:
      treenode_id -- the node at which to split; it becomes the root of
          the new downstream skeleton.
      upstream_annotation_set[...] -- annotations the original neuron keeps.
      downstream_annotation_set[...] -- annotations for the new neuron.

    Returns an empty JSON object on success, or a JSON object with an
    'error' key when the split node is the root.
    """
    treenode_id = int(request.POST['treenode_id'])
    treenode = Treenode.objects.get(pk=treenode_id)
    skeleton_id = treenode.skeleton_id
    # Collect the two annotation sets from the list-style POST keys.
    # NOTE(review): QueryDict.iteritems() is Python 2 only — this breaks
    # under Python 3 (use items()); confirm the targeted interpreter.
    upstream_annotation_set = frozenset([v for k,v in request.POST.iteritems()
            if k.startswith('upstream_annotation_set[')])
    downstream_annotation_set = frozenset([v for k,v in request.POST.iteritems()
            if k.startswith('downstream_annotation_set[')])
    cursor = connection.cursor()
    # Check if the treenode is root!
    if not treenode.parent:
        return HttpResponse(json.dumps({'error': 'Can\'t split at the root node: it doesn\'t have a parent.'}))
    # Check if annotations are valid: one of the two sides has to keep
    # the complete annotation set of the neuron being split.
    if not check_annotations_on_split(project_id, skeleton_id,
            upstream_annotation_set, downstream_annotation_set):
        raise Exception("Annotation distribution is not valid for splitting. " \
                "One part has to keep the whole set of annotations!")
    skeleton = ClassInstance.objects.select_related('user').get(pk=skeleton_id)
    project_id=int(project_id)
    # retrieve neuron of this skeleton
    neuron = ClassInstance.objects.get(
            cici_via_b__relation__relation_name='model_of',
            cici_via_b__class_instance_a_id=skeleton_id)
    # Make sure the user has permissions to edit
    can_edit_class_instance_or_fail(request.user, neuron.id, 'neuron')
    # retrieve the id, parent_id of all nodes in the skeleton
    # with minimal ceremony
    # NOTE(review): skeleton_id is an int read from the treenode row, so
    # the interpolation below is not directly user-controlled; a
    # parameterized query would still be the safer convention.
    cursor.execute(''' SELECT id, parent_id FROM treenode WHERE skeleton_id=%s ''' % skeleton_id) # no need to sanitize
    # build the networkx graph from it
    graph = nx.DiGraph()
    for row in cursor.fetchall():
        graph.add_node( row[0] )
        if row[1]:
            # edge from parent_id to id
            graph.add_edge( row[1], row[0] )
    # find downstream nodes starting from target treenode_id
    # and generate the list of IDs to change, starting at treenode_id (inclusive)
    # NOTE(review): assumes bfs_tree(...).nodes() returns a list
    # (networkx 1.x); in networkx >= 2 it is a NodeView without append().
    change_list = nx.bfs_tree(graph, treenode_id).nodes()
    if not change_list:
        # When splitting an end node, the bfs_tree doesn't return any nodes,
        # which is surprising, because when the splitted tree has 2 or more
        # nodes the node at which the split is made is included in the list.
        change_list.append(treenode_id)
    # create a new skeleton
    new_skeleton = ClassInstance()
    new_skeleton.name = 'Skeleton'
    new_skeleton.project_id = project_id
    new_skeleton.user = skeleton.user # The same user that owned the skeleton to split
    new_skeleton.class_column = Class.objects.get(class_name='skeleton', project_id=project_id)
    new_skeleton.save()
    # Rename with the now-known ID appended.
    new_skeleton.name = 'Skeleton {0}'.format( new_skeleton.id ) # This could be done with a trigger in the database
    new_skeleton.save()
    # Create new neuron, owned by the same user, and name it after its ID.
    new_neuron = ClassInstance()
    new_neuron.name = 'Neuron'
    new_neuron.project_id = project_id
    new_neuron.user = skeleton.user
    new_neuron.class_column = Class.objects.get(class_name='neuron', project_id=project_id)
    new_neuron.save()
    new_neuron.name = 'Neuron %s' % str(new_neuron.id)
    new_neuron.save()
    # Assign the skeleton to new neuron via a 'model_of' relation.
    cici = ClassInstanceClassInstance()
    cici.class_instance_a = new_skeleton
    cici.class_instance_b = new_neuron
    cici.relation = Relation.objects.get(relation_name='model_of', project_id=project_id)
    cici.user = skeleton.user # The same user that owned the skeleton to split
    cici.project_id = project_id
    cici.save()
    # update skeleton_id of list in treenode table
    # This creates a lazy QuerySet that, upon calling update, returns a new QuerySet
    # that is then executed. It does NOT create an update SQL query for every treenode.
    tns = Treenode.objects.filter(id__in=change_list).update(skeleton=new_skeleton)
    # update the skeleton_id value of the treenode_connector table
    tc = TreenodeConnector.objects.filter(
            relation__relation_name__endswith = 'synaptic_to',
            treenode__in=change_list,
    ).update(skeleton=new_skeleton)
    # setting new root treenode's parent to null
    Treenode.objects.filter(id=treenode_id).update(parent=None, editor=request.user)
    # Update annotations of existing neuron to have only over set
    # NOTE(review): the test suite elsewhere calls _annotate_entities as
    # (project_id, entity_ids, annotation_map) without a user argument —
    # confirm this call matches the helper's actual signature.
    _update_neuron_annotations(project_id, request.user, neuron.id, upstream_annotation_set)
    # Update annotations of under skeleton
    _annotate_entities(project_id, request.user, [new_neuron.id], downstream_annotation_set)
    # Log the location of the node at which the split was done
    insert_into_log( project_id, request.user.id, "split_skeleton", treenode.location, "Split skeleton with ID {0} (neuron: {1})".format( skeleton_id, neuron.name ) )
    # NOTE(review): HttpResponse(mimetype=...) is Django < 1.7 API; newer
    # versions use content_type=. Confirm the targeted Django release.
    return HttpResponse(json.dumps({}), mimetype='text/json')