def _export_review_skeleton(project_id=None, skeleton_id=None, format=None):
    """ Returns a list of segments for the requested skeleton. Each segment
    contains information about the review status of this part of the skeleton.
    """
    # Get all treenodes of the requested skeleton
    treenodes = Treenode.objects.filter(skeleton_id=skeleton_id).values_list(
            'id', 'location', 'parent_id')
    # Get all reviews for the requested skeleton
    reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])

    # Add each treenode to a networkx graph and attach reviewer information to
    # it.
    g = nx.DiGraph()
    reviewed = set()
    for t in treenodes:
        loc = Double3D.from_str(t[1])
        # While at it, send the reviewer IDs, which is useful to iterate fwd
        # to the first unreviewed node in the segment.
        g.add_node(t[0], {'id': t[0], 'x': loc.x, 'y': loc.y, 'z': loc.z,
                          'rids': reviews[t[0]]})
        if reviews[t[0]]:
            reviewed.add(t[0])
        if t[2]:  # if parent
            g.add_edge(t[2], t[0])  # edge from parent to child
        else:
            root_id = t[0]

    # Create all sequences, as long as possible and always from end towards root
    distances = edge_count_to_root(g, root_node=root_id)  # distance in number of edges from root
    seen = set()
    sequences = []
    # Iterate end nodes sorted from highest to lowest distance to root
    endNodeIDs = (nID for nID in g.nodes() if 0 == len(g.successors(nID)))
    for nodeID in sorted(endNodeIDs, key=distances.get, reverse=True):
        sequence = [g.node[nodeID]]
        parents = g.predecessors(nodeID)
        while parents:
            parentID = parents[0]
            sequence.append(g.node[parentID])
            if parentID in seen:
                break
            seen.add(parentID)
            parents = g.predecessors(parentID)
        if len(sequence) > 1:
            sequences.append(sequence)

    # Calculate status
    segments = []
    for sequence in sorted(sequences, key=len, reverse=True):
        segments.append({
            'id': len(segments),
            'sequence': sequence,
            'status': '%.2f' % (100.0 * sum(1 for node in sequence
                                            if node['id'] in reviewed) / len(sequence)),
            'nr_nodes': len(sequence)
        })
    return segments
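# Illustration only, not part of the module: the 'rids' attached to each node
# above exist so that a client can jump forward to the first unreviewed node of
# a segment. A minimal sketch of that lookup (the helper name
# `first_unreviewed_node` is hypothetical), assuming the segment structure
# returned by _export_review_skeleton:
def first_unreviewed_node(segment):
    for node in segment['sequence']:
        if not node['rids']:
            return node
    return None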
def fetch_treenodes(request, project_id=None, skeleton_id=None, with_reviewers=None):
    """ Fetch the topology only, optionally with the reviewer IDs. """
    skeleton_id = int(skeleton_id)
    cursor = connection.cursor()
    cursor.execute('''
    SELECT id, parent_id
    FROM treenode
    WHERE skeleton_id = %s
    ''' % skeleton_id)

    if with_reviewers:
        reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])
        treenode_data = tuple([r[0], r[1], reviews.get(r[0], [])]
                              for r in cursor.fetchall())
    else:
        treenode_data = tuple(cursor.fetchall())

    return HttpResponse(json.dumps(treenode_data))
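# The query above interpolates skeleton_id directly into the SQL string; this
# is safe only because of the preceding int() cast. A minimal sketch of a
# parameterized variant (the helper name `_fetch_topology` is hypothetical),
# letting the database driver handle quoting instead:
def _fetch_topology(cursor, skeleton_id):
    cursor.execute('''
    SELECT id, parent_id
    FROM treenode
    WHERE skeleton_id = %s
    ''', (skeleton_id,))
    return cursor.fetchall()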
def _skeleton_graph(project_id, skeleton_ids, confidence_threshold, bandwidth,
        expand, compute_risk, cable_spread, path_confluence):
    """ Assumes all skeleton_ids belong to project_id. """
    skeletons_string = ",".join(str(int(x)) for x in skeleton_ids)
    cursor = connection.cursor()

    # Fetch all treenodes of all skeletons
    cursor.execute('''
    SELECT id, parent_id, confidence, skeleton_id,
           location_x, location_y, location_z
    FROM treenode
    WHERE skeleton_id IN (%s)
    ''' % skeletons_string)
    rows = tuple(cursor.fetchall())

    # Each skeleton is represented with a DiGraph
    arbors = defaultdict(nx.DiGraph)

    # Get reviewers for the requested skeletons
    reviews = get_treenodes_to_reviews(skeleton_ids=skeleton_ids)

    # Create a DiGraph for every skeleton
    for row in rows:
        arbors[row[3]].add_node(row[0], {'reviewer_ids': reviews.get(row[0], [])})

    # Dictionary of skeleton IDs vs list of DiGraph instances
    arbors = split_by_confidence_and_add_edges(confidence_threshold, arbors, rows)

    # Fetch all synapses
    relations = {'presynaptic_to': -1, 'postsynaptic_to': -1}
    for r in Relation.objects.filter(
            relation_name__in=('presynaptic_to', 'postsynaptic_to'),
            project_id=project_id).values_list('relation_name', 'id'):
        relations[r[0]] = r[1]
    cursor.execute('''
    SELECT connector_id, relation_id, treenode_id, skeleton_id
    FROM treenode_connector
    WHERE skeleton_id IN (%s)
    ''' % skeletons_string)
    connectors = defaultdict(partial(defaultdict, list))
    skeleton_synapses = defaultdict(partial(defaultdict, list))
    for row in cursor.fetchall():
        connectors[row[0]][row[1]].append((row[2], row[3]))
        skeleton_synapses[row[3]][row[1]].append(row[2])

    # Cluster by synapses
    minis = defaultdict(list)  # skeleton_id vs list of minified graphs
    locations = None
    whole_arbors = arbors
    if expand and bandwidth > 0:
        locations = {row[0]: (row[4], row[5], row[6]) for row in rows}
        treenode_connector = defaultdict(list)
        for connector_id, pp in connectors.iteritems():
            for treenode_id in chain.from_iterable(
                    pp[relations['presynaptic_to']]):
                treenode_connector[treenode_id].append(
                    (connector_id, "presynaptic_to"))
            for treenode_id in chain.from_iterable(
                    pp[relations['postsynaptic_to']]):
                treenode_connector[treenode_id].append(
                    (connector_id, "postsynaptic_to"))
        arbors_to_expand = {skid: ls for skid, ls in arbors.iteritems()
                            if skid in expand}
        expanded_arbors, minis = split_by_synapse_domain(
            bandwidth, locations, arbors_to_expand, treenode_connector, minis)
        arbors.update(expanded_arbors)

    # Obtain neuron names
    cursor.execute('''
    SELECT cici.class_instance_a, ci.name
    FROM class_instance ci,
         class_instance_class_instance cici,
         relation r
    WHERE cici.class_instance_a IN (%s)
      AND cici.class_instance_b = ci.id
      AND cici.relation_id = r.id
      AND r.relation_name = 'model_of'
    ''' % skeletons_string)
    names = dict(cursor.fetchall())

    # A DiGraph representing the connections between the arbors (every node is an arbor)
    circuit = nx.DiGraph()

    for skid, digraphs in arbors.iteritems():
        base_label = names[skid]
        tag = len(digraphs) > 1
        i = 0
        for g in digraphs:
            if g.number_of_nodes() == 0:
                #print "no nodes in g, from skeleton ID #%s" % skid
                continue
            if tag:
                label = "%s [%s]" % (base_label, i + 1)
            else:
                label = base_label
            circuit.add_node(g, {
                'id': "%s_%s" % (skid, i + 1),
                'label': label,
                'skeleton_id': skid,
                'node_count': len(g),
                'node_reviewed_count': sum(
                    1 for v in g.node.itervalues()
                    if 0 != len(v.get('reviewer_ids', []))),
                # TODO when bandwidth > 0, not all nodes are included.
                # They will be included when the bandwidth is computed with an
                # O(n) algorithm rather than the current O(n^2)
                'branch': False})
            i += 1

    # Define edges between arbors, with number of synapses as an edge property
    for c in connectors.itervalues():
        for pre_treenode, pre_skeleton in c[relations['presynaptic_to']]:
            for pre_arbor in arbors.get(pre_skeleton, ()):
                if pre_treenode in pre_arbor:
                    # Found the DiGraph representing an arbor derived from the
                    # skeleton to which the presynaptic treenode belongs.
                    for post_treenode, post_skeleton in c[relations['postsynaptic_to']]:
                        for post_arbor in arbors.get(post_skeleton, ()):
                            if post_treenode in post_arbor:
                                # Found the DiGraph representing an arbor derived
                                # from the skeleton to which the postsynaptic
                                # treenode belongs.
                                edge_props = circuit.get_edge_data(pre_arbor, post_arbor)
                                if edge_props:
                                    edge_props['c'] += 1
                                    edge_props['pre_treenodes'].append(pre_treenode)
                                    edge_props['post_treenodes'].append(post_treenode)
                                else:
                                    circuit.add_edge(pre_arbor, post_arbor, {
                                        'c': 1,
                                        'pre_treenodes': [pre_treenode],
                                        'post_treenodes': [post_treenode],
                                        'arrow': 'triangle',
                                        'directed': True})
                                break
                    break

    if compute_risk and bandwidth <= 0:
        # Compute synapse risk:
        # Compute synapse centrality of every node in every arbor that has synapses
        for skeleton_id, arbors in whole_arbors.iteritems():
            synapses = skeleton_synapses[skeleton_id]
            pre = synapses[relations['presynaptic_to']]
            post = synapses[relations['postsynaptic_to']]
            for arbor in arbors:
                # The subset of synapses that belong to the fraction of the original arbor
                pre_sub = tuple(treenodeID for treenodeID in pre if treenodeID in arbor)
                post_sub = tuple(treenodeID for treenodeID in post if treenodeID in arbor)

                totalInputs = len(pre_sub)
                totalOutputs = len(post_sub)
                tc = {treenodeID: Counts() for treenodeID in arbor}

                for treenodeID in pre_sub:
                    tc[treenodeID].outputs += 1

                for treenodeID in post_sub:
                    tc[treenodeID].inputs += 1

                # Update the nPossibleIOPaths field in the Counts instance of each treenode
                _node_centrality_by_synapse(arbor, tc, totalOutputs, totalInputs)

                arbor.treenode_synapse_counts = tc

        if not locations:
            locations = {row[0]: (row[4], row[5], row[6]) for row in rows}

        # Estimate the risk factor of the edge between two arbors,
        # as a function of the number of synapses and their location within the arbor.
        # Algorithm by Casey Schneider-Mizell
        # Implemented by Albert Cardona
        for pre_arbor, post_arbor, edge_props in circuit.edges_iter(data=True):
            if pre_arbor == post_arbor:
                # Signal autapse
                edge_props['risk'] = -2
                continue

            try:
                spanning = spanning_tree(post_arbor, edge_props['post_treenodes'])
                #for arbor in whole_arbors[circuit[post_arbor]['skeleton_id']]:
                #    if post_arbor == arbor:
                #        tc = arbor.treenode_synapse_counts
                tc = post_arbor.treenode_synapse_counts
                count = spanning.number_of_nodes()
                if count < 3:
                    median_synapse_centrality = sum(
                        tc[treenodeID].synapse_centrality
                        for treenodeID in spanning.nodes_iter()) / count
                else:
                    median_synapse_centrality = sorted(
                        tc[treenodeID].synapse_centrality
                        for treenodeID in spanning.nodes_iter())[count / 2]
                cable = cable_length(spanning, locations)
                if -1 == median_synapse_centrality:
                    # Signal not computable
                    edge_props['risk'] = -1
                else:
                    edge_props['risk'] = 1.0 / sqrt(
                        pow(cable / cable_spread, 2)
                        + pow(median_synapse_centrality / path_confluence, 2))
                    # NOTE: should subtract 1 from median_synapse_centrality,
                    # but not doing it here to avoid potential divisions by zero
            except Exception as e:
                print >> sys.stderr, e
                # Signal error when computing
                edge_props['risk'] = -3

    if expand and bandwidth > 0:
        # Add edges between circuit nodes that represent different domains of the same neuron
        for skeleton_id, list_mini in minis.iteritems():
            for mini in list_mini:
                for node in mini.nodes_iter():
                    g = mini.node[node]['g']
                    if 1 == len(g) and g.nodes_iter(data=True).next()[1].get('branch'):
                        # A branch node that was preserved in the minified arbor
                        circuit.add_node(g, {
                            'id': '%s-%s' % (skeleton_id, node),
                            'skeleton_id': skeleton_id,
                            'label': "",  # "%s [%s]" % (names[skeleton_id], node),
                            'node_count': 1,
                            'branch': True})
                for node1, node2 in mini.edges_iter():
                    g1 = mini.node[node1]['g']
                    g2 = mini.node[node2]['g']
                    circuit.add_edge(g1, g2, {
                        'c': 10,
                        'arrow': 'none',
                        'directed': False})

    return circuit
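# Illustration only: the edge 'risk' assigned above reduces to the closed form
# below, with cable length normalized by cable_spread and the median synapse
# centrality normalized by path_confluence. The helper name `edge_risk` is
# hypothetical and not part of the module.
from math import sqrt

def edge_risk(cable, median_synapse_centrality, cable_spread, path_confluence):
    return 1.0 / sqrt(pow(cable / cable_spread, 2)
                      + pow(median_synapse_centrality / path_confluence, 2))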
def _skeleton_for_3d_viewer(skeleton_id, project_id, with_connectors=True, lean=0, all_field=False):
    """ with_connectors: when False, connectors are not returned
        lean: when not zero, both connectors and tags are returned as empty arrays.
    """
    skeleton_id = int(skeleton_id)  # sanitize
    cursor = connection.cursor()

    # Fetch the neuron name
    cursor.execute(
        '''SELECT name
           FROM class_instance ci,
                class_instance_class_instance cici
           WHERE cici.class_instance_a = %s
             AND cici.class_instance_b = ci.id
        ''' % skeleton_id)
    row = cursor.fetchone()
    if not row:
        # Check that the skeleton exists
        cursor.execute('''SELECT id FROM class_instance WHERE id=%s''' % skeleton_id)
        if not cursor.fetchone():
            raise Exception("Skeleton #%s doesn't exist!" % skeleton_id)
        else:
            raise Exception("No neuron found for skeleton #%s" % skeleton_id)
    name = row[0]

    if all_field:
        added_fields = ', creation_time, edition_time'
    else:
        added_fields = ''

    # Fetch all nodes, with their tags if any
    cursor.execute(
        '''SELECT id, parent_id, user_id,
                  location_x, location_y, location_z,
                  radius, confidence %s
           FROM treenode
           WHERE skeleton_id = %s
        ''' % (added_fields, skeleton_id))
    # array of properties: id, parent_id, user_id, x, y, z, radius, confidence
    nodes = tuple(cursor.fetchall())

    tags = defaultdict(list)  # node ID vs list of tags
    connectors = []

    # Get all reviews for this skeleton
    if all_field:
        reviews = get_treenodes_to_reviews_with_time(skeleton_ids=[skeleton_id])
    else:
        reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])

    if 0 == lean:  # meaning not lean
        # Text tags
        cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='labeled_as'" % int(project_id))
        labeled_as = cursor.fetchall()[0][0]

        cursor.execute(
            '''SELECT treenode_class_instance.treenode_id, class_instance.name
               FROM treenode, class_instance, treenode_class_instance
               WHERE treenode.skeleton_id = %s
                 AND treenode.id = treenode_class_instance.treenode_id
                 AND treenode_class_instance.class_instance_id = class_instance.id
                 AND treenode_class_instance.relation_id = %s
            ''' % (skeleton_id, labeled_as))
        for row in cursor.fetchall():
            tags[row[1]].append(row[0])

        if with_connectors:
            if all_field:
                added_fields = ', c.creation_time'
            else:
                added_fields = ''

            # Fetch all connectors with their partner treenode IDs
            cursor.execute(
                '''SELECT tc.treenode_id, tc.connector_id, r.relation_name,
                          c.location_x, c.location_y, c.location_z %s
                   FROM treenode_connector tc,
                        connector c,
                        relation r
                   WHERE tc.skeleton_id = %s
                     AND tc.connector_id = c.id
                     AND tc.relation_id = r.id
                ''' % (added_fields, skeleton_id))
            # Above, purposefully ignoring connector tags. Would require a left
            # outer join on the inner join of connector_class_instance and
            # class_instance, and frankly connector tags are pointless in the
            # 3d viewer.

            # List of (treenode_id, connector_id, relation_id, x, y, z), with
            # relation_id replaced by 0 (presynaptic) or 1 (postsynaptic)
            # 'presynaptic_to' has an 'r' at position 1:
            for row in cursor.fetchall():
                x, y, z = imap(float, (row[3], row[4], row[5]))
                connectors.append((row[0], row[1], 0 if 'r' == row[2][1] else 1, x, y, z, row[6]))
            return name, nodes, tags, connectors, reviews

    return name, nodes, tags, connectors, reviews
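# Illustration only: the third element of each connector tuple built above is
# 0 for presynaptic_to and 1 for postsynaptic_to, derived from the second
# character of the relation name ('presynaptic_to' has an 'r' at position 1).
# A hypothetical decoder for consumers of that tuple:
def relation_from_flag(flag):
    return 'presynaptic_to' if 0 == flag else 'postsynaptic_to'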
def _skeleton_graph(project_id, skeleton_ids, confidence_threshold, bandwidth,
        expand, compute_risk, cable_spread, path_confluence,
        pre_rel='presynaptic_to', post_rel='postsynaptic_to'):
    """ Assumes all skeleton_ids belong to project_id. """
    skeletons_string = ",".join(str(int(x)) for x in skeleton_ids)
    cursor = connection.cursor()

    # Fetch all treenodes of all skeletons
    cursor.execute('''
    SELECT id, parent_id, confidence, skeleton_id,
           location_x, location_y, location_z
    FROM treenode
    WHERE skeleton_id IN (%s)
    ''' % skeletons_string)
    rows = tuple(cursor.fetchall())

    # Each skeleton is represented with a DiGraph
    arbors = defaultdict(nx.DiGraph)

    # Get reviewers for the requested skeletons
    reviews = get_treenodes_to_reviews(skeleton_ids=skeleton_ids)

    # Create a DiGraph for every skeleton
    for row in rows:
        arbors[row[3]].add_node(row[0], {'reviewer_ids': reviews.get(row[0], [])})

    # Dictionary of skeleton IDs vs list of DiGraph instances
    arbors = split_by_confidence_and_add_edges(confidence_threshold, arbors, rows)

    # Fetch all synapses
    relations = get_relation_to_id_map(project_id, cursor=cursor)
    cursor.execute('''
    SELECT connector_id, relation_id, treenode_id, skeleton_id
    FROM treenode_connector
    WHERE skeleton_id IN (%s)
      AND (relation_id = %s OR relation_id = %s)
    ''' % (skeletons_string, relations[pre_rel], relations[post_rel]))
    connectors = defaultdict(partial(defaultdict, list))
    skeleton_synapses = defaultdict(partial(defaultdict, list))
    for row in cursor.fetchall():
        connectors[row[0]][row[1]].append((row[2], row[3]))
        skeleton_synapses[row[3]][row[1]].append(row[2])

    # Cluster by synapses
    minis = defaultdict(list)  # skeleton_id vs list of minified graphs
    locations = None
    whole_arbors = arbors
    if expand and bandwidth > 0:
        locations = {row[0]: (row[4], row[5], row[6]) for row in rows}
        treenode_connector = defaultdict(list)
        for connector_id, pp in connectors.items():
            for treenode_id in chain.from_iterable(pp[relations[pre_rel]]):
                treenode_connector[treenode_id].append((connector_id, pre_rel))
            for treenode_id in chain.from_iterable(pp[relations[post_rel]]):
                treenode_connector[treenode_id].append((connector_id, post_rel))
        arbors_to_expand = {skid: ls for skid, ls in arbors.items()
                            if skid in expand}
        expanded_arbors, minis = split_by_synapse_domain(bandwidth, locations,
                arbors_to_expand, treenode_connector, minis)
        arbors.update(expanded_arbors)

    # Obtain neuron names
    cursor.execute('''
    SELECT cici.class_instance_a, ci.name
    FROM class_instance ci,
         class_instance_class_instance cici
    WHERE cici.class_instance_a IN (%s)
      AND cici.class_instance_b = ci.id
      AND cici.relation_id = %s
    ''' % (skeletons_string, relations['model_of']))
    names = dict(cursor.fetchall())

    # A DiGraph representing the connections between the arbors (every node is an arbor)
    circuit = nx.DiGraph()

    for skid, digraphs in arbors.items():
        base_label = names[skid]
        tag = len(digraphs) > 1
        i = 0
        for g in digraphs:
            if g.number_of_nodes() == 0:
                continue
            if tag:
                label = "%s [%s]" % (base_label, i+1)
            else:
                label = base_label
            circuit.add_node(g, {
                'id': "%s_%s" % (skid, i+1),
                'label': label,
                'skeleton_id': skid,
                'node_count': len(g),
                'node_reviewed_count': sum(1 for v in g.node.values()
                                           if 0 != len(v.get('reviewer_ids', []))),
                # TODO when bandwidth > 0, not all nodes are included.
                # They will be included when the bandwidth is computed with an
                # O(n) algorithm rather than the current O(n^2)
                'branch': False})
            i += 1

    # Define edges between arbors, with number of synapses as an edge property
    for c in connectors.values():
        for pre_treenode, pre_skeleton in c[relations[pre_rel]]:
            for pre_arbor in arbors.get(pre_skeleton, ()):
                if pre_treenode in pre_arbor:
                    # Found the DiGraph representing an arbor derived from the
                    # skeleton to which the presynaptic treenode belongs.
                    for post_treenode, post_skeleton in c[relations[post_rel]]:
                        for post_arbor in arbors.get(post_skeleton, ()):
                            if post_treenode in post_arbor:
                                # Found the DiGraph representing an arbor derived
                                # from the skeleton to which the postsynaptic
                                # treenode belongs.
                                edge_props = circuit.get_edge_data(pre_arbor, post_arbor)
                                if edge_props:
                                    edge_props['c'] += 1
                                    edge_props['pre_treenodes'].append(pre_treenode)
                                    edge_props['post_treenodes'].append(post_treenode)
                                else:
                                    circuit.add_edge(pre_arbor, post_arbor, {
                                        'c': 1,
                                        'pre_treenodes': [pre_treenode],
                                        'post_treenodes': [post_treenode],
                                        'arrow': 'triangle',
                                        'directed': True})
                                break
                    break

    if compute_risk and bandwidth <= 0:
        # Compute synapse risk:
        # Compute synapse centrality of every node in every arbor that has synapses
        for skeleton_id, arbors in whole_arbors.items():
            synapses = skeleton_synapses[skeleton_id]
            pre = synapses[relations[pre_rel]]
            post = synapses[relations[post_rel]]
            for arbor in arbors:
                # The subset of synapses that belong to the fraction of the original arbor
                pre_sub = tuple(treenodeID for treenodeID in pre if treenodeID in arbor)
                post_sub = tuple(treenodeID for treenodeID in post if treenodeID in arbor)

                totalInputs = len(pre_sub)
                totalOutputs = len(post_sub)
                tc = {treenodeID: Counts() for treenodeID in arbor}

                for treenodeID in pre_sub:
                    tc[treenodeID].outputs += 1

                for treenodeID in post_sub:
                    tc[treenodeID].inputs += 1

                # Update the nPossibleIOPaths field in the Counts instance of each treenode
                _node_centrality_by_synapse(arbor, tc, totalOutputs, totalInputs)

                arbor.treenode_synapse_counts = tc

        if not locations:
            locations = {row[0]: (row[4], row[5], row[6]) for row in rows}

        # Estimate the risk factor of the edge between two arbors,
        # as a function of the number of synapses and their location within the arbor.
        # Algorithm by Casey Schneider-Mizell
        # Implemented by Albert Cardona
        for pre_arbor, post_arbor, edge_props in circuit.edges_iter(data=True):
            if pre_arbor == post_arbor:
                # Signal autapse
                edge_props['risk'] = -2
                continue

            try:
                spanning = spanning_tree(post_arbor, edge_props['post_treenodes'])
                #for arbor in whole_arbors[circuit[post_arbor]['skeleton_id']]:
                #    if post_arbor == arbor:
                #        tc = arbor.treenode_synapse_counts
                tc = post_arbor.treenode_synapse_counts
                count = spanning.number_of_nodes()
                if count < 3:
                    median_synapse_centrality = sum(tc[treenodeID].synapse_centrality
                            for treenodeID in spanning.nodes_iter()) / count
                else:
                    median_synapse_centrality = sorted(tc[treenodeID].synapse_centrality
                            for treenodeID in spanning.nodes_iter())[count / 2]
                cable = cable_length(spanning, locations)
                if -1 == median_synapse_centrality:
                    # Signal not computable
                    edge_props['risk'] = -1
                else:
                    edge_props['risk'] = 1.0 / sqrt(pow(cable / cable_spread, 2)
                            + pow(median_synapse_centrality / path_confluence, 2))
                    # NOTE: should subtract 1 from median_synapse_centrality,
                    # but not doing it here to avoid potential divisions by zero
            except Exception as e:
                logging.getLogger(__name__).error(e)
                # Signal error when computing
                edge_props['risk'] = -3

    if expand and bandwidth > 0:
        # Add edges between circuit nodes that represent different domains of the same neuron
        for skeleton_id, list_mini in minis.items():
            for mini in list_mini:
                for node in mini.nodes_iter():
                    g = mini.node[node]['g']
                    if 1 == len(g) and next(g.nodes_iter(data=True))[1].get('branch'):
                        # A branch node that was preserved in the minified arbor
                        circuit.add_node(g, {
                            'id': '%s-%s' % (skeleton_id, node),
                            'skeleton_id': skeleton_id,
                            'label': "",  # "%s [%s]" % (names[skeleton_id], node),
                            'node_count': 1,
                            'branch': True})
                for node1, node2 in mini.edges_iter():
                    g1 = mini.node[node1]['g']
                    g2 = mini.node[node2]['g']
                    circuit.add_edge(g1, g2, {
                        'c': 10,
                        'arrow': 'none',
                        'directed': False})

    return circuit
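# Hypothetical consumer sketch, not part of the module: the circuit returned
# above has one node per arbor DiGraph and synapse counts on its edges, so a
# JSON-friendly summary can be built from the node and edge attributes alone.
# This relies on the same networkx 1.x iterators used by the code above.
def circuit_summary(circuit):
    nodes = [props for _, props in circuit.nodes_iter(data=True)]
    edges = [{'source': circuit.node[pre]['id'],
              'target': circuit.node[post]['id'],
              'synapses': props['c']}
             for pre, post, props in circuit.edges_iter(data=True)]
    return {'nodes': nodes, 'edges': edges}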
def list_treenode_table(request, project_id=None):
    stack_id = request.POST.get('stack_id', None)
    specified_skeleton_count = request.POST.get('skeleton_nr', 0)
    display_start = request.POST.get('iDisplayStart', 0)
    display_length = request.POST.get('iDisplayLength', -1)
    should_sort = request.POST.get('iSortCol_0', None)
    filter_nodetype = request.POST.get('sSearch_1', None)
    filter_labels = request.POST.get('sSearch_2', None)

    relation_map = get_relation_to_id_map(project_id)

    response_on_error = ''
    try:
        def search_query_is_empty():
            if specified_skeleton_count == 0:
                return True
            first_skeleton_id = request.POST.get('skeleton_0', None)
            if first_skeleton_id is None:
                return True
            elif upper(first_skeleton_id) in ['NONE', 'NULL']:
                return True
            return False

        if search_query_is_empty():
            return HttpResponse(json.dumps({
                'iTotalRecords': 0,
                'iTotalDisplayRecords': 0,
                'aaData': []}))
        else:
            response_on_error = 'Could not fetch %s skeleton IDs.' % \
                    specified_skeleton_count
            skeleton_ids = [int(request.POST.get('skeleton_%s' % i, 0))
                            for i in range(int(specified_skeleton_count))]

            if should_sort:
                column_count = int(request.POST.get('iSortingCols', 0))
                sorting_directions = [request.POST.get('sSortDir_%d' % d)
                                      for d in range(column_count)]
                sorting_directions = map(lambda d:
                        '-' if upper(d) == 'DESC' else '', sorting_directions)

                fields = ['tid', 'type', '"treenode"."labels"', 'confidence',
                          'x', 'y', 'z', '"treenode"."section"', 'radius',
                          'username', 'last_modified']
                # TODO type field not supported.
                sorting_index = [int(request.POST.get('iSortCol_%d' % d))
                                 for d in range(column_count)]
                sorting_cols = map(lambda i: fields[i], sorting_index)

            response_on_error = 'Could not get the list of treenodes.'
            t = Treenode.objects \
                .filter(
                    project=project_id,
                    skeleton_id__in=skeleton_ids) \
                .extra(
                    tables=['auth_user'],
                    where=['"treenode"."user_id" = "auth_user"."id"'],
                    select={
                        'tid': '"treenode"."id"',
                        'radius': '"treenode"."radius"',
                        'confidence': '"treenode"."confidence"',
                        'parent_id': '"treenode"."parent_id"',
                        'user_id': '"treenode"."user_id"',
                        'edition_time': '"treenode"."edition_time"',
                        'x': '"treenode"."location_x"',
                        'y': '"treenode"."location_y"',
                        'z': '"treenode"."location_z"',
                        'username': '"auth_user"."username"',
                        'last_modified': 'to_char("treenode"."edition_time", \'DD-MM-YYYY HH24:MI\')'
                    }) \
                .distinct()
            # Rationale for using .extra():
            # Since we don't use .order_by() for ordering, extra fields are not
            # included in the SELECT statement, and so .distinct() will work as
            # intended. See http://tinyurl.com/dj-distinct

            if should_sort:
                t = t.extra(order_by=[di + col for (di, col) in
                                      zip(sorting_directions, sorting_cols)])

            if int(display_length) == -1:
                treenodes = list(t[display_start:])
            else:
                treenodes = list(t[display_start:display_start + display_length])

            # The number of results to be displayed should include items that
            # are filtered out.
            row_count = len(treenodes)

            # Filter out irrelevant treenodes if a label has been specified
            if 'labeled_as' in relation_map:
                response_on_error = 'Could not retrieve labels for project.'
                project_labels = TreenodeClassInstance.objects.filter(
                    project=project_id,
                    relation=relation_map['labeled_as']).values(
                        'treenode', 'class_instance__name')
                labels_by_treenode = {}  # Key: Treenode ID, Value: List of labels.
                for label in project_labels:
                    if label['treenode'] not in labels_by_treenode:
                        labels_by_treenode[label['treenode']] = [label['class_instance__name']]
                    else:
                        labels_by_treenode[label['treenode']].append(label['class_instance__name'])

                if filter_labels:
                    def label_filter(treenode):
                        if treenode.id not in labels_by_treenode:
                            return False
                        return upper(filter_labels) in upper(
                            ' '.join(labels_by_treenode[treenode.tid]))
                    treenodes = filter(label_filter, treenodes)

            # Filter out irrelevant treenodes if a node type has been specified.
            # Count treenode's children to derive treenode types. The number of
            # children a treenode has determines its type. Types:
            # R : root (parent = null)
            # S : slab (has one child)
            # B : branch (has more than one child)
            # L : leaf (has no children)
            # X : undefined (uh oh!)
            if 0 == display_start and -1 == display_length:
                # All nodes are loaded: determine child_count from loaded nodes
                child_count = {}
                for treenode in treenodes:
                    if treenode.parent is None:
                        continue
                    n_children = child_count.get(treenode.parent_id, 0)
                    child_count[treenode.parent_id] = n_children + 1
            else:
                # Query for parents
                response_on_error = 'Could not retrieve treenode parents.'
                child_count_query = Treenode.objects.filter(
                    project=project_id,
                    skeleton_id__in=skeleton_ids).annotate(
                        child_count=Count('children'))
                child_count = {}
                for treenode in child_count_query:
                    child_count[treenode.id] = treenode.child_count

            # Determine type
            for treenode in treenodes:
                if None == treenode.parent_id:
                    treenode.nodetype = 'R'  # Root
                    continue
                children = child_count.get(treenode.tid, 0)
                if children == 1:
                    treenode.nodetype = 'S'  # Slab
                elif children == 0:
                    treenode.nodetype = 'L'  # Leaf
                elif children > 1:
                    treenode.nodetype = 'B'  # Branch
                else:
                    treenode.nodetype = 'X'  # Unknown, can never happen

            # Now that we've assigned node types, filter based on them:
            if filter_nodetype:
                filter_nodetype = upper(filter_nodetype)
                treenodes = [t for t in treenodes if t.nodetype in filter_nodetype]

            users = dict(User.objects.all().values_list('id', 'username'))
            users[-1] = "None"  # Rather than AnonymousUser

            # Get all reviews for the current treenode set
            treenode_ids = [t.id for t in treenodes]
            treenode_to_reviews = get_treenodes_to_reviews(treenode_ids,
                                                           umap=lambda r: users[r])

            if stack_id:
                response_on_error = 'Could not retrieve resolution and translation ' \
                        'parameters for project.'
                resolution = get_object_or_404(Stack, id=int(stack_id)).resolution
                translation = get_object_or_404(ProjectStack,
                        stack=int(stack_id), project=project_id).translation
            else:
                resolution = Double3D(1.0, 1.0, 1.0)
                translation = Double3D(0.0, 0.0, 0.0)

            def formatTreenode(tn):
                row = [str(tn.tid)]
                row.append(tn.nodetype)
                if tn.tid in labels_by_treenode:
                    row.append(', '.join(map(str, labels_by_treenode[tn.tid])))
                else:
                    row.append('')
                row.append(str(tn.confidence))
                row.append('%.2f' % tn.x)
                row.append('%.2f' % tn.y)
                row.append('%.2f' % tn.z)
                row.append(int((tn.z - translation.z) / resolution.z))
                row.append(str(tn.radius))
                row.append(tn.username)
                row.append(tn.last_modified)
                row.append(', '.join(treenode_to_reviews.get(tn.id, ["None"])))
                return row

            result = {'iTotalRecords': row_count, 'iTotalDisplayRecords': row_count}
            response_on_error = 'Could not format output.'
            result['aaData'] = map(formatTreenode, treenodes)

            return HttpResponse(json.dumps(result))
    except Exception as e:
        raise Exception(response_on_error + ':' + str(e))
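# Illustration only: the R/S/B/L classification applied above, factored into a
# tiny standalone helper (the name `node_type` is hypothetical). A node with no
# parent is a root; otherwise the type follows from its child count.
def node_type(parent_id, n_children):
    if parent_id is None:
        return 'R'  # Root
    if n_children == 0:
        return 'L'  # Leaf
    if n_children == 1:
        return 'S'  # Slab
    return 'B'  # Branch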
def _export_review_skeleton(project_id=None, skeleton_id=None, format=None,
                            subarbor_node_id=None):
    """ Returns a list of segments for the requested skeleton. Each segment
    contains information about the review status of this part of the skeleton.
    If a valid subarbor_node_id is given, only data for the sub-arbor is
    returned that starts at this node.
    """
    # Get all treenodes of the requested skeleton
    treenodes = Treenode.objects.filter(skeleton_id=skeleton_id).values_list(
        'id', 'parent_id', 'location_x', 'location_y', 'location_z')
    # Get all reviews for the requested skeleton
    reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])

    # Add each treenode to a networkx graph and attach reviewer information to
    # it.
    g = nx.DiGraph()
    reviewed = set()
    for t in treenodes:
        # While at it, send the reviewer IDs, which is useful to iterate fwd
        # to the first unreviewed node in the segment.
        g.add_node(t[0], {'id': t[0], 'x': t[2], 'y': t[3], 'z': t[4],
                          'rids': reviews[t[0]]})
        if reviews[t[0]]:
            reviewed.add(t[0])
        if t[1]:  # if parent
            g.add_edge(t[1], t[0])  # edge from parent to child
        else:
            root_id = t[0]

    if subarbor_node_id and subarbor_node_id != root_id:
        # Make sure the subarbor node ID (if any) is part of this skeleton
        if subarbor_node_id not in g:
            raise ValueError("Supplied subarbor node ID (%s) is not part of "
                             "provided skeleton (%s)" % (subarbor_node_id, skeleton_id))
        # Remove connection to parent
        parent = g.predecessors(subarbor_node_id)[0]
        g.remove_edge(parent, subarbor_node_id)
        # Remove all nodes that are upstream from the subarbor node
        to_delete = set()
        to_lookat = [root_id]
        while to_lookat:
            n = to_lookat.pop()
            to_lookat.extend(g.successors(n))
            to_delete.add(n)
        g.remove_nodes_from(to_delete)
        # Replace root id with sub-arbor ID
        root_id = subarbor_node_id

    # Create all sequences, as long as possible and always from end towards root
    distances = edge_count_to_root(g, root_node=root_id)  # distance in number of edges from root
    seen = set()
    sequences = []
    # Iterate end nodes sorted from highest to lowest distance to root
    endNodeIDs = (nID for nID in g.nodes() if 0 == len(g.successors(nID)))
    for nodeID in sorted(endNodeIDs, key=distances.get, reverse=True):
        sequence = [g.node[nodeID]]
        parents = g.predecessors(nodeID)
        while parents:
            parentID = parents[0]
            sequence.append(g.node[parentID])
            if parentID in seen:
                break
            seen.add(parentID)
            parents = g.predecessors(parentID)
        if len(sequence) > 1:
            sequences.append(sequence)

    # Calculate status
    segments = []
    for sequence in sorted(sequences, key=len, reverse=True):
        segments.append({
            'id': len(segments),
            'sequence': sequence,
            'status': '%.2f' % (100.0 * sum(1 for node in sequence
                                            if node['id'] in reviewed) / len(sequence)),
            'nr_nodes': len(sequence)
        })
    return segments
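# Hypothetical usage sketch, not part of the module: list the segments of a
# sub-arbor that still contain unreviewed nodes, using the structure returned
# by _export_review_skeleton above.
def unreviewed_segments(project_id, skeleton_id, subarbor_node_id=None):
    segments = _export_review_skeleton(project_id, skeleton_id,
                                       subarbor_node_id=subarbor_node_id)
    return [s for s in segments if float(s['status']) < 100.0]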