Example 1
def basic_graph(project_id, skeleton_ids):
    def newSynapseCounts():
        return [0, 0, 0, 0, 0]

    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    preID, postID = relations['presynaptic_to'], relations['postsynaptic_to']

    cursor.execute('''
    SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence)
    FROM treenode_connector t1,
         treenode_connector t2
    WHERE t1.skeleton_id IN (%s)
      AND t1.relation_id = %s
      AND t1.connector_id = t2.connector_id
      AND t2.relation_id = %s
    ''' % (','.join(map(str, skeleton_ids)), preID, postID))

    edges = defaultdict(partial(defaultdict, newSynapseCounts))
    for row in cursor.fetchall():
        edges[row[0]][row[1]][row[2] - 1] += 1

    return {'edges': tuple((pre, post, count) for pre, edge in edges.iteritems() for post, count in edge.iteritems())}

    '''
    return {'edges': [{'source': pre,
                       'target': post,
                       'weight': count} for pre, edge in edges.iteritems() for post, count in edge.iteritems()]}
    '''

    """ Can't get the variable to be set with all the skeleton IDs
Example 2
def skeleton_connectors_by_partner(request, project_id):
    """ Return a dict of requested skeleton vs relation vs partner skeleton vs list of connectors.
    Connectors lacking a skeleton partner will of course not be included. """
    skeleton_ids = set(
        int(v) for k, v in request.POST.iteritems() if k.startswith('skids['))
    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id,
                                       ('presynaptic_to', 'postsynaptic_to'),
                                       cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']

    cursor.execute('''
    SELECT tc1.skeleton_id, tc1.relation_id,
           tc2.skeleton_id, tc1.connector_id
    FROM treenode_connector tc1,
         treenode_connector tc2
    WHERE tc1.skeleton_id IN (%s)
      AND tc1.connector_id = tc2.connector_id
      AND tc1.skeleton_id != tc2.skeleton_id
      AND tc1.relation_id != tc2.relation_id
      AND (tc1.relation_id = %s OR tc1.relation_id = %s)
      AND (tc2.relation_id = %s OR tc2.relation_id = %s)
    ''' % (','.join(map(str, skeleton_ids)), pre, post, pre, post))

    # Dict of skeleton vs relation vs skeleton vs list of connectors
    partners = defaultdict(partial(defaultdict, partial(defaultdict, list)))

    for row in cursor.fetchall():
        partners[row[0]][relations[row[1]]][row[2]].append(row[3])

    return HttpResponse(json.dumps(partners))
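The triple-nested defaultdict above can be serialized by json.dumps directly, since defaultdict is a dict subclass. A small stand-alone sketch with made-up IDs:

from collections import defaultdict
from functools import partial
import json

# skeleton -> relation -> partner skeleton -> list of connector IDs
partners = defaultdict(partial(defaultdict, partial(defaultdict, list)))
partners[235]['presynaptic_to'][361].append(2463)
partners[235]['presynaptic_to'][361].append(2466)

# Integer keys become strings in the JSON output.
print(json.dumps(partners))
# {"235": {"presynaptic_to": {"361": [2463, 2466]}}}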
Example 3
def _connector_associated_edgetimes(connector_ids, project_id):
    """ Return a dictionary of connector ID as keys and a dictionary as value
    containing two entries: 'presynaptic_to' with a skeleton ID of None,
    and 'postsynaptic_to' with a list of skeleton IDs (maybe empty) including
    the timestamp of the edge. """
    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    PRE = relations['presynaptic_to']
    POST = relations['postsynaptic_to']

    cursor.execute('''
    SELECT connector_id, relation_id, skeleton_id, treenode_id, creation_time
    FROM treenode_connector
    WHERE connector_id IN (%s)
      AND (relation_id = %s OR relation_id = %s)
    ''' % (",".join(map(str, connector_ids), PRE, POST)))

    cs = {}
    for row in cursor.fetchall():
        c = cs.get(row[0])
        if not c:
            # Ensure each connector has the two entries at their minimum
            c = {'presynaptic_to': None, 'postsynaptic_to': []}
            cs[row[0]] = c
        if POST == row[1]:
            c['postsynaptic_to'].append( (row[2], row[3], row[4]) )
        elif PRE == row[1]:
            c['presynaptic_to'] = (row[2], row[3], row[4])

    return cs
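For illustration, the returned structure for a single connector could look like the sketch below; all IDs are invented and each tuple is (skeleton ID, treenode ID, creation time), with the creation time being a datetime as fetched from the database.

from datetime import datetime

example_result = {
    2463: {
        'presynaptic_to': (235, 285, datetime(2011, 12, 5, 13, 51, 36)),
        'postsynaptic_to': [
            (361, 367, datetime(2011, 12, 5, 13, 52, 10)),
            (373, 377, datetime(2011, 12, 5, 13, 53, 2)),
        ],
    },
}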
Example 4
    def add_test_connector_links(self):
        c_1 = Connector.objects.get(pk=2463)
        c_1.id = None
        c_1.save()
        c_2 = Connector.objects.get(pk=2466)
        c_2.id = None
        c_2.save()

        cursor = connection.cursor()
        relations = get_relation_to_id_map(self.test_project_id, cursor=cursor)
        pre_id, post_id = relations['presynaptic_to'], relations['postsynaptic_to']

        connector_links = [
            # Treenode ID, relation ID, connector ID, user ID, creation time
            [ 7,  pre_id, c_1.id, 1, "2017-06-01T07:54:16.301Z"],
            [15, post_id, c_1.id, 1, "2017-06-30T22:23:24.117Z"],
            [11,  pre_id, c_2.id, 1, "2017-07-02T08:54:16.301Z"],
            [13, post_id, c_2.id, 1, "2017-08-02T08:55:16.301Z"],
            [ 7, post_id, c_1.id, 3, "2017-07-01T07:54:16.301Z"],
            [11, post_id, c_1.id, 3, "2017-07-01T07:55:13.844Z"],
            [13,  pre_id, c_2.id, 3, "2017-07-01T22:55:16.301Z"],
            [15, post_id, c_2.id, 3, "2017-07-01T02:50:10.204Z"],
        ]
        link_data = list(chain.from_iterable(connector_links))
        link_template = ','.join('(%s,%s,%s,%s,%s)' for _ in connector_links)
        cursor.execute(f"""
            INSERT INTO treenode_connector (project_id, treenode_id, skeleton_id,
                relation_id, connector_id, user_id, creation_time)
            SELECT %s, link.treenode_id, t.skeleton_id, link.relation_id,
                link.connector_id, link.user_id, link.creation_time::timestamptz
            FROM treenode t
            JOIN (VALUES {link_template}) link(treenode_id, relation_id, connector_id,
                user_id, creation_time)
            ON t.id = link.treenode_id
        """, [self.test_project_id] + link_data)
Example 5
def connector_user_info(request, project_id):
    """ Return information on a treenode connector edge.

    This function is called often (every connector mouseover) and should
    therefore be as fast as possible. Analogous to user_info for treenodes and
    connectors.
    """
    treenode_id = int(request.GET.get('treenode_id'))
    connector_id = int(request.GET.get('connector_id'))
    cursor = connection.cursor()
    relation_names = ('presynaptic_to', 'postsynaptic_to', 'abutting')
    relations = get_relation_to_id_map(project_id, relation_names, cursor)
    relation_id = relations[request.GET.get('relation_name')]
    cursor.execute('''
        SELECT tc.id, tc.user_id, tc.creation_time, tc.edition_time
        FROM treenode_connector tc
        WHERE tc.treenode_id = %s
          AND tc.connector_id = %s
          AND tc.relation_id = %s
                   ''', (treenode_id, connector_id, relation_id))

    # We expect at least one result node.
    if not cursor.rowcount:
        return HttpResponse(json.dumps({
            'error': 'No treenode connector exists for treenode %s, connector %s, relation %s' %
            (treenode_id, connector_id, relation_id)}))

    # Build result. Because there is no uniqueness restriction on treenode
    # connector edges, even with the same relation, the response must handle
    # multiple rows.
    return HttpResponse(json.dumps([{
        'user': info[1],
        'creation_time': str(info[2].isoformat()),
        'edition_time': str(info[3].isoformat()),
    } for info in cursor.fetchall()]))
Example 6
def skeleton_connectors_by_partner(request, project_id):
    """ Return a dict of requested skeleton vs relation vs partner skeleton vs list of connectors.
    Connectors lacking a skeleton partner will of course not be included. """
    skeleton_ids = set(int(v) for k,v in request.POST.iteritems() if k.startswith('skids['))
    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']

    cursor.execute('''
    SELECT tc1.skeleton_id, tc1.relation_id,
           tc2.skeleton_id, tc1.connector_id
    FROM treenode_connector tc1,
         treenode_connector tc2
    WHERE tc1.skeleton_id IN (%s)
      AND tc1.connector_id = tc2.connector_id
      AND tc1.skeleton_id != tc2.skeleton_id
      AND tc1.relation_id != tc2.relation_id
      AND (tc1.relation_id = %s OR tc1.relation_id = %s)
      AND (tc2.relation_id = %s OR tc2.relation_id = %s)
    ''' % (','.join(map(str, skeleton_ids)), pre, post, pre, post))

    # Dict of skeleton vs relation vs skeleton vs list of connectors
    partners = defaultdict(partial(defaultdict, partial(defaultdict, list)))

    for row in cursor.fetchall():
        partners[row[0]][relations[row[1]]][row[2]].append(row[3])

    return HttpResponse(json.dumps(partners))
Example 7
def _connector_associated_edgetimes(connector_ids, project_id):
    """ Return a dictionary of connector ID as keys and a dictionary as value
    containing two entries: 'presynaptic_to' with a skeleton ID of None,
    and 'postsynaptic_to' with a list of skeleton IDs (maybe empty) including
    the timestamp of the edge. """
    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id,
                                       ('presynaptic_to', 'postsynaptic_to'),
                                       cursor)
    PRE = relations['presynaptic_to']
    POST = relations['postsynaptic_to']

    cursor.execute('''
    SELECT connector_id, relation_id, skeleton_id, treenode_id, creation_time
    FROM treenode_connector
    WHERE connector_id IN (%s)
      AND (relation_id = %s OR relation_id = %s)
    ''' % (",".join(map(str, connector_ids), PRE, POST)))

    cs = {}
    for row in cursor.fetchall():
        c = cs.get(row[0])
        if not c:
            # Ensure each connector has the two entries at their minimum
            c = {'presynaptic_to': None, 'postsynaptic_to': []}
            cs[row[0]] = c
        if POST == row[1]:
            c['postsynaptic_to'].append((row[2], row[3], row[4]))
        elif PRE == row[1]:
            c['presynaptic_to'] = (row[2], row[3], row[4])

    return cs
Example 8
    def add_test_connector_links(self):
        c_1 = Connector.objects.get(pk=2463)
        c_1.id = None
        c_1.save()
        c_2 = Connector.objects.get(pk=2466)
        c_2.id = None
        c_2.save()

        cursor = connection.cursor()
        relations = get_relation_to_id_map(self.test_project_id, cursor=cursor)
        pre_id, post_id = relations['presynaptic_to'], relations['postsynaptic_to']

        connector_links = [
            # Treenode ID, relation ID, connector ID, user ID, creation time
            [ 7,  pre_id, c_1.id, 1, "2017-06-01T07:54:16.301Z"],
            [15, post_id, c_1.id, 1, "2017-06-30T22:23:24.117Z"],
            [11,  pre_id, c_2.id, 1, "2017-07-02T08:54:16.301Z"],
            [13, post_id, c_2.id, 1, "2017-08-02T08:55:16.301Z"],
            [ 7, post_id, c_1.id, 3, "2017-07-01T07:54:16.301Z"],
            [11, post_id, c_1.id, 3, "2017-07-01T07:55:13.844Z"],
            [13,  pre_id, c_2.id, 3, "2017-07-01T22:55:16.301Z"],
            [15, post_id, c_2.id, 3, "2017-07-01T02:50:10.204Z"],
        ]
        link_data = list(chain.from_iterable(connector_links))
        link_template = ','.join('(%s,%s,%s,%s,%s)' for _ in connector_links)
        cursor.execute("""
            INSERT INTO treenode_connector (project_id, treenode_id, skeleton_id,
                relation_id, connector_id, user_id, creation_time)
            SELECT %s, link.treenode_id, t.skeleton_id, link.relation_id,
                link.connector_id, link.user_id, link.creation_time::timestamptz
            FROM treenode t
            JOIN (VALUES {}) link(treenode_id, relation_id, connector_id,
                user_id, creation_time)
            ON t.id = link.treenode_id
        """.format(link_template), [self.test_project_id] + link_data)
Example 9
    def test_delete_non_root_treenode(self):
        self.fake_authentication()
        treenode_id = 265

        relation_map = get_relation_to_id_map(self.test_project_id)
        get_skeleton = lambda: TreenodeClassInstance.objects.filter(
            project=self.test_project_id,
            relation=relation_map['element_of'],
            treenode=treenode_id)
        self.assertEqual(1, get_skeleton().count())

        children = Treenode.objects.filter(parent=treenode_id)
        self.assertTrue(children.count() > 0)
        tn_count = Treenode.objects.all().count()
        parent = get_object_or_404(Treenode, id=treenode_id).parent

        response = self.client.post(
            '/%d/treenode/delete' % self.test_project_id, {
                'treenode_id': treenode_id,
                'state': make_nocheck_state()
            })
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = 'Removed treenode successfully.'
        self.assertEqual(expected_result, parsed_response['success'])
        self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
        self.assertEqual(0, get_skeleton().count())
        self.assertEqual(tn_count - 1, Treenode.objects.all().count())

        for child in children:
            child_after_change = get_object_or_404(Treenode, id=child.id)
            self.assertEqual(parent, child_after_change.parent)
Example 10
    def test_delete_non_root_treenode(self):
        self.fake_authentication()
        treenode_id = 265

        relation_map = get_relation_to_id_map(self.test_project_id)
        get_skeleton = lambda: TreenodeClassInstance.objects.filter(
                project=self.test_project_id,
                relation=relation_map['element_of'],
                treenode=treenode_id)
        self.assertEqual(1, get_skeleton().count())

        children = Treenode.objects.filter(parent=treenode_id)
        self.assertTrue(children.count() > 0)
        tn_count = Treenode.objects.all().count()
        parent = get_object_or_404(Treenode, id=treenode_id).parent

        response = self.client.post(
                '/%d/treenode/delete' % self.test_project_id,
                {'treenode_id': treenode_id, 'state': make_nocheck_state()})
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = 'Removed treenode successfully.'
        self.assertEqual(expected_result, parsed_response['success'])
        self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
        self.assertEqual(0, get_skeleton().count())
        self.assertEqual(tn_count - 1, Treenode.objects.all().count())

        for child in children:
            child_after_change = get_object_or_404(Treenode, id=child.id)
            self.assertEqual(parent, child_after_change.parent)
Example 11
def connectors_info(request, project_id):
    """
    Given a list of connectors, a list of presynaptic skeletons and a list of postsynaptic skeletons,
    return a list of rows, one per synaptic connection, in the same format as one_to_many_synapses.
    The list of connectors is optional.
    """

    cids = tuple(
        str(int(v)) for k, v in request.POST.iteritems()
        if k.startswith('cids['))
    skids_pre = tuple(
        str(int(v)) for k, v in request.POST.iteritems()
        if k.startswith('pre['))
    skids_post = tuple(
        str(int(v)) for k, v in request.POST.iteritems()
        if k.startswith('post['))

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id,
                                       ('presynaptic_to', 'postsynaptic_to'),
                                       cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']

    cursor.execute('''
    SELECT DISTINCT
           tc1.connector_id, c.location_x, c.location_y, c.location_z,
           tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
           t1.location_x, t1.location_y, t1.location_z,
           tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
           t2.location_x, t2.location_y, t2.location_z
    FROM treenode_connector tc1,
         treenode_connector tc2,
         treenode t1,
         treenode t2,
         connector c
    WHERE %s
          tc1.connector_id = c.id
      AND tc1.connector_id = tc2.connector_id
      AND tc1.skeleton_id IN (%s)
      AND tc2.skeleton_id IN (%s)
      AND tc1.relation_id = %s
      AND tc2.relation_id = %s
      AND tc1.id != tc2.id
      AND tc1.treenode_id = t1.id
      AND tc2.treenode_id = t2.id
    ORDER BY tc2.skeleton_id
    ''' % ("c.id IN (%s) AND" % ",".join(cids) if cids else "",
           ",".join(skids_pre), ",".join(skids_post), pre, post))

    rows = tuple((row[0], (row[1], row[2], row[3]), row[4], row[5], row[6],
                  row[7], (row[8], row[9], row[10]), row[11], row[12], row[13],
                  row[14], (row[15], row[16], row[17]))
                 for row in cursor.fetchall())

    return HttpResponse(json.dumps(rows))
Example 12
def basic_graph(project_id,
                skeleton_ids,
                relations=None,
                source_link: str = "presynaptic_to",
                target_link: str = "postsynaptic_to",
                allowed_connector_ids=None) -> Dict[str, Tuple]:

    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    cursor = connection.cursor()

    if not relations:
        relations = get_relation_to_id_map(project_id,
                                           (source_link, target_link), cursor)
    source_rel_id, target_rel_id = relations[source_link], relations[
        target_link]

    undirected_links = source_link in UNDIRECTED_LINK_TYPES and \
            target_link in UNDIRECTED_LINK_TYPES

    # Find all links in the passed in set of skeletons. If a relation is
    # reciprocal, we need to avoid getting two result rows back for each
    # treenode-connector-treenode connection. To keep things simple, we will add
    # a "skeleton ID 1" < "skeleton ID 2" test for reciprocal links.
    cursor.execute(
        f"""
        SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence)
        FROM treenode_connector t1,
             treenode_connector t2
        WHERE t1.skeleton_id = ANY(%(skeleton_ids)s::bigint[])
          AND t1.relation_id = %(source_rel)s
          AND t1.connector_id = t2.connector_id
          AND t2.skeleton_id = ANY(%(skeleton_ids)s::bigint[])
          AND t2.relation_id = %(target_rel)s
          AND t1.id <> t2.id
          {'AND t1.skeleton_id < t2.skeleton_id' if undirected_links else ''}
          {'AND t1.connector_id = ANY(%(allowed_c_ids)s::bigint[])' if allowed_connector_ids else ''}
    """, {
            'skeleton_ids': list(skeleton_ids),
            'source_rel': source_rel_id,
            'target_rel': target_rel_id,
            'allowed_c_ids': allowed_connector_ids,
        })

    edges: DefaultDict = defaultdict(
        partial(defaultdict, make_new_synapse_count_array))
    for row in cursor.fetchall():
        edges[row[0]][row[1]][row[2] - 1] += 1

    return {
        'edges':
        tuple((s, t, count) for s, edge in edges.items()
              for t, count in edge.items())
    }
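Assuming make_new_synapse_count_array returns a five-slot array like newSynapseCounts in Example 1, the result could look like this for two made-up skeletons:

example_result = {
    'edges': (
        # (source skeleton, target skeleton, synapse counts per confidence 1-5)
        (101, 202, [0, 0, 1, 0, 2]),
    ),
}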
Example 13
    def test_create_treenode2(self):
        self.fake_authentication()
        relation_map = get_relation_to_id_map(self.test_project_id)
        class_map = get_class_to_id_map(self.test_project_id)
        count_treenodes = lambda: Treenode.objects.all().count()
        count_skeletons = lambda: ClassInstance.objects.filter(
            project=self.test_project_id,
            class_column=class_map['skeleton']).count()
        count_neurons = lambda: ClassInstance.objects.filter(
            project=self.test_project_id,
            class_column=class_map['neuron']).count()
        treenode_count = count_treenodes()
        skeleton_count = count_skeletons()
        neuron_count = count_neurons()

        response = self.client.post(
            '/%d/treenode/create' % self.test_project_id, {
                'x': 5,
                'y': 10,
                'z': 15,
                'confidence': 5,
                'parent_id': -1,
                'radius': 2
            })
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))

        self.assertTrue('treenode_id' in parsed_response)
        self.assertTrue('skeleton_id' in parsed_response)

        self.assertEqual(treenode_count + 1, count_treenodes())
        self.assertEqual(skeleton_count + 1, count_skeletons())
        self.assertEqual(neuron_count + 1, count_neurons())

        treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
            project=self.test_project_id,
            relation=relation_map['element_of'],
            treenode=parsed_response['treenode_id'],
            class_instance=parsed_response['skeleton_id'])
        neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
            project=self.test_project_id,
            relation=relation_map['model_of'],
            class_instance_a=parsed_response['skeleton_id'])
        # FIXME: Log test doesn't work like this, because we don't have the
        # neuron ID available
        #neuron_log = Log.objects.filter(
        #        project=self.test_project_id,
        #        operation_type='create_neuron',
        #        freetext='Create neuron %s and skeleton %s' % (parsed_response['neuron_id'], parsed_response['skeleton_id']))

        root = ClassInstance.objects.filter(project=self.test_project_id,
                                            class_column=class_map['root'])[0]

        self.assertEqual(1, neuron_skeleton_relation.count())
Example 14
def connectors_info(request, project_id):
    """
    Given a list of connectors, a list of presynaptic skeletons and a list of postsynaptic skeletons,
    return a list of rows, one per synaptic connection, in the same format as one_to_many_synapses.
    The list of connectors is optional.
    """

    int_to_str = lambda x: str(int(x))
    cids = get_request_list(request.POST, 'cids', map_fn=int_to_str)
    skids_pre = get_request_list(request.POST, 'pre', map_fn=int_to_str)
    skids_post = get_request_list(request.POST, 'post', map_fn=int_to_str)

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']

    cursor.execute('''
    SELECT DISTINCT
           tc1.connector_id, c.location_x, c.location_y, c.location_z,
           tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
           t1.location_x, t1.location_y, t1.location_z,
           tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
           t2.location_x, t2.location_y, t2.location_z
    FROM treenode_connector tc1,
         treenode_connector tc2,
         treenode t1,
         treenode t2,
         connector c
    WHERE %s
          tc1.connector_id = c.id
      AND tc1.connector_id = tc2.connector_id
      AND tc1.skeleton_id IN (%s)
      AND tc2.skeleton_id IN (%s)
      AND tc1.relation_id = %s
      AND tc2.relation_id = %s
      AND tc1.id != tc2.id
      AND tc1.treenode_id = t1.id
      AND tc2.treenode_id = t2.id
    ORDER BY tc2.skeleton_id
    ''' % ("c.id IN (%s) AND" % ",".join(cids) if cids else "",
           ",".join(skids_pre),
           ",".join(skids_post),
           pre,
           post))

    rows = tuple((row[0], (row[1], row[2], row[3]),
                  row[4], row[5], row[6], row[7],
                  (row[8], row[9], row[10]),
                  row[11], row[12], row[13], row[14],
                  (row[15], row[16], row[17])) for row in cursor.fetchall())

    return HttpResponse(json.dumps(rows))
Example 15
def tree_object_expand(request, project_id=None):
    class_instance_id = request.POST.get('class_instance_id', None)
    if class_instance_id is None:
        raise Exception('A skeleton id has not been provided!')
    else:
        class_instance_id = int(class_instance_id) # sanitize by casting to int

    relation_map = get_relation_to_id_map(project_id)

    # Treenode is element_of class_instance (skeleton), which is model_of (neuron)
    # which is part_of class_instance (?), recursively, until reaching class_instance
    # ('root').

    response_on_error = ''
    try:
        # 1. Retrieve neuron id of the skeleton
        response_on_error = 'Cannot find neuron for the skeleton with id: %s' % class_instance_id
        neuron_id = ClassInstanceClassInstance.objects.filter(
            project=project_id,
            relation=relation_map['model_of'],
            class_instance_a=class_instance_id)[0].class_instance_b_id

        path = [class_instance_id, neuron_id]

        while True:
            # 2. Retrieve all the nodes of which the neuron is a part of.
            response_on_error = 'Cannot find parent instance for instance with id: %s' % path[-1]
            parent = ClassInstanceClassInstance.objects.filter(
                project=project_id,
                class_instance_a=path[-1],
                relation=relation_map['part_of']).values(
                'class_instance_b',
                'class_instance_b__class_column__class_name',
                'class_instance_b__name')[0]

            path.append(parent['class_instance_b'])

            # The 'Isolated synaptic terminals' is a special group:
            # 1. Its contained elements are never listed by default.
            # 2. If a treenode is selected that belongs to it, the neuron of the skeleton of that node
            #    is listed alone.
            # Here, interrupt the chain at the group level
            if 'Isolated synaptic terminals' == parent['class_instance_b__name']:
                break

            if 'root' == parent['class_instance_b__class_column__class_name']:
                break

        path.reverse()
        return HttpResponse(json.dumps(path))

    except Exception as e:
        raise Exception(response_on_error + ':' + str(e))
Example 16
    def test_create_treenode2(self):
        self.fake_authentication()
        relation_map = get_relation_to_id_map(self.test_project_id)
        class_map = get_class_to_id_map(self.test_project_id)
        count_treenodes = lambda: Treenode.objects.all().count()
        count_skeletons = lambda: ClassInstance.objects.filter(
                project=self.test_project_id,
                class_column=class_map['skeleton']).count()
        count_neurons = lambda: ClassInstance.objects.filter(
                project=self.test_project_id,
                class_column=class_map['neuron']).count()
        treenode_count = count_treenodes()
        skeleton_count = count_skeletons()
        neuron_count = count_neurons()

        response = self.client.post('/%d/treenode/create' % self.test_project_id, {
            'x': 5,
            'y': 10,
            'z': 15,
            'confidence': 5,
            'parent_id': -1,
            'radius': 2})
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))

        self.assertTrue('treenode_id' in parsed_response)
        self.assertTrue('skeleton_id' in parsed_response)

        self.assertEqual(treenode_count + 1, count_treenodes())
        self.assertEqual(skeleton_count + 1, count_skeletons())
        self.assertEqual(neuron_count + 1, count_neurons())

        treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
                project=self.test_project_id,
                relation=relation_map['element_of'],
                treenode=parsed_response['treenode_id'],
                class_instance=parsed_response['skeleton_id'])
        neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
                project=self.test_project_id,
                relation=relation_map['model_of'],
                class_instance_a=parsed_response['skeleton_id'])
        # FIXME: Log test doesn't work like this, because we don't have the
        # neuron ID available
        #neuron_log = Log.objects.filter(
        #        project=self.test_project_id,
        #        operation_type='create_neuron',
        #        freetext='Create neuron %s and skeleton %s' % (parsed_response['neuron_id'], parsed_response['skeleton_id']))

        root = ClassInstance.objects.filter(
                project=self.test_project_id,
                class_column=class_map['root'])[0]

        self.assertEqual(1, neuron_skeleton_relation.count())
Example 17
def _list_completed(project_id,
                    completed_by=None,
                    from_date=None,
                    to_date=None):
    """ Get a list of connector links that can be optionally constrained to be
    completed by a certain user in a given time frame. The returned connector
    links are by default only constrained by both sides having different
    relations and the first link was created before the second one.
    """
    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id,
                                       ('presynaptic_to', 'postsynaptic_to'),
                                       cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']

    params = [project_id, pre, post, pre, post]
    query = '''
        SELECT tc2.connector_id, c.location_x, c.location_y, c.location_z,
            tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
            t2.location_x, t2.location_y, t2.location_z,
            tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
            t1.location_x, t1.location_y, t1.location_z
        FROM treenode_connector tc1
        JOIN treenode_connector tc2 ON tc1.connector_id = tc2.connector_id
        JOIN connector c ON tc1.connector_id = c.id
        JOIN treenode t1 ON t1.id = tc1.treenode_id
        JOIN treenode t2 ON t2.id = tc2.treenode_id
        WHERE t1.project_id=%s
        AND tc1.relation_id <> tc2.relation_id
        AND tc1.creation_time > tc2.creation_time
        AND (tc1.relation_id = %s OR tc1.relation_id = %s)
        AND (tc2.relation_id = %s OR tc2.relation_id = %s)'''

    if completed_by:
        params.append(completed_by)
        query += " AND tc1.user_id=%s"
    if from_date:
        params.append(from_date.isoformat())
        query += " AND tc1.creation_time >= %s"
    if to_date:
        to_date = to_date + timedelta(days=1)
        params.append(to_date.isoformat())
        query += " AND tc1.creation_time < %s"

    cursor.execute(query, params)

    return tuple((row[0], (row[1], row[2], row[3]), row[4], row[5], row[6],
                  row[7], (row[8], row[9], row[10]), row[11], row[12], row[13],
                  row[14], (row[15], row[16], row[17]))
                 for row in cursor.fetchall())
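A sketch of how a single returned row could be unpacked; the layout follows the tuple construction above, with the earlier link (tc2) first and the later link (tc1) second. All values below are invented.

row = (2463, (6730.0, 2700.0, 0.0),            # connector ID, connector location
       285, 235, 5, 3, (6100.0, 2980.0, 0.0),  # earlier link: treenode, skeleton, confidence, user, location
       367, 361, 5, 1, (7030.0, 1980.0, 0.0))  # later link: treenode, skeleton, confidence, user, location
(connector_id, connector_loc,
 tn2, skel2, conf2, user2, tn2_loc,
 tn1, skel1, conf1, user1, tn1_loc) = row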
Example 18
def populate_connector_stats_summary(project_id,
                                     incremental=True,
                                     cursor=None):
    """Add connector summary information to the summary table. Create hourly
    aggregates in UTC time. These aggregates can still be moved in other
    timezones with good enough precision for our purpose. By default, this
    happens in an incremental manner, but can optionally be fone for all data
    from scratch (overriding existing statistics).
    """
    if not cursor:
        cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, cursor=cursor)
    pre_id, post_id = relations.get('presynaptic_to'), relations.get(
        'postsynaptic_to')
    if pre_id and post_id:
        cursor.execute(
            """
            WITH last_precomputation AS (
                SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
                    ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
                        '-infinity') END AS max_date
                FROM catmaid_stats_summary
                WHERE project_id=%(project_id)s
                    AND n_connector_links > 0
            ),
            connector_info AS (
                SELECT t1.user_id,
                    date_trunc('hour', t1.creation_time) AS date,
                    count(*) AS n_connector_links
                FROM last_precomputation, treenode_connector t1
                JOIN treenode_connector t2 ON t1.connector_id = t2.connector_id
                WHERE t1.project_id=%(project_id)s
                AND t1.creation_time >= last_precomputation.max_date
                AND t1.creation_time < date_trunc('hour', CURRENT_TIMESTAMP)
                AND t1.relation_id <> t2.relation_id
                AND (t1.relation_id = %(pre_id)s OR t1.relation_id = %(post_id)s)
                AND (t2.relation_id = %(pre_id)s OR t2.relation_id = %(post_id)s)
                AND t1.creation_time > t2.creation_time
                GROUP BY t1.user_id, date
            )
            INSERT INTO catmaid_stats_summary (project_id, user_id, date,
                    n_connector_links)
            SELECT %(project_id)s, ci.user_id, ci.date, ci.n_connector_links
            FROM connector_info ci
            ON CONFLICT (project_id, user_id, date) DO UPDATE
            SET n_connector_links = EXCLUDED.n_connector_links;
        """,
            dict(project_id=project_id,
                 pre_id=pre_id,
                 post_id=post_id,
                 incremental=incremental))
Example 19
    def test_create_treenode(self):
        self.fake_authentication()
        relation_map = get_relation_to_id_map(self.test_project_id)
        class_map = get_class_to_id_map(self.test_project_id)
        count_treenodes = lambda: Treenode.objects.all().count()
        count_skeletons = lambda: ClassInstance.objects.filter(
            project=self.test_project_id,
            class_column=class_map['skeleton']).count()
        count_neurons = lambda: ClassInstance.objects.filter(
            project=self.test_project_id,
            class_column=class_map['neuron']).count()

        treenode_count = count_treenodes()
        skeleton_count = count_skeletons()
        neuron_count = count_neurons()

        response = self.client.post(
            '/%d/treenode/create' % self.test_project_id, {
                'x': 5,
                'y': 10,
                'z': 15,
                'confidence': 5,
                'parent_id': -1,
                'radius': 2
            })
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))

        self.assertTrue('treenode_id' in parsed_response)
        self.assertTrue('skeleton_id' in parsed_response)

        self.assertEqual(treenode_count + 1, count_treenodes())
        self.assertEqual(skeleton_count + 1, count_skeletons())
        self.assertEqual(neuron_count + 1, count_neurons())

        treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
            project=self.test_project_id,
            relation=relation_map['element_of'],
            treenode=parsed_response['treenode_id'],
            class_instance=parsed_response['skeleton_id'])
        neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
            project=self.test_project_id,
            relation=relation_map['model_of'],
            class_instance_a=parsed_response['skeleton_id'])
        neuron_log = Log.objects.filter(project=self.test_project_id,
                                        operation_type='create_neuron')

        # FIXME: discussed in
        # https://github.com/catmaid/CATMAID/issues/754
        #self.assertEqual(1, treenode_skeleton_relation.count())
        self.assertEqual(1, neuron_skeleton_relation.count())
Example 20
    def test_create_treenode(self):
        self.fake_authentication()
        relation_map = get_relation_to_id_map(self.test_project_id)
        class_map = get_class_to_id_map(self.test_project_id)
        count_treenodes = lambda: Treenode.objects.all().count()
        count_skeletons = lambda: ClassInstance.objects.filter(
                project=self.test_project_id,
                class_column=class_map['skeleton']).count()
        count_neurons = lambda: ClassInstance.objects.filter(
                project=self.test_project_id,
                class_column=class_map['neuron']).count()

        treenode_count = count_treenodes()
        skeleton_count = count_skeletons()
        neuron_count = count_neurons()

        response = self.client.post('/%d/treenode/create' % self.test_project_id, {
            'x': 5,
            'y': 10,
            'z': 15,
            'confidence': 5,
            'parent_id': -1,
            'radius': 2})
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))

        self.assertTrue('treenode_id' in parsed_response)
        self.assertTrue('skeleton_id' in parsed_response)

        self.assertEqual(treenode_count + 1, count_treenodes())
        self.assertEqual(skeleton_count + 1, count_skeletons())
        self.assertEqual(neuron_count + 1, count_neurons())

        treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
                project=self.test_project_id,
                relation=relation_map['element_of'],
                treenode=parsed_response['treenode_id'],
                class_instance=parsed_response['skeleton_id'])
        neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
                project=self.test_project_id,
                relation=relation_map['model_of'],
                class_instance_a=parsed_response['skeleton_id'])
        neuron_log = Log.objects.filter(
                project=self.test_project_id,
                operation_type='create_neuron')

        # FIXME: discussed in
        # https://github.com/catmaid/CATMAID/issues/754
        #self.assertEqual(1, treenode_skeleton_relation.count())
        self.assertEqual(1, neuron_skeleton_relation.count())
Example 21
def connector_types(request, project_id):
    """Get a list of available connector types.

    Returns a list of all available connector link types in a project. Each
    list element consists of an object with the following fields: type,
    relation, relation_id.
    """
    relation_map = get_relation_to_id_map(project_id)

    types = copy.deepcopy(LINK_TYPES)
    for t in types:
        t['relation_id'] = relation_map[t['relation']]

    return JsonResponse(types, safe=False)
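For illustration, the JSON response could look like the list below; the entries come from LINK_TYPES and the relation IDs are project specific, so all values shown here are invented.

[
    {"type": "Synaptic", "relation": "presynaptic_to", "relation_id": 23},
    {"type": "Synaptic", "relation": "postsynaptic_to", "relation_id": 24},
    # ... one entry per link type defined in LINK_TYPES
]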
Example 22
def connector_types(request, project_id):
    """Get a list of available connector types.

    Returns a list of all available connector link types in a project. Each
    list element consists of an object with the following fields: type,
    relation, relation_id.
    """
    relation_map = get_relation_to_id_map(project_id)

    types = copy.deepcopy(LINK_TYPES)
    for t in types:
        t['relation_id'] = relation_map[t['relation']]

    return JsonResponse(types, safe=False)
Example 23
def basic_graph(project_id,
                skeleton_ids,
                relations=None,
                source_link="presynaptic_to",
                target_link="postsynaptic_to"):

    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    cursor = connection.cursor()

    if not relations:
        relations = get_relation_to_id_map(project_id,
                                           (source_link, target_link), cursor)
    source_rel_id, target_rel_id = relations[source_link], relations[
        target_link]

    cursor.execute(
        '''
    SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence)
    FROM treenode_connector t1,
         treenode_connector t2
    WHERE t1.skeleton_id IN (%(skids)s)
      AND t1.relation_id = %(source_rel)s
      AND t1.connector_id = t2.connector_id
      AND t2.skeleton_id IN (%(skids)s)
      AND t2.relation_id = %(target_rel)s
      AND t1.id <> t2.id
    ''' % {
            'skids': ','.join(map(str, skeleton_ids)),
            'source_rel': source_rel_id,
            'target_rel': target_rel_id
        })

    edges = defaultdict(partial(defaultdict, make_new_synapse_count_array))
    for row in cursor.fetchall():
        edges[row[0]][row[1]][row[2] - 1] += 1

    return {
        'edges':
        tuple((s, t, count) for s, edge in six.iteritems(edges)
              for t, count in six.iteritems(edge))
    }
    '''
    return {'edges': [{'source': pre,
                       'target': post,
                       'weight': count} for pre, edge in six.iteritems(edges) for post, count in six.iteritems(edge)]}
    '''
    """ Can't get the variable to be set with all the skeleton IDs
Example 24
def setup_classification(workspace_pid, user):
    """ Tests which of the needed classes and relations is missing
    from the dummy project''s semantic space and adds those.
    """
    # Get classification and relation data
    class_map = get_class_to_id_map(workspace_pid)
    relation_map = get_relation_to_id_map(workspace_pid)

    # Add what is missing
    for c in needed_classes:
        if c not in class_map:
            add_class(workspace_pid, user, c, needed_classes[c])
    for r in needed_relations:
        if r not in relation_map:
            add_relation(workspace_pid, user, r, needed_relations[r])
Example 25
def _list_completed(project_id, completed_by=None, from_date=None, to_date=None):
    """ Get a list of connector links that can be optionally constrained to be
    completed by a certain user in a given time frame. The returned connector
    links are by default only constrained by both sides having different
    relations and the first link was created before the second one.
    """
    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']

    params = [project_id, pre, post, pre, post]
    query = '''
        SELECT tc2.connector_id, c.location_x, c.location_y, c.location_z,
            tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
            t2.location_x, t2.location_y, t2.location_z,
            tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
            t1.location_x, t1.location_y, t1.location_z
        FROM treenode_connector tc1
        JOIN treenode_connector tc2 ON tc1.connector_id = tc2.connector_id
        JOIN connector c ON tc1.connector_id = c.id
        JOIN treenode t1 ON t1.id = tc1.treenode_id
        JOIN treenode t2 ON t2.id = tc2.treenode_id
        WHERE t1.project_id=%s
        AND tc1.relation_id <> tc2.relation_id
        AND tc1.creation_time > tc2.creation_time
        AND (tc1.relation_id = %s OR tc1.relation_id = %s)
        AND (tc2.relation_id = %s OR tc2.relation_id = %s)'''

    if completed_by:
        params.append(completed_by)
        query += " AND tc1.user_id=%s"
    if from_date:
        params.append(from_date.isoformat())
        query += " AND tc1.creation_time >= %s"
    if to_date:
        to_date = to_date + timedelta(days=1)
        params.append(to_date.isoformat())
        query += " AND tc1.creation_time < %s"

    cursor.execute(query, params)

    return tuple((row[0], (row[1], row[2], row[3]),
                  row[4], row[5], row[6], row[7],
                  (row[8], row[9], row[10]),
                  row[11], row[12], row[13], row[14],
                  (row[15], row[16], row[17])) for row in cursor.fetchall())
Example 26
    def test_create_treenode_with_existing_neuron(self):
        self.fake_authentication()
        relation_map = get_relation_to_id_map(self.test_project_id)
        class_map = get_class_to_id_map(self.test_project_id)
        neuron_id = 2389
        count_skeletons = lambda: ClassInstance.objects.filter(
            project=self.test_project_id,
            class_column=class_map['skeleton']).count()
        count_treenodes = lambda: Treenode.objects.all().count()

        treenode_count = count_treenodes()
        skeleton_count = count_skeletons()

        response = self.client.post(
            '/%d/treenode/create' % self.test_project_id, {
                'x': 5,
                'y': 10,
                'z': 15,
                'confidence': 5,
                'parent_id': -1,
                'useneuron': neuron_id,
                'radius': 2
            })
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))

        self.assertTrue('treenode_id' in parsed_response)
        self.assertTrue('skeleton_id' in parsed_response)

        self.assertEqual(treenode_count + 1, count_treenodes())
        self.assertEqual(skeleton_count + 1, count_skeletons())

        treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
            project=self.test_project_id,
            relation=relation_map['element_of'],
            treenode=parsed_response['treenode_id'],
            class_instance=parsed_response['skeleton_id'])
        neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
            project=self.test_project_id,
            relation=relation_map['model_of'],
            class_instance_a=parsed_response['skeleton_id'],
            class_instance_b=neuron_id)

        # FIXME: treenode_skeleton_relation.count() should be 1, but we
        # currently don't store these relations.
        # See: https://github.com/catmaid/CATMAID/issues/754
        self.assertEqual(0, treenode_skeleton_relation.count())
        self.assertEqual(1, neuron_skeleton_relation.count())
Example 27
def _many_to_many_synapses(skids1, skids2, relation_name, project_id):
    """
    Return all rows that connect skeletons of one set with another set with a
    specific relation.
    """
    if relation_name not in LINK_RELATION_NAMES:
        raise Exception("Cannot accept a relation named '%s'" % relation_name)

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, cursor=cursor)
    relation_id = relations[relation_name]
    undirected_link_ids = [relations[l] for l in UNDIRECTED_LINK_TYPES]

    cursor.execute('''
    SELECT tc1.connector_id, c.location_x, c.location_y, c.location_z,
           tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
           t1.location_x, t1.location_y, t1.location_z,
           tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
           t2.location_x, t2.location_y, t2.location_z
    FROM treenode_connector tc1,
         treenode_connector tc2,
         treenode t1,
         treenode t2,
         connector c
    WHERE tc1.skeleton_id = ANY(%(skeleton_ids_1)s::int[])
      AND tc1.connector_id = c.id
      AND tc2.skeleton_id = ANY(%(skeleton_ids_2)s::int[])
      AND tc1.connector_id = tc2.connector_id
      AND tc1.relation_id = %(relation_id)s
      AND (tc1.relation_id != tc2.relation_id
        OR tc1.relation_id = ANY(%(undir_rel_ids)s::int[]))
      AND tc1.id != tc2.id
      AND tc1.treenode_id = t1.id
      AND tc2.treenode_id = t2.id
    ''', {
        'skeleton_ids_1': skids1,
        'skeleton_ids_2': skids2,
        'relation_id': relation_id,
        'undir_rel_ids': undirected_link_ids,
    })

    return tuple((row[0], (row[1], row[2], row[3]),
                  row[4], row[5], row[6], row[7],
                  (row[8], row[9], row[10]),
                  row[11], row[12], row[13], row[14],
                  (row[15], row[16], row[17])) for row in cursor.fetchall())
Example 28
def _many_to_many_synapses(skids1, skids2, relation_name, project_id) -> Tuple:
    """
    Return all rows that connect skeletons of one set with another set with a
    specific relation.
    """
    if relation_name not in LINK_RELATION_NAMES:
        raise Exception("Cannot accept a relation named '%s'" % relation_name)

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, cursor=cursor)
    relation_id = relations[relation_name]
    undirected_link_ids = [relations[link_type] for link_type in UNDIRECTED_LINK_TYPES]

    cursor.execute('''
    SELECT tc1.connector_id, c.location_x, c.location_y, c.location_z,
           tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
           t1.location_x, t1.location_y, t1.location_z,
           tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
           t2.location_x, t2.location_y, t2.location_z
    FROM treenode_connector tc1,
         treenode_connector tc2,
         treenode t1,
         treenode t2,
         connector c
    WHERE tc1.skeleton_id = ANY(%(skeleton_ids_1)s::bigint[])
      AND tc1.connector_id = c.id
      AND tc2.skeleton_id = ANY(%(skeleton_ids_2)s::bigint[])
      AND tc1.connector_id = tc2.connector_id
      AND tc1.relation_id = %(relation_id)s
      AND (tc1.relation_id != tc2.relation_id
        OR tc1.relation_id = ANY(%(undir_rel_ids)s::bigint[]))
      AND tc1.id != tc2.id
      AND tc1.treenode_id = t1.id
      AND tc2.treenode_id = t2.id
    ''', {
        'skeleton_ids_1': skids1,
        'skeleton_ids_2': skids2,
        'relation_id': relation_id,
        'undir_rel_ids': undirected_link_ids,
    })

    return tuple((row[0], (row[1], row[2], row[3]),
                  row[4], row[5], row[6], row[7],
                  (row[8], row[9], row[10]),
                  row[11], row[12], row[13], row[14],
                  (row[15], row[16], row[17])) for row in cursor.fetchall())
Example 29
    def __init__(self, job):
        self.job = job
        # The name of entities that are exported
        self.entity_name = "treenode"

        # Output path for this job will be initialized, when needed
        self.output_path = None

        # Cache for neuron and relation folder names
        self.skid_to_neuron_folder = {}
        self.relid_to_rel_folder = {}

        # Get relation map
        self.relation_map = get_relation_to_id_map(job.project_id)

        # Store meta data for each node
        self.metadata = {}
Example 30
    def test_create_treenode_with_existing_neuron(self):
        self.fake_authentication()
        relation_map = get_relation_to_id_map(self.test_project_id)
        class_map = get_class_to_id_map(self.test_project_id)
        neuron_id = 2389
        count_skeletons = lambda: ClassInstance.objects.filter(
                project=self.test_project_id,
                class_column=class_map['skeleton']).count()
        count_treenodes = lambda: Treenode.objects.all().count()

        treenode_count = count_treenodes()
        skeleton_count = count_skeletons()

        response = self.client.post('/%d/treenode/create' % self.test_project_id, {
            'x': 5,
            'y': 10,
            'z': 15,
            'confidence': 5,
            'parent_id': -1,
            'useneuron': neuron_id,
            'radius': 2})
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))

        self.assertTrue('treenode_id' in parsed_response)
        self.assertTrue('skeleton_id' in parsed_response)

        self.assertEqual(treenode_count + 1, count_treenodes())
        self.assertEqual(skeleton_count + 1, count_skeletons())

        treenode_skeleton_relation = TreenodeClassInstance.objects.filter(
                project=self.test_project_id,
                relation=relation_map['element_of'],
                treenode=parsed_response['treenode_id'],
                class_instance=parsed_response['skeleton_id'])
        neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
                project=self.test_project_id,
                relation=relation_map['model_of'],
                class_instance_a=parsed_response['skeleton_id'],
                class_instance_b=neuron_id)

        # FIXME: treenode_skeleton_relation.count() should be 1, but we
        # currently don't store these relations.
        # See: https://github.com/catmaid/CATMAID/issues/754
        self.assertEqual(0, treenode_skeleton_relation.count())
        self.assertEqual(1, neuron_skeleton_relation.count())
Example 31
    def __init__(self, job):
        self.job = job
        # The name of entities that are exported
        self.entity_name = "treenode"

        # Output path for this job will be initialized, when needed
        self.output_path = None

        # Cache for neuron and relation folder names
        self.skid_to_neuron_folder = {}
        self.relid_to_rel_folder = {}

        # Get relation map
        self.relation_map = get_relation_to_id_map(job.project_id)

        # Store meta data for each node
        self.metadata = {}
Example 32
def populate_connector_stats_summary(project_id, incremental=True, cursor=None):
    """Add connector summary information to the summary table. Create hourly
    aggregates in UTC time. These aggregates can still be moved in other
    timezones with good enough precision for our purpose. By default, this
    happens in an incremental manner, but can optionally be fone for all data
    from scratch (overriding existing statistics).
    """
    if not cursor:
        cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, cursor=cursor)
    pre_id, post_id = relations.get('presynaptic_to'), relations.get('postsynaptic_to')
    if pre_id and post_id:
        cursor.execute("""
            WITH last_precomputation AS (
                SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
                    ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
                        '-infinity') END AS max_date
                FROM catmaid_stats_summary
                WHERE project_id=%(project_id)s
                    AND n_connector_links > 0
            ),
            connector_info AS (
                SELECT t1.user_id,
                    date_trunc('hour', t1.creation_time) AS date,
                    count(*) AS n_connector_links
                FROM last_precomputation, treenode_connector t1
                JOIN treenode_connector t2 ON t1.connector_id = t2.connector_id
                WHERE t1.project_id=%(project_id)s
                AND t1.creation_time >= last_precomputation.max_date
                AND t1.creation_time < date_trunc('hour', CURRENT_TIMESTAMP)
                AND t1.relation_id <> t2.relation_id
                AND (t1.relation_id = %(pre_id)s OR t1.relation_id = %(post_id)s)
                AND (t2.relation_id = %(pre_id)s OR t2.relation_id = %(post_id)s)
                AND t1.creation_time > t2.creation_time
                GROUP BY t1.user_id, date
            )
            INSERT INTO catmaid_stats_summary (project_id, user_id, date,
                    n_connector_links)
            SELECT %(project_id)s, ci.user_id, ci.date, ci.n_connector_links
            FROM connector_info ci
            ON CONFLICT (project_id, user_id, date) DO UPDATE
            SET n_connector_links = EXCLUDED.n_connector_links;
        """, dict(project_id=project_id, pre_id=pre_id, post_id=post_id,
                  incremental=incremental))
Example 33
def _many_to_many_synapses(skids1, skids2, relation_name, project_id):
    """
    Return all rows that connect skeletons of one set with another set with a
    specific relation.
    """
    if relation_name not in ('postsynaptic_to', 'presynaptic_to', 'gapjunction_with'):
        raise Exception("Cannot accept a relation named '%s'" % relation_name)

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, cursor=cursor)
    gapjunction_id = relations.get('gapjunction_with', -1)

    cursor.execute('''
    SELECT tc1.connector_id, c.location_x, c.location_y, c.location_z,
           tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
           t1.location_x, t1.location_y, t1.location_z,
           tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
           t2.location_x, t2.location_y, t2.location_z
    FROM treenode_connector tc1,
         treenode_connector tc2,
         treenode t1,
         treenode t2,
         relation r1,
         connector c
    WHERE tc1.skeleton_id IN (%s)
      AND tc1.connector_id = c.id
      AND tc2.skeleton_id IN (%s)
      AND tc1.connector_id = tc2.connector_id
      AND tc1.relation_id = r1.id
      AND r1.relation_name = '%s'
      AND (tc1.relation_id != tc2.relation_id OR tc1.relation_id = %d)
      AND tc1.id != tc2.id
      AND tc1.treenode_id = t1.id
      AND tc2.treenode_id = t2.id
    ''' % (','.join(map(str, skids1)),
           ','.join(map(str, skids2)),
           relation_name,
           gapjunction_id))

    return tuple((row[0], (row[1], row[2], row[3]),
                  row[4], row[5], row[6], row[7],
                  (row[8], row[9], row[10]),
                  row[11], row[12], row[13], row[14],
                  (row[15], row[16], row[17])) for row in cursor.fetchall())
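
For reference, each returned row follows the column order of the SELECT above: connector ID and location, then treenode ID, skeleton ID, confidence, user ID and location for each of the two link sides. A hypothetical caller (the skeleton and project IDs are invented):

rows = _many_to_many_synapses([123, 124], [456], 'presynaptic_to', project_id=1)
for (connector_id, connector_xyz,
     treenode1_id, skeleton1_id, confidence1, user1_id, treenode1_xyz,
     treenode2_id, skeleton2_id, confidence2, user2_id, treenode2_xyz) in rows:
    # Each *_xyz value is an (x, y, z) tuple taken straight from the query result.
    print(connector_id, skeleton1_id, '->', skeleton2_id, min(confidence1, confidence2))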
Esempio n. 34
0
def check_classification_setup(workspace_pid):
    """ Checks if all classes and relations needed by the
    classification system are available. Needed classes are
    'classification_root' and 'classification_project' and the
    needed relations are 'is_a' and 'classified_by'.
    """
    # Get classification and relation data
    class_map = get_class_to_id_map(workspace_pid)
    relation_map = get_relation_to_id_map(workspace_pid)

    # Check if all is good
    all_good = True
    for c in needed_classes:
        all_good = (all_good and (c in class_map))
    for r in needed_relations:
        all_good = (all_good and (r in relation_map))

    return all_good
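
The function relies on module-level needed_classes and needed_relations that are not part of this listing; judging from the docstring they would look roughly like the following sketch (the actual module may define them differently):

# Sketch of the requirements described in the docstring above.
needed_classes = ('classification_root', 'classification_project')
needed_relations = ('is_a', 'classified_by')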
Esempio n. 35
0
def synapseNodesFromSkeletonID(sid):
    sk = ClassInstance.objects.get(pk=sid)
    pid = sk.project_id
    relations = get_relation_to_id_map(pid, ("presynaptic_to", "postsynaptic_to"))

    qs_tc = TreenodeConnector.objects.filter(
        project=pid, skeleton=sid, relation__in=(relations["presynaptic_to"], relations["postsynaptic_to"])
    ).select_related("connector")

    synapse_nodes = []
    connector_ids = []
    synapse_relations = []

    for tc in qs_tc:
        synapse_nodes.append(tc.treenode_id)
        connector_ids.append(tc.connector_id)
        synapse_relations.append(tc.relation_id)
    return synapse_nodes, connector_ids, synapse_relations
Esempio n. 36
0
def _connector_skeletons(connector_ids, project_id):
    """Return a dictionary of connector ID as keys and a dictionary as value
    containing two entries: 'presynaptic_to' with a skeleton ID or None,
    and 'postsynaptic_to' with a list of skeleton IDs (maybe empty).
    """
    if not connector_ids:
        raise ValueError('No connector IDs provided')

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id,
                                       ('presynaptic_to', 'postsynaptic_to'),
                                       cursor)
    PRE = relations['presynaptic_to']
    POST = relations['postsynaptic_to']

    cursor.execute('''
    SELECT connector_id, relation_id, skeleton_id, treenode_id
    FROM treenode_connector
    WHERE connector_id IN (%s)
      AND (relation_id = %s OR relation_id = %s)
    ''' % (",".join(map(str, connector_ids)), PRE, POST))

    cs = {}
    for row in cursor.fetchall():
        c = cs.get(row[0])
        if not c:
            # Ensure each connector has the two entries at a minimum
            c = {
                'presynaptic_to': None,
                'postsynaptic_to': [],
                'presynaptic_to_node': None,
                'postsynaptic_to_node': []
            }
            cs[row[0]] = c
        if POST == row[1]:
            c['postsynaptic_to'].append(row[2])
            c['postsynaptic_to_node'].append(row[3])
        elif PRE == row[1]:
            c['presynaptic_to'] = row[2]
            c['presynaptic_to_node'] = row[3]

    return cs
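
For reference, the returned mapping has the following shape, derived directly from the loop above (the IDs are invented):

# {
#     2001: {
#         'presynaptic_to': 15,                  # skeleton ID or None
#         'presynaptic_to_node': 9001,           # treenode ID or None
#         'postsynaptic_to': [16, 17],           # possibly empty list of skeleton IDs
#         'postsynaptic_to_node': [9002, 9003],  # matching treenode IDs
#     },
# }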
Esempio n. 37
0
def list_annotations(request, project_id=None):
    """ Creates a list of objects containing an annotation name and the user
    name and ID of the users having linked that particular annotation.
    """

    if not request.POST:
        cursor = connection.cursor()
        classes = get_class_to_id_map(project_id, ("annotation",), cursor)
        relations = get_relation_to_id_map(project_id, ("annotated_with",), cursor)

        cursor.execute(
            """
            SELECT DISTINCT ci.name, ci.id, u.id, u.username
            FROM class_instance ci
            LEFT OUTER JOIN class_instance_class_instance cici
                         ON (ci.id = cici.class_instance_b)
            LEFT OUTER JOIN auth_user u
                         ON (cici.user_id = u.id)
            WHERE (ci.class_id = %s AND cici.relation_id = %s
              AND ci.project_id = %s AND cici.project_id = %s);
                       """,
            (classes["annotation"], relations["annotated_with"], project_id, project_id),
        )
        annotation_tuples = cursor.fetchall()
    else:
        annotation_query = create_annotation_query(project_id, request.POST)
        annotation_tuples = annotation_query.distinct().values_list(
            "name", "id", "cici_via_b__user__id", "cici_via_b__user__username"
        )

    # Create a mapping of annotation names to their users
    ids = {}
    annotation_dict = {}
    for annotation, aid, uid, username in annotation_tuples:
        ids[aid] = annotation
        ls = annotation_dict.get(aid)
        if ls is None:
            ls = []
            annotation_dict[aid] = ls
        ls.append({"id": uid, "name": username})
    # Flatten dictionary to list
    annotations = tuple({"name": ids[aid], "id": aid, "users": users} for aid, users in annotation_dict.iteritems())
    return HttpResponse(json.dumps({"annotations": annotations}), content_type="text/json")
Esempio n. 38
0
def basic_graph(project_id, skeleton_ids, relations=None,
        source_link="presynaptic_to", target_link="postsynaptic_to"):

    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    cursor = connection.cursor()

    if not relations:
        relations = get_relation_to_id_map(project_id, (source_link, target_link), cursor)
    source_rel_id, target_rel_id = relations[source_link], relations[target_link]

    cursor.execute('''
    SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence)
    FROM treenode_connector t1,
         treenode_connector t2
    WHERE t1.skeleton_id IN (%(skids)s)
      AND t1.relation_id = %(source_rel)s
      AND t1.connector_id = t2.connector_id
      AND t2.skeleton_id IN (%(skids)s)
      AND t2.relation_id = %(target_rel)s
      AND t1.id <> t2.id
    ''' % {'skids': ','.join(map(str, skeleton_ids)),
           'source_rel': source_rel_id,
           'target_rel': target_rel_id})

    edges = defaultdict(partial(defaultdict, make_new_synapse_count_array))
    for row in cursor.fetchall():
        edges[row[0]][row[1]][row[2] - 1] += 1

    return {
        'edges': tuple((s, t, count)
                for s, edge in edges.items()
                for t, count in edge.items())
    }

    '''
    return {'edges': [{'source': pre,
                       'target': post,
                       'weight': count} for pre, edge in edges.items() for post, count in edge.items()]}
    '''

    """ Can't get the variable to be set with all the skeleton IDs
Esempio n. 39
0
def _many_to_many_synapses(skids1, skids2, relation_name, project_id):
    """
    Return all rows that connect skeletons of one set with another set with a
    specific relation.
    """
    if relation_name not in ('postsynaptic_to', 'presynaptic_to',
                             'gapjunction_with'):
        raise Exception("Cannot accept a relation named '%s'" % relation_name)

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, cursor=cursor)
    gapjunction_id = relations.get('gapjunction_with', -1)

    cursor.execute('''
    SELECT tc1.connector_id, c.location_x, c.location_y, c.location_z,
           tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
           t1.location_x, t1.location_y, t1.location_z,
           tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
           t2.location_x, t2.location_y, t2.location_z
    FROM treenode_connector tc1,
         treenode_connector tc2,
         treenode t1,
         treenode t2,
         relation r1,
         connector c
    WHERE tc1.skeleton_id IN (%s)
      AND tc1.connector_id = c.id
      AND tc2.skeleton_id IN (%s)
      AND tc1.connector_id = tc2.connector_id
      AND tc1.relation_id = r1.id
      AND r1.relation_name = '%s'
      AND (tc1.relation_id != tc2.relation_id OR tc1.relation_id = %d)
      AND tc1.id != tc2.id
      AND tc1.treenode_id = t1.id
      AND tc2.treenode_id = t2.id
    ''' % (','.join(map(str, skids1)), ','.join(map(
        str, skids2)), relation_name, gapjunction_id))

    return tuple((row[0], (row[1], row[2], row[3]), row[4], row[5], row[6],
                  row[7], (row[8], row[9], row[10]), row[11], row[12], row[13],
                  row[14], (row[15], row[16], row[17]))
                 for row in cursor.fetchall())
Esempio n. 40
0
def basic_graph(project_id, skeleton_ids):
    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ("presynaptic_to", "postsynaptic_to"), cursor)
    preID, postID = relations["presynaptic_to"], relations["postsynaptic_to"]

    cursor.execute(
        """
    SELECT tc.connector_id, tc.relation_id, tc.skeleton_id
    FROM treenode_connector tc
    WHERE tc.project_id = %s
      AND tc.skeleton_id IN (%s)
      AND (tc.relation_id = %s OR tc.relation_id = %s)
    """
        % (int(project_id), ",".join(str(int(skid)) for skid in skeleton_ids), preID, postID)
    )

    # stores entire query set in memory, linking pre and post
    connectors = defaultdict(partial(defaultdict, list))
    for row in cursor.fetchall():
        connectors[row[0]][row[1]].append(row[2])

    # Safely handles half-complete connectors:
    # only adds the edge if both pre and post exist
    edges = defaultdict(partial(defaultdict, int))
    for c in connectors.itervalues():
        for pre in c[preID]:  # should be one or none
            for post in c[postID]:
                edges[pre][post] += 1

    return {"edges": tuple((pre, post, count) for pre, edge in edges.iteritems() for post, count in edge.iteritems())}

    """
    return {'edges': [{'source': pre,
                       'target': post,
                       'weight': count} for pre, edge in edges.iteritems() for post, count in edge.iteritems()]}
    """

    """ Can't get the variable to be set with all the skeleton IDs
Esempio n. 41
0
def check_tracing_setup_detailed(
        project_id,
        opt_class_map=None,
        opt_relation_map=None,
        check_root_ci=True) -> Tuple[bool, List, List, List]:
    """ Checks if all classes and relations needed by the tracing system are
    available. It returns a four-tuple with a boolean indicating if all is
    set up, the missing class names, the missing relation names and the missing
    class instance names. Allows avoiding tests for root class instances and
    passing already available class and relation maps.
    """
    # Get class and relation data. If available, use the provided one.
    class_map = opt_class_map or get_class_to_id_map(project_id)
    relation_map = opt_relation_map or get_relation_to_id_map(project_id)

    # Check if all classes and relations are available
    all_good = True
    missing_classes = []
    missing_relations = []
    missing_classinstances = []

    for c in needed_classes:
        if c not in class_map:
            all_good = False
            missing_classes.append(c)
    for r in needed_relations:
        if r not in relation_map:
            all_good = False
            missing_relations.append(r)
    # Check if the root node is there if requested
    if check_root_ci:
        if 'root' in class_map:
            exists = ClassInstance.objects.filter(
                class_column=class_map['root'],
                project_id=project_id).exists()
            if not exists:
                all_good = False
                missing_classinstances.append('root')
        else:
            missing_classinstances.append('root')

    return all_good, missing_classes, missing_relations, missing_classinstances
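
A small usage sketch for the returned four-tuple (the project ID is invented):

ok, missing_classes, missing_relations, missing_classinstances = \
    check_tracing_setup_detailed(project_id=1)
if not ok:
    print('Missing classes:', missing_classes)
    print('Missing relations:', missing_relations)
    print('Missing class instances:', missing_classinstances)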
Esempio n. 42
0
def skeletons_neuroml(request, project_id=None):
    """ Export a list of skeletons each as a Cell in NeuroML. """
    project_id = int(project_id) # sanitize
    skeleton_ids = tuple(int(v) for k,v in request.POST.iteritems() if k.startswith('skids['))

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    preID = relations['presynaptic_to']
    postID = relations['postsynaptic_to']

    # TODO could certainly fetch all nodes and synapses in one single query and then split them up.
    cells = (_skeleton_neuroml_cell(skeleton_id, preID, postID) for skeleton_id in skeleton_ids)

    response = HttpResponse(content_type='text/txt')
    response['Content-Disposition'] = 'attachment; filename="data.neuroml"'

    neuroml_network(cells, response)

    return response
Esempio n. 44
0
def connector_user_info(request, project_id):
    """ Return information on a treenode connector edge.

    This function is called often (every connector mouseover) and should
    therefore be as fast as possible. Analogous to user_info for treenodes and
    connectors.
    """
    treenode_id = int(request.GET.get('treenode_id'))
    connector_id = int(request.GET.get('connector_id'))
    cursor = connection.cursor()
    relation_names = ('presynaptic_to', 'postsynaptic_to', 'abutting',
                      'gapjunction_with')
    relations = get_relation_to_id_map(project_id, relation_names, cursor)
    relation_id = relations[request.GET.get('relation_name')]
    cursor.execute(
        '''
        SELECT tc.id, tc.user_id, tc.creation_time, tc.edition_time
        FROM treenode_connector tc
        WHERE tc.treenode_id = %s
          AND tc.connector_id = %s
          AND tc.relation_id = %s
                   ''', (treenode_id, connector_id, relation_id))

    # We expect at least one result node.
    if not cursor.rowcount:
        return HttpResponse(
            json.dumps({
                'error':
                'No treenode connector exists for treenode %s, connector %s, relation %s'
                % (treenode_id, connector_id, relation_id)
            }))

    # Build result. Because there is no uniqueness restriction on treenode
    # connector edges, even with the same relation, the response must handle
    # multiple rows.
    return HttpResponse(
        json.dumps([{
            'user': info[1],
            'creation_time': str(info[2].isoformat()),
            'edition_time': str(info[3].isoformat()),
        } for info in cursor.fetchall()]))
Esempio n. 45
0
def basic_graph(project_id, skeleton_ids):
    def newSynapseCounts():
        return [0, 0, 0, 0, 0]

    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id,
                                       ('presynaptic_to', 'postsynaptic_to'),
                                       cursor)
    preID, postID = relations['presynaptic_to'], relations['postsynaptic_to']

    cursor.execute('''
    SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence)
    FROM treenode_connector t1,
         treenode_connector t2
    WHERE t1.skeleton_id IN (%s)
      AND t1.relation_id = %s
      AND t1.connector_id = t2.connector_id
      AND t2.relation_id = %s
    ''' % (','.join(map(str, skeleton_ids)), preID, postID))

    edges = defaultdict(partial(defaultdict, newSynapseCounts))
    for row in cursor.fetchall():
        edges[row[0]][row[1]][row[2] - 1] += 1

    return {
        'edges':
        tuple((pre, post, count) for pre, edge in edges.iteritems()
              for post, count in edge.iteritems())
    }
    '''
    return {'edges': [{'source': pre,
                       'target': post,
                       'weight': count} for pre, edge in edges.iteritems() for post, count in edge.iteritems()]}
    '''
    """ Can't get the variable to be set with all the skeleton IDs
Esempio n. 46
0
def connector_types(request, project_id):
    """Get a list of available connector types.

    Returns a list of all available connector link types in a project. Each
    list element consists of an object with the following fields: type,
    relation, relation_id.
    """
    relation_map = get_relation_to_id_map(project_id)

    def set_id(t):
        relation_id = relation_map.get(t['relation'])
        # If the relation doesn't exist in the database, don't return it. Add it
        # to the log though:
        if relation_id is None:
            logger.info("Tracing relation {} not found in database".format(t['relation']))
            return False
        else:
            t['relation_id'] = relation_id
            return True

    types = list(filter(set_id, copy.deepcopy(LINK_TYPES)))
    return JsonResponse(types, safe=False)
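
LINK_TYPES is defined elsewhere; based on the fields named in the docstring and the keys accessed in set_id, each entry looks roughly like the sketch below (illustrative entries, not the project's actual list). set_id() then adds 'relation_id' for relations that exist in the given project.

LINK_TYPES = [
    {'type': 'Presynaptic', 'relation': 'presynaptic_to'},
    {'type': 'Postsynaptic', 'relation': 'postsynaptic_to'},
]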
Esempio n. 47
0
def synapseNodesFromSkeletonID(sid) -> Tuple[List, List, List]:
    sk = ClassInstance.objects.get(pk=sid)
    pid = sk.project_id
    relations = get_relation_to_id_map(pid,
                                       ('presynaptic_to', 'postsynaptic_to'))

    qs_tc = TreenodeConnector.objects.filter(
        project=pid,
        skeleton=sid,
        relation__in=(
            relations['presynaptic_to'],
            relations['postsynaptic_to'])).select_related('connector')

    synapse_nodes = []
    connector_ids = []
    synapse_relations = []

    for tc in qs_tc:
        synapse_nodes.append(tc.treenode_id)
        connector_ids.append(tc.connector_id)
        synapse_relations.append(tc.relation_id)
    return synapse_nodes, connector_ids, synapse_relations
Esempio n. 48
0
def check_tracing_setup_detailed(project_id, opt_class_map=None,
        opt_relation_map=None, check_root_ci=True):
    """ Checks if all classes and relations needed by the tracing system are
    available. It returns a four-tuple with a boolean indicating if all is
    set up, the missing class names, the missing relation names and the missing
    class instance names. Allows avoiding tests for root class instances and
    passing already available class and relation maps.
    """
    # Get class and relation data. If available, use the provided one.
    class_map = opt_class_map or get_class_to_id_map(project_id)
    relation_map = opt_relation_map or get_relation_to_id_map(project_id)

    # Check if all classes and relations are available
    all_good = True
    missing_classes = []
    missing_relations = []
    missing_classinstances = []

    for c in needed_classes:
        if c not in class_map:
            all_good = False
            missing_classes.append(c)
    for r in needed_relations:
        if r not in relation_map:
            all_good = False
            missing_relations.append(r)
    # Check if the root node is there if requested
    if check_root_ci:
        if 'root' in class_map:
            exists = ClassInstance.objects.filter(
                class_column=class_map['root'],
                project_id=project_id).exists()
            if not exists:
                all_good = False
                missing_classinstances.append('root')
        else:
            missing_classinstances.append('root')

    return all_good, missing_classes, missing_relations, missing_classinstances
Esempio n. 49
0
def connector_types(request:HttpRequest, project_id) -> JsonResponse:
    """Get a list of available connector types.

    Returns a list of all available connector link types in a project. Each
    list element consists of an object with the following fields: type,
    relation, relation_id.
    """
    relation_map = get_relation_to_id_map(project_id)

    def set_id(t) -> bool:
        relation_id = relation_map.get(t['relation'])
        # If the relation doesn't exist in the database, don't return it. Add it
        # to the log though:
        if relation_id is None:
            logger.info(f"Tracing relation {t['relation']} not found in database")
            return False
        else:
            t['relation_id'] = relation_id
            return True

    types = list(filter(set_id, copy.deepcopy(LINK_TYPES)))
    return JsonResponse(types, safe=False)
Esempio n. 50
0
def _connector_skeletons(connector_ids, project_id):
    """Return a dictionary of connector ID as keys and a dictionary as value
    containing two entries: 'presynaptic_to' with a skeleton ID or None,
    and 'postsynaptic_to' with a list of skeleton IDs (maybe empty).
    """
    if not connector_ids:
        raise ValueError('No connector IDs provided')

    cursor = connection.cursor()

    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    PRE = relations['presynaptic_to']
    POST = relations['postsynaptic_to']

    cursor.execute('''
    SELECT connector_id, relation_id, skeleton_id, treenode_id
    FROM treenode_connector
    WHERE connector_id IN (%s)
      AND (relation_id = %s OR relation_id = %s)
    ''' % (",".join(map(str, connector_ids)), PRE, POST))

    cs = {}
    for row in cursor.fetchall():
        c = cs.get(row[0])
        if not c:
            # Ensure each connector has the two entries at a minimum
            c = {'presynaptic_to': None, 'postsynaptic_to': [],
                 'presynaptic_to_node': None, 'postsynaptic_to_node': []}
            cs[row[0]] = c
        if POST == row[1]:
            c['postsynaptic_to'].append(row[2])
            c['postsynaptic_to_node'].append(row[3])
        elif PRE == row[1]:
            c['presynaptic_to'] = row[2]
            c['presynaptic_to_node'] = row[3]

    return cs
Esempio n. 51
0
def list_ontology(request, project_id=None):
    root_class = request.GET.get('rootclass', None)
    parent_id = int(request.GET.get('parentid', 0))
    expand_request = request.GET.get('expandtarget', None)
    parent_type = request.GET.get('parenttype', "relation")
    class_b_id  = int(request.GET.get('classbid', 0))
    if expand_request is None:
        expand_request = tuple()
    else:
        # Parse to int to sanitize
        expand_request = tuple(int(x) for x in expand_request.split(','))

    relation_map = get_relation_to_id_map(project_id)
    class_map = get_class_to_id_map(project_id)

    response_on_error = ''
    try:
        if parent_type == "relation":
            # A class is wanted
            if 0 == parent_id:
                response_on_error = 'Could not select the id of any ontology root node'
                # If no root class is explicitly requested, return all known
                # root classes.
                root_class_ids = []
                if root_class is None:
                    for rc in root_classes:
                        if rc in class_map:
                            root_class_ids.append( class_map[rc] )
                    if len(root_class_ids) == 0:
                        warning = {'warning': 'Could not find any of the known root classes. ' \
                            'Please add at least one of them to build an ontology.'}
                        return JsonResponse(warning)
                else:
                    if root_class not in class_map:
                        raise Exception('Root class "{0}" not found'.format( root_class ))
                    root_class_ids = [ class_map[root_class] ]

                root_node_q = Class.objects.filter(id__in=root_class_ids,
                    project=project_id)

                # Make sure we actually got at least one root node
                if 0 == len(root_node_q):
                    raise Exception("Couldn't select any root node")

                roots = []
                for root_node in root_node_q:
                    root_id = root_node.id
                    root_name = root_node.class_name
                    num_children = ClassClass.objects.filter(
                        class_b=root_id, project=project_id).count()

                    data = {
                        'id': root_id,
                        'text': '%s (%d)' % (root_name, root_id),
                        'type': 'root',
                        'cname': root_name
                    }
                    # Test if there are links present and mark the root
                    # as leaf if there are none.
                    if num_children > 0:
                        data['state'] = {
                            'opened': False
                        }
                    # Add this root node to the output list
                    roots.append(data)

                return JsonResponse(tuple(r for r in roots), safe=False)
            else:
                response_on_error = 'Could not retrieve child nodes.'
                # Select all classes that are linked with the passed relation
                cc_q = ClassClass.objects.filter(class_b=class_b_id,
                    relation=parent_id, project=project_id)

                links = []
                for cc in cc_q:
                    # Get known restrictions
                    restrictions = get_restrictions( cc )
                    restrictions_json = json.dumps( restrictions )
                    # Create name, mark restriction availability with *
                    node_name = "%s (%d)" % (cc.class_a.class_name, cc.class_a.id)
                    if len(restrictions) > 0:
                        node_name = node_name + "*"
                    # Collect standard jSTree data
                    data = {
                        'id': cc.class_a.id,
                        'text': node_name,
                        'type': 'class',
                        'restrictions': restrictions_json,
                        'cname': cc.class_a.class_name,
                        'ccid': cc.id
                    }

                    # Only add a 'state' field if this node has children
                    # (i.e. relations where it is class_b).
                    num_children = ClassClass.objects.filter(
                        class_b=cc.class_a.id, project=project_id).count()
                    if num_children > 0:
                        data['state'] = {
                            'opened': False
                        }
                    # Add this class-class link to the list
                    links.append(data)

                return JsonResponse(tuple(l for l in links), safe=False)
        elif parent_type in ("class", "root"):
            # A relation is wanted
            cc_q = ClassClass.objects.filter(
                project=project_id, class_b_id=parent_id)
            # Combine same relations into one
            relations = {}
            for cc in cc_q:
                if cc.relation not in relations:
                    relations[ cc.relation ] = []
                relations[ cc.relation ].append( cc )

            return JsonResponse(tuple({
                'id': r.id,
                'text': '%s (%d)' % (r.relation_name, r.id),
                'type': 'relation',
                'name': r.relation_name,
                'classbname': relations[r][0].class_b.class_name,
                'classbid': parent_id
            } for r in relations), safe=False)
        else:
            response_on_error = 'Unknown parent type'
            raise Exception(parent_type)

    except Exception as e:
        raise Exception(response_on_error + ': ' + str(e))
Esempio n. 52
0
def get_available_relations(request, project_id=None):
    """ Returns a simple list of all relations available available
    for the given project."""
    relation_map = get_relation_to_id_map(project_id)
    return JsonResponse(relation_map)
Esempio n. 53
0
def stats_user_history(request, project_id=None):
    # Get the start date for the query, defaulting to 10 days ago.
    start_date = request.GET.get('start_date', None)
    if start_date:
        start_date = dateparser.parse(start_date)
        print(start_date)
    else:
        start_date = datetime.now() - timedelta(10)
    # Get the end date for the query, defaulting to now.
    end_date = request.GET.get('end_date', None)
    if end_date:
        # We need to set the end date to the last second of the day to get all
        # events.
        end_date = dateparser.parse(end_date) + timedelta(days=1) - timedelta(seconds=1)
    else:
        end_date = datetime.now()
    # Calculate number of days between (including) start and end
    daydelta = (end_date + timedelta(days=1) - start_date).days

    all_users = User.objects.filter().values_list('id', flat=True)
    days = []
    daysformatted = []
    for i in range(daydelta):
        tmp_date = start_date + timedelta(days=i)
        days.append(tmp_date.strftime("%Y%m%d"))
        daysformatted.append(tmp_date.strftime("%a %d, %h %Y"))
    stats_table = {}
    for userid in all_users:
        if userid == -1:
            continue
        userid = str(userid)
        stats_table[userid] = {}
        for i in range(daydelta):
            date = (start_date + timedelta(days=i)).strftime("%Y%m%d")
            stats_table[userid][date] = {}

    # Look up all tree nodes for the project in the given date range. Also add
    # a computed field which is just the day of the last edited date/time.
    treenode_stats = []
    cursor = connection.cursor()

    cursor.execute('''
        SELECT child.uid, child.day, round(sum(edge.length))
        FROM (
            SELECT
                child.user_id AS uid,
                date_trunc('day', child.creation_time) AS day,
                child.parent_id,
                child.location_x,
                child.location_y,
                child.location_z
            FROM treenode child
            WHERE child.project_id = %(project_id)s
              AND child.creation_time BETWEEN %(start_date)s AND %(end_date)s
        ) AS child
        INNER JOIN LATERAL (
            SELECT sqrt(pow(child.location_x - parent.location_x, 2)
                      + pow(child.location_y - parent.location_y, 2)
                      + pow(child.location_z - parent.location_z, 2)) AS length
            FROM treenode parent
            WHERE parent.project_id = %(project_id)s
              AND parent.id = child.parent_id
            LIMIT 1
        ) AS edge ON TRUE
        GROUP BY child.uid, child.day
    ''', dict(project_id=project_id, start_date=start_date, end_date=end_date))

    treenode_stats = cursor.fetchall()

    relations = get_relation_to_id_map(project_id, cursor=cursor)
    preId, postId = relations['presynaptic_to'], relations['postsynaptic_to']

    # Retrieve a list of how many completed connector relations a user has
    # created in a given time frame. A completed connector relation is either
    # one where a user created both the presynaptic and the postsynaptic side
    # (one of them in the given time frame) or one where a user completes an
    # existing 'half connection'. To avoid duplicates, only links where the
    # second node is younger than the first one are counted.
    cursor.execute('''
        SELECT t1.user_id, (date_trunc('day', t1.creation_time)) AS date, count(*)
        FROM treenode_connector t1
        JOIN treenode_connector t2 ON t1.connector_id = t2.connector_id
        WHERE t1.project_id=%s
        AND t1.creation_time BETWEEN %s AND %s
        AND t1.relation_id <> t2.relation_id
        AND (t1.relation_id = %s OR t1.relation_id = %s)
        AND (t2.relation_id = %s OR t2.relation_id = %s)
        AND t1.creation_time > t2.creation_time
        GROUP BY t1.user_id, date
    ''', (project_id, start_date, end_date, preId, postId, preId, postId))
    connector_stats = cursor.fetchall()

    tree_reviewed_nodes = Review.objects \
        .filter(
            project_id=project_id,
            review_time__range=(start_date, end_date)) \
        .extra(select={'date': "date_trunc('day', review_time)"}) \
        .order_by('date') \
        .values_list('reviewer_id', 'date') \
        .annotate(count = Count('treenode'))

    for di in treenode_stats:
        user_id = str(di[0])
        date = di[1].strftime('%Y%m%d')
        stats_table[user_id][date]['new_treenodes'] = di[2]

    for di in connector_stats:
        user_id = str(di[0])
        date = di[1].strftime('%Y%m%d')
        stats_table[user_id][date]['new_connectors'] = di[2]

    for di in tree_reviewed_nodes:
        user_id = str(di[0])
        date = di[1].strftime('%Y%m%d')
        stats_table[user_id][date]['new_reviewed_nodes'] = di[2]

    return HttpResponse(json.dumps({
        'stats_table': stats_table,
        'days': days,
        'daysformatted': daysformatted}), content_type='text/json')
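
The "younger than" condition in the connector query above is what keeps a completed connection from being counted twice. Expressed in Python, the crediting rule is roughly the following sketch (field names are illustrative):

def credit_completed_link(link_a, link_b):
    # link_a and link_b stand for two treenode_connector rows on the same
    # connector with different relations (one presynaptic_to, one postsynaptic_to).
    # The pair is credited to the user who created the newer of the two links.
    newer = link_a if link_a.creation_time > link_b.creation_time else link_b
    return newer.user_id, newer.creation_time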
Esempio n. 54
0
def list_annotations(request, project_id=None):
    """List annotations matching filtering criteria that are currently in use.

    The result set is the intersection of annotations matching criteria (the
    criteria are conjunctive) unless stated otherwise.
    ---
    parameters:
      - name: annotations
        description: A list of (meta) annotations with which the resulting annotations should be annotated.
        paramType: form
        type: array
        items:
            type: integer
            description: An annotation ID
      - name: annotates
        description: A list of entity IDs (like annotations and neurons) that should be annotated by the result set.
        paramType: form
        type: array
        items:
            type: integer
            description: An entity ID
      - name: parallel_annotations
        description: A list of annotations that have to be used alongside the result set.
        paramType: form
        type: array
        items:
            type: integer
            description: An annotation ID
      - name: user_id
        description: Result annotations have to be used by this user.
        paramType: form
        type: integer
      - name: neuron_id
        description: Result annotations will annotate this neuron.
        paramType: form
        type: integer
      - name: skeleton_id
        description: Result annotations will annotate the neuron modeled by this skeleton.
        paramType: form
        type: integer
      - name: ignored_annotations
        description: A list of annotation names that will be excluded from the result set.
        paramType: form
        type: array
        items:
            type: string
    models:
      annotation_user_list_element:
        id: annotation_user_list_element
        properties:
          id:
            type: integer
            name: id
            description: The user id
            required: true
          name:
            type: string
            name: name
            description: The user name
            required: true
      annotation_list_element:
        id: annotation_list_element
        description: Represents one annotation along with its users.
        properties:
          name:
            type: string
            description: The name of the annotation
            required: true
          id:
            type: integer
            description: The id of the annotation
            required: true
          users:
            type: array
            description: A list of users
            required: true
            items:
              $ref: annotation_user_list_element
    type:
      - type: array
        items:
          $ref: annotation_list_element
        required: true
    """

    if not request.POST:
        cursor = connection.cursor()
        classes = get_class_to_id_map(project_id, ('annotation', ), cursor)
        relations = get_relation_to_id_map(project_id, ('annotated_with', ),
                                           cursor)

        cursor.execute(
            '''
            SELECT DISTINCT ci.name, ci.id, u.id, u.username
            FROM class_instance ci
            LEFT OUTER JOIN class_instance_class_instance cici
                         ON (ci.id = cici.class_instance_b)
            LEFT OUTER JOIN auth_user u
                         ON (cici.user_id = u.id)
            WHERE (ci.class_id = %s AND (cici.relation_id = %s OR cici.id IS NULL));
                       ''',
            (classes['annotation'], relations['annotated_with']))
        annotation_tuples = cursor.fetchall()
    else:
        annotation_query = create_annotation_query(project_id, request.POST)
        annotation_tuples = annotation_query.distinct().values_list(
            'name', 'id', 'cici_via_b__user__id', 'cici_via_b__user__username')

    # Create a mapping of annotation names to their users
    ids = {}
    annotation_dict = {}
    for annotation, aid, uid, username in annotation_tuples:
        ids[aid] = annotation
        ls = annotation_dict.get(aid)
        if ls is None:
            ls = []
            annotation_dict[aid] = ls
        if uid is not None:
            ls.append({'id': uid, 'name': username})
    # Flatten dictionary to list
    annotations = tuple({
        'name': ids[aid],
        'id': aid,
        'users': users
    } for aid, users in annotation_dict.iteritems())
    return JsonResponse({'annotations': annotations})
Esempio n. 55
0
def node_nearest(request, project_id=None):
    params = {}
    param_float_defaults = {
        'x': 0,
        'y': 0,
        'z': 0}
    param_int_defaults = {
        'skeleton_id': -1,
        'neuron_id': -1}
    for p in param_float_defaults.keys():
        params[p] = float(request.POST.get(p, param_float_defaults[p]))
    for p in param_int_defaults.keys():
        params[p] = int(request.POST.get(p, param_int_defaults[p]))
    relation_map = get_relation_to_id_map(project_id)

    if params['skeleton_id'] < 0 and params['neuron_id'] < 0:
        raise Exception('You must specify either a skeleton or a neuron')

    for rel in ['part_of', 'model_of']:
        if rel not in relation_map:
            raise Exception('Could not find required relation %s for project %s.' % (rel, project_id))

    skeletons = []
    if params['skeleton_id'] > 0:
        skeletons.append(params['skeleton_id'])

    response_on_error = ''
    try:
        if params['neuron_id'] > 0:  # Add skeletons related to specified neuron
            # Assumes that a cici 'model_of' relationship always involves a
            # skeleton as ci_a and a neuron as ci_b.
            response_on_error = 'Finding the skeletons failed.'
            neuron_skeletons = ClassInstanceClassInstance.objects.filter(
                class_instance_b=params['neuron_id'],
                relation=relation_map['model_of'])
            for neur_skel_relation in neuron_skeletons:
                skeletons.append(neur_skel_relation.class_instance_a_id)

        # Get all treenodes connected to skeletons
        response_on_error = 'Finding the treenodes failed.'
        treenodes = Treenode.objects.filter(project=project_id, skeleton__in=skeletons)

        def getNearestTreenode(x, y, z, treenodes):
            minDistance = -1
            nearestTreenode = None
            for tn in treenodes:
                xdiff = x - tn.location_x
                ydiff = y - tn.location_y
                zdiff = z - tn.location_z
                distanceSquared = xdiff ** 2 + ydiff ** 2 + zdiff ** 2
                if distanceSquared < minDistance or minDistance < 0:
                    nearestTreenode = tn
                    minDistance = distanceSquared
            return nearestTreenode

        nearestTreenode = getNearestTreenode(
            params['x'],
            params['y'],
            params['z'],
            treenodes)
        if nearestTreenode is None:
            raise Exception('No treenodes were found for skeletons in %s' % skeletons)

        return HttpResponse(json.dumps({
            'treenode_id': nearestTreenode.id,
            'x': int(nearestTreenode.location_x),
            'y': int(nearestTreenode.location_y),
            'z': int(nearestTreenode.location_z),
            'skeleton_id': nearestTreenode.skeleton_id}))

    except Exception as e:
        raise Exception(response_on_error + ':' + str(e))
Esempio n. 56
0
def _create_treenode(project_id,
                     creator,
                     editor,
                     x,
                     y,
                     z,
                     radius,
                     confidence,
                     neuron_id,
                     parent_id,
                     creation_time=None,
                     neuron_name=None):

    relation_map = get_relation_to_id_map(project_id)
    class_map = get_class_to_id_map(project_id)

    def insert_new_treenode(parent_id=None, skeleton_id=None):
        """ If the parent_id is not None and the skeleton_id of the parent does
        not match with the skeleton.id, then the database will throw an error
        given that the skeleton_id, being defined as foreign key in the
        treenode table, will not meet the being-foreign requirement.
        """
        new_treenode = Treenode()
        new_treenode.user = creator
        new_treenode.editor = editor
        new_treenode.project_id = project_id
        if creation_time:
            new_treenode.creation_time = creation_time
        new_treenode.location_x = float(x)
        new_treenode.location_y = float(y)
        new_treenode.location_z = float(z)
        new_treenode.radius = int(radius)
        new_treenode.skeleton_id = skeleton_id
        new_treenode.confidence = int(confidence)
        if parent_id:
            new_treenode.parent_id = parent_id
        new_treenode.save()
        return new_treenode

    def relate_neuron_to_skeleton(neuron, skeleton):
        return _create_relation(creator, project_id, relation_map['model_of'],
                                skeleton, neuron)

    response_on_error = ''
    try:
        if -1 != int(parent_id):  # A root node and parent node exist
            # Select the parent treenode for update to prevent race condition
            # updates to its skeleton ID while this node is being created.
            cursor = connection.cursor()
            cursor.execute(
                '''
                SELECT t.skeleton_id, t.edition_time FROM treenode t
                WHERE t.id = %s FOR NO KEY UPDATE OF t
                ''', (parent_id, ))

            if cursor.rowcount != 1:
                raise ValueError('Parent treenode %s does not exist' %
                                 parent_id)

            parent_node = cursor.fetchone()
            parent_skeleton_id = parent_node[0]
            parent_edition_time = parent_node[1]

            # Raise an Exception if the user doesn't have permission to edit
            # the neuron the skeleton of the treenode is modeling.
            can_edit_skeleton_or_fail(editor, project_id, parent_skeleton_id,
                                      relation_map['model_of'])

            response_on_error = 'Could not insert new treenode!'
            new_treenode = insert_new_treenode(parent_id, parent_skeleton_id)

            return NewTreenode(new_treenode.id, new_treenode.edition_time,
                               parent_skeleton_id, parent_edition_time)
        else:
            # No parent node: We must create a new root node, which needs a
            # skeleton and a neuron to belong to.
            response_on_error = 'Could not insert new treenode instance!'

            new_skeleton = ClassInstance()
            new_skeleton.user = creator
            new_skeleton.project_id = project_id
            new_skeleton.class_column_id = class_map['skeleton']
            new_skeleton.name = 'skeleton'
            new_skeleton.save()
            new_skeleton.name = 'skeleton %d' % new_skeleton.id
            new_skeleton.save()

            if -1 != neuron_id:
                # Check that the neuron to use exists
                if 0 == ClassInstance.objects.filter(pk=neuron_id).count():
                    neuron_id = -1

            if -1 != neuron_id:
                # Raise an Exception if the user doesn't have permission to
                # edit the existing neuron.
                can_edit_class_instance_or_fail(editor, neuron_id, 'neuron')

                # A neuron already exists, so we use it
                response_on_error = 'Could not relate the neuron model to ' \
                                    'the new skeleton!'
                relate_neuron_to_skeleton(neuron_id, new_skeleton.id)

                response_on_error = 'Could not insert new treenode!'
                new_treenode = insert_new_treenode(None, new_skeleton.id)

                return NewTreenode(new_treenode.id, new_treenode.edition_time,
                                   new_skeleton.id, None)
            else:
                # A neuron does not exist, therefore we put the new skeleton
                # into a new neuron.
                response_on_error = 'Failed to insert new instance of a neuron.'
                new_neuron = ClassInstance()
                new_neuron.user = creator
                new_neuron.project_id = project_id
                new_neuron.class_column_id = class_map['neuron']
                if neuron_name:
                    # Create a regular expression to find allowed patterns. The
                    # first group is the whole {nX} part, while the second group
                    # is X only.
                    counting_pattern = re.compile(r"(\{n(\d+)\})")
                    # Look for patterns, replace all {n} with {n1} to normalize.
                    neuron_name = neuron_name.replace("{n}", "{n1}")

                    if counting_pattern.search(neuron_name):
                        # Find starting values for each substitution.
                        counts = [
                            int(m.groups()[1])
                            for m in counting_pattern.finditer(neuron_name)
                        ]
                        # Find existing matching neurons in database.
                        name_match = counting_pattern.sub(
                            r"(\d+)", neuron_name)
                        name_pattern = re.compile(name_match)
                        matching_neurons = ClassInstance.objects.filter(
                            project_id=project_id,
                            class_column_id=class_map['neuron'],
                            name__regex=name_match).order_by('name')

                        # Increment substitution values based on existing neurons.
                        for n in matching_neurons:
                            for i, (count, g) in enumerate(
                                    zip(counts,
                                        name_pattern.search(n.name).groups())):
                                if count == int(g):
                                    counts[i] = count + 1

                        # Substitute values.
                        count_ind = 0
                        m = counting_pattern.search(neuron_name)
                        while m:
                            neuron_name = m.string[:m.start()] + str(
                                counts[count_ind]) + m.string[m.end():]
                            count_ind = count_ind + 1
                            m = counting_pattern.search(neuron_name)

                    new_neuron.name = neuron_name
                else:
                    new_neuron.name = 'neuron'
                    new_neuron.save()
                    new_neuron.name = 'neuron %d' % new_neuron.id

                new_neuron.save()

                response_on_error = 'Could not relate the neuron model to ' \
                                    'the new skeleton!'
                relate_neuron_to_skeleton(new_neuron.id, new_skeleton.id)

                response_on_error = 'Failed to insert instance of treenode.'
                new_treenode = insert_new_treenode(None, new_skeleton.id)

                response_on_error = 'Failed to write to logs.'
                new_location = (new_treenode.location_x,
                                new_treenode.location_y,
                                new_treenode.location_z)
                insert_into_log(
                    project_id, creator.id, 'create_neuron', new_location,
                    'Create neuron %d and skeleton '
                    '%d' % (new_neuron.id, new_skeleton.id))

                return NewTreenode(new_treenode.id, new_treenode.edition_time,
                                   new_skeleton.id, None)

    except Exception as e:
        import traceback
        raise Exception(
            "%s: %s %s" %
            (response_on_error, str(e), str(traceback.format_exc())))
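
The {n}/{nX} handling above auto-numbers new neurons: {n} is normalized to {n1}, each placeholder's counter starts at X, and the counter is bumped past any existing neuron whose name matches the same pattern. A worked example (neuron names invented):

# For neuron_name = 'MBON {n5}':
# - '{n}' would first be rewritten to '{n1}' (not needed here).
# - The counter starts at 5; if neurons 'MBON 5' and 'MBON 6' already exist,
#   the matching loop bumps it to 6 and then 7, so the new neuron is named 'MBON 7'.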
Esempio n. 57
0
def list_connector(request, project_id=None):
    stack_id = request.POST.get('stack_id', None)
    skeleton_id = request.POST.get('skeleton_id', None)

    def empty_result():
        return HttpResponse(
            json.dumps({
                'iTotalRecords': 0,
                'iTotalDisplayRecords': 0,
                'aaData': []
            }))

    if not skeleton_id:
        return empty_result()
    else:
        skeleton_id = int(skeleton_id)

    relation_type = int(request.POST.get('relation_type',
                                         0))  # 0: Presyn, 1 Postsyn, 2 Gj
    display_start = int(request.POST.get('iDisplayStart', 0))
    display_length = int(request.POST.get('iDisplayLength', 0))
    sorting_column = int(request.POST.get('iSortCol_0', 0))
    sort_descending = upper(request.POST.get('sSortDir_0', 'DESC')) != 'ASC'

    response_on_error = ''
    try:
        response_on_error = 'Could not fetch relations.'
        relation_map = get_relation_to_id_map(project_id)
        for rel in [
                'presynaptic_to', 'postsynaptic_to', 'gapjunction_with',
                'element_of', 'labeled_as'
        ]:
            if rel not in relation_map:
                raise Exception('Failed to find the required relation %s' %
                                rel)

        if relation_type == 1:
            relation_type_id = relation_map['presynaptic_to']
            inverse_relation_type_id = relation_map['postsynaptic_to']
        elif relation_type == 2:
            relation_type_id = relation_map['gapjunction_with']
            inverse_relation_type_id = relation_map['gapjunction_with']
        else:
            relation_type_id = relation_map['postsynaptic_to']
            inverse_relation_type_id = relation_map['presynaptic_to']

        response_on_error = 'Failed to select connectors.'
        cursor = connection.cursor()
        cursor.execute(
            '''
            SELECT
            connector.id AS connector_id,
            tn_other.user_id AS connector_user_id,
            treenode_user.username AS connector_username,
            connector.location_x AS connector_x,
            connector.location_y AS connector_y,
            connector.location_z AS connector_z,
            tn_other.id AS other_treenode_id,
            tn_other.location_x AS other_treenode_x,
            tn_other.location_y AS other_treenode_y,
            tn_other.location_z AS other_treenode_z,
            tn_other.skeleton_id AS other_skeleton_id,
            tn_this.location_x AS this_treenode_x,
            tn_this.location_y AS this_treenode_y,
            tn_this.location_z AS this_treenode_z,
            tn_this.id AS this_treenode_id,
            tc_this.relation_id AS this_to_connector_relation_id,
            tc_other.relation_id AS connector_to_other_relation_id,
            tc_other.confidence AS confidence,
            to_char(connector.edition_time, 'DD-MM-YYYY HH24:MI') AS last_modified
            FROM
            treenode tn_other,
            treenode_connector tc_other,
            connector,
            "auth_user" treenode_user,
            treenode_connector tc_this,
            treenode tn_this
            WHERE
            treenode_user.id = tn_other.user_id AND
            tn_other.id = tc_other.treenode_id AND
            tc_other.connector_id = connector.id AND
            tc_other.relation_id = %s AND
            tc_this.connector_id = connector.id AND
            tn_this.id = tc_this.treenode_id AND
            tn_this.skeleton_id = %s AND
            tc_this.relation_id = %s
            ORDER BY
            connector_id, other_treenode_id, this_treenode_id
            ''', [inverse_relation_type_id, skeleton_id, relation_type_id])

        connectors = cursor_fetch_dictionary(cursor)
        connected_skeletons = map(lambda con: con['other_skeleton_id'],
                                  connectors)
        connector_ids = map(lambda con: con['connector_id'], connectors)

        response_on_error = 'Failed to find counts of treenodes in skeletons.'
        skel_tn_count = Treenode.objects.filter(skeleton__in=connected_skeletons)\
        .values('skeleton').annotate(treenode_count=Count('skeleton'))
        # .values to group by skeleton_id. See http://tinyurl.com/dj-values-annotate

        skeleton_to_treenode_count = {}
        for s in skel_tn_count:
            skeleton_to_treenode_count[s['skeleton']] = s['treenode_count']

        # Rather than do a LEFT OUTER JOIN to also include the connectors
        # with no partners, just do another query to find the connectors
        # without the conditions:

        response_on_error = 'Failed to select all connectors.'
        cursor.execute(
            '''
            SELECT
            connector.id AS connector_id,
            connector.user_id AS connector_user_id,
            connector_user.username AS connector_username,
            connector.location_x AS connector_x,
            connector.location_y AS connector_y,
            connector.location_z AS connector_z,
            tn_this.id AS this_treenode_id,
            tc_this.relation_id AS this_to_connector_relation_id,
            tc_this.confidence AS confidence,
            to_char(connector.edition_time, 'DD-MM-YYYY HH24:MI') AS last_modified
            FROM
            connector,
            "auth_user" connector_user,
            treenode_connector tc_this,
            treenode tn_this
            WHERE
            connector_user.id = connector.user_id AND
            tc_this.connector_id = connector.id AND
            tn_this.id = tc_this.treenode_id AND
            tn_this.skeleton_id = %s AND
            tc_this.relation_id = %s
            ORDER BY
            connector_id, this_treenode_id
            ''', [skeleton_id, relation_type_id])
        for row in cursor_fetch_dictionary(cursor):
            connector_id = row['connector_id']
            if connector_id not in connector_ids:
                connectors.append(row)
                connector_ids.append(connector_id)

        # For each of the connectors, find all of its labels:
        response_on_error = 'Failed to find the labels for connectors'
        if len(connector_ids) > 0:
            connector_labels = ConnectorClassInstance.objects.filter(
                project=project_id,
                connector__in=connector_ids,
                relation=relation_map['labeled_as']).values(
                    'connector', 'class_instance__name')

            labels_by_connector = {
            }  # Key: Connector ID, Value: List of labels.
            for label in connector_labels:
                if label['connector'] not in labels_by_connector:
                    labels_by_connector[label['connector']] = [
                        label['class_instance__name']
                    ]
                else:
                    labels_by_connector[label['connector']].append(
                        label['class_instance__name'])
            # Sort labels by name
            for labels in labels_by_connector.values():
                labels.sort(key=upper)

        total_result_count = len(connectors)

        if 0 == total_result_count:
            return empty_result()

        # Paging
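        # A display_length of 0 is treated as "everything from display_start on".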
        if display_length == 0:
            connectors = connectors[display_start:]
            connector_ids = connector_ids[display_start:]
        else:
            connectors = connectors[display_start:display_start +
                                    display_length]
            connector_ids = connector_ids[display_start:display_start +
                                          display_length]

        response_on_error = 'Could not retrieve resolution and translation parameters for project.'
        if stack_id:
            resolution = get_object_or_404(Stack, id=int(stack_id)).resolution
            translation = get_object_or_404(ProjectStack,
                                            stack=int(stack_id),
                                            project=project_id).translation
        else:
            resolution = Double3D(1.0, 1.0, 1.0)
            translation = Double3D(0.0, 0.0, 0.0)

        # Format output
        aaData_output = []
        for c in connectors:
            response_on_error = 'Failed to format output for connector with ID %s.' % c[
                'connector_id']
            if 'other_skeleton_id' in c:
                connected_skeleton_treenode_count = skeleton_to_treenode_count[
                    c['other_skeleton_id']]
            else:
                c['other_skeleton_id'] = ''
                c['other_treenode_id'] = ''
                c['other_treenode_x'] = c['connector_x']
                c['other_treenode_y'] = c['connector_y']
                c['other_treenode_z'] = c['connector_z']
                connected_skeleton_treenode_count = 0

            if c['connector_id'] in labels_by_connector:
                labels = ', '.join(
                    map(str, labels_by_connector[c['connector_id']]))
            else:
                labels = ''

            row = []
            row.append(c['connector_id'])
            row.append(c['other_skeleton_id'])
            row.append(c['other_treenode_x'])
            row.append(c['other_treenode_y'])
            z = c['other_treenode_z']
            row.append(z)
            # FIXME: This is the only place we need a stack and this could be
            # done in the client as well. So do we really want to keep this
            # and have a more complicated API?
            row.append(int((z - translation.z) / resolution.z))
            row.append(c['confidence'])
            row.append(labels)
            row.append(connected_skeleton_treenode_count)
            row.append(c['connector_username'])
            row.append(c['other_treenode_id'])
            row.append(c['last_modified'])
            aaData_output.append(row)

        # Sort output
        def fetch_value_for_sorting(row):
            value = row[sorting_column]
            if isinstance(value, str) or isinstance(value, unicode):
                return upper(value)
            return value

        aaData_output.sort(key=fetch_value_for_sorting)

        # Fix excessive decimal precision in coordinates
        for row in aaData_output:
            row[2] = float('%.2f' % row[2])
            row[3] = float('%.2f' % row[3])
            row[4] = float('%.2f' % row[4])

        if sort_descending:
            aaData_output.reverse()

        return HttpResponse(
            json.dumps({
                'iTotalRecords': total_result_count,
                'iTotalDisplayRecords': total_result_count,
                'aaData': aaData_output
            }))

    except Exception as e:
        raise Exception(response_on_error + ':' + str(e))
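For reference, the JSON returned above follows the legacy DataTables server-side shape (iTotalRecords, iTotalDisplayRecords, aaData), with paging driven by display_start/display_length and sorting by sorting_column/sort_descending. The following is a minimal, self-contained sketch of just that paging-and-wrapping step; the function name and the simplified sort key are illustrative and not part of the view above.

import json

def paginate_and_wrap(rows, display_start, display_length,
                      sorting_column=0, sort_descending=False):
    """Slice, sort and wrap rows the way the connector table response does."""
    total = len(rows)
    # display_length == 0 means "everything from display_start onwards".
    if display_length == 0:
        page = rows[display_start:]
    else:
        page = rows[display_start:display_start + display_length]
    # Like the view above, sort only the visible page, after slicing.
    page.sort(key=lambda row: row[sorting_column], reverse=sort_descending)
    return json.dumps({
        'iTotalRecords': total,
        'iTotalDisplayRecords': total,
        'aaData': page,
    })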
Esempio n. 58
0
def connectors_info(request, project_id):
    """
    Given a list of connectors, a list of presynaptic skeletons and a list of
    postsynaptic skeletons, return a list of rows, one per synaptic connection,
    in the same format as one_to_many_synapses. The lists of connectors (cids),
    pre-skeletons (pre) and post-skeletons (post) are all optional; a generic
    skeleton list (skids) can be given instead of pre/post, but not combined
    with them.
    """

    cids = get_request_list(request.POST, 'cids', map_fn=int)
    skids = get_request_list(request.POST, 'skids', map_fn=int)
    skids_pre = get_request_list(request.POST, 'pre', map_fn=int)
    skids_post = get_request_list(request.POST, 'post', map_fn=int)

    cursor = connection.cursor()

    if skids_pre or skids_post:
        if skids:
            raise ValueError("The skids parameter can't be used together with "
                             "pre and/or post.")

        relations = get_relation_to_id_map(
            project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
        pre = relations['presynaptic_to']
        post = relations['postsynaptic_to']
    else:
        pre = post = None

    # Construct base query
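    # The statement is assembled incrementally: each optional filter below
    # appends a SQL fragment to query_parts and its values to query_params,
    # and everything is joined into one parameterized execute() at the end.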
    query_parts = [
        '''
        SELECT DISTINCT
               tc1.connector_id, c.location_x, c.location_y, c.location_z,
               tc1.treenode_id, tc1.skeleton_id, tc1.confidence, tc1.user_id,
               t1.location_x, t1.location_y, t1.location_z,
               tc2.treenode_id, tc2.skeleton_id, tc2.confidence, tc2.user_id,
               t2.location_x, t2.location_y, t2.location_z
        FROM connector c
    '''
    ]

    query_params = []

    # Add connector filter, if requested
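    # The "JOIN (VALUES (%s),(%s),...) rc(id)" pattern keeps the query fully
    # parameterized (no IDs formatted into the SQL string) by exposing the
    # requested connector IDs as a small inline relation.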
    if cids:
        cid_template = ",".join(("(%s)", ) * len(cids))
        query_parts.append('''
            JOIN (VALUES {}) rc(id) ON c.id = rc.id
        '''.format(cid_template))
        query_params.extend(cids)

    # Add pre-synaptic skeleton filter, if requested
    query_parts.append('''
        JOIN treenode_connector tc1 ON tc1.connector_id = c.id
        JOIN treenode t1 ON tc1.treenode_id = t1.id
    ''')
    if skids_pre:
        pre_skid_template = ",".join(("(%s)", ) * len(skids_pre))
        query_parts.append('''
            JOIN (VALUES {}) sk_pre(id) ON tc1.skeleton_id = sk_pre.id
        '''.format(pre_skid_template))
        query_params.extend(skids_pre)

    # Add post-synaptic skeleton filter, if requested
    query_parts.append('''
        JOIN treenode_connector tc2 ON tc2.connector_id = c.id
        JOIN treenode t2 ON tc2.treenode_id = t2.id
    ''')
    if skids_post:
        post_skid_template = ",".join(("(%s)", ) * len(skids_post))
        query_parts.append('''
            JOIN (VALUES {}) sk_post(id) ON tc2.skeleton_id = sk_post.id
        '''.format(post_skid_template))
        query_params.extend(skids_post)

    # Add generic skeleton filters
    if skids:
        skid_template = ",".join(("(%s)", ) * len(skids))
        query_parts.append('''
            JOIN (VALUES {}) sk(id) ON tc1.skeleton_id = sk.id OR tc2.skeleton_id = sk.id
        '''.format(skid_template))
        query_params.extend(skids)

    # Prevent self-joins of connector partners
    query_parts.append('''
        WHERE tc1.id != tc2.id
    ''')

    # Pre-synaptic skeleton filters also constrain the relation
    if skids_pre:
        query_parts.append('''
            AND tc1.relation_id = %s
        ''')
        query_params.append(pre)

    # Post-synaptic skeleton filters also constrain the relation
    if skids_post:
        query_parts.append('''
            AND tc2.relation_id = %s
        ''')
        query_params.append(post)

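    # With only the generic skeleton filter, the tc1/tc2 join is symmetric and
    # each partner pair would appear twice, so keep a single ordering of the
    # two treenode IDs.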
    if skids:
        query_parts.append('''
            AND tc1.treenode_id < tc2.treenode_id
        ''')

    query_parts.append('''
        ORDER BY tc2.skeleton_id
    ''')

    cursor.execute("\n".join(query_parts), query_params)

    rows = tuple((row[0], (row[1], row[2], row[3]), row[4], row[5], row[6],
                  row[7], (row[8], row[9], row[10]), row[11], row[12], row[13],
                  row[14], (row[15], row[16], row[17]))
                 for row in cursor.fetchall())

    return HttpResponse(json.dumps(rows))
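The rows returned by connectors_info are purely positional. As a reading aid, here is a small client-side sketch that gives the fields names following the order of the SELECT list above; the names are assumptions (only the ordering is defined by the endpoint), and the first/second partner correspond to pre/post only when the pre/post filters constrain the relations.

import json
from collections import namedtuple

# Field names are illustrative; only the positional order is defined above.
ConnectorInfoRow = namedtuple('ConnectorInfoRow', [
    'connector_id', 'connector_xyz',
    'treenode1_id', 'skeleton1_id', 'confidence1', 'user1_id', 'treenode1_xyz',
    'treenode2_id', 'skeleton2_id', 'confidence2', 'user2_id', 'treenode2_xyz',
])

def parse_connectors_info(json_text):
    """Turn the JSON list of positional rows into named tuples."""
    return [ConnectorInfoRow(*row) for row in json.loads(json_text)]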