Example 1
def insert_into_log(project_id,
                    user_id,
                    op_type,
                    location=None,
                    freetext=None):
    """ Inserts a new entry into the log table. If the location parameter is
    passed, it is expected to be an iteratable (list, tuple).
    """
    # valid operation types
    operation_type_array = [
        "rename_root", "create_neuron", "rename_neuron", "remove_neuron",
        "move_neuron", "create_group", "rename_group", "remove_group",
        "move_group", "create_skeleton", "rename_skeleton", "remove_skeleton",
        "move_skeleton", "split_skeleton", "join_skeleton", "reroot_skeleton",
        "change_confidence", "reset_reviews"
    ]

    if op_type not in operation_type_array:
        return {'error': 'Operation type {0} not valid'.format(op_type)}

    new_log = Log()
    new_log.user_id = user_id
    new_log.project_id = project_id
    new_log.operation_type = op_type
    if location is not None:
        new_log.location = Double3D(*location)
    if freetext is not None:
        new_log.freetext = freetext

    new_log.save()
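
A minimal usage sketch for this variant; the import path below is an assumption, and Log and Double3D are the CATMAID models used above:

# Hypothetical usage sketch; the import path is an assumption.
from catmaid.control.common import insert_into_log

result = insert_into_log(project_id=1,
                         user_id=2,
                         op_type="create_neuron",
                         location=(10.0, 20.0, 30.0),  # any 3-element iterable
                         freetext="created from a script")
if result is not None:
    print(result['error'])  # this variant reports an invalid op_type as a dict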
Example 2
def insert_into_log(project_id: Union[int, str],
                    user_id,
                    op_type: str,
                    location=None,
                    freetext=None) -> Optional[Dict[str, str]]:
    """ Inserts a new entry into the log table. If the location parameter is
    passed, it is expected to be an iteratable (list, tuple).
    """
    # valid operation types
    operation_type_array = [
        "rename_root", "create_neuron", "rename_neuron", "remove_neuron",
        "move_neuron", "create_group", "rename_group", "remove_group",
        "move_group", "create_skeleton", "rename_skeleton", "remove_skeleton",
        "move_skeleton", "split_skeleton", "join_skeleton", "reroot_skeleton",
        "change_confidence", "reset_reviews"
    ]

    if op_type not in operation_type_array:
        raise ValueError(f'Operation type {op_type} not valid')

    new_log = Log()
    new_log.user_id = user_id
    new_log.project_id = project_id
    new_log.operation_type = op_type
    if location is not None:
        new_log.location = Double3D(*location)
    if freetext is not None:
        new_log.freetext = freetext

    new_log.save()
    return None
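
Unlike Example 1, this variant raises instead of returning an error dictionary, so callers wrap it in try/except; a short sketch:

# Hypothetical usage sketch for the raising variant above.
try:
    insert_into_log(1, 2, "not_a_valid_op")
except ValueError as e:
    print(e)  # Operation type not_a_valid_op not valid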
Example 3
def _export_review_skeleton(project_id=None, skeleton_id=None, format=None):
    treenodes = Treenode.objects.filter(skeleton_id=skeleton_id).values_list(
        'id', 'location', 'parent_id', 'reviewer_id')

    g = nx.DiGraph()
    reviewed = set()
    for t in treenodes:
        loc = Double3D.from_str(t[1])
        # While at it, send the reviewer ID, which is useful to iterate fwd
        # to the first unreviewed node in the segment.
        g.add_node(t[0], {
            'id': t[0],
            'x': loc.x,
            'y': loc.y,
            'z': loc.z,
            'rid': t[3]
        })
        if -1 != t[3]:
            reviewed.add(t[0])
        if t[2]:  # if parent
            g.add_edge(t[2], t[0])  # edge from parent to child
        else:
            root_id = t[0]

    # Create all sequences, as long as possible and always from end towards root
    distances = edge_count_to_root(
        g, root_node=root_id)  # distance in number of edges from root
    seen = set()
    sequences = []
    # Iterate end nodes sorted from highest to lowest distance to root
    endNodeIDs = (nID for nID in g.nodes() if 0 == len(g.successors(nID)))
    for nodeID in sorted(endNodeIDs, key=distances.get, reverse=True):
        sequence = [g.node[nodeID]]
        parents = g.predecessors(nodeID)
        while parents:
            parentID = parents[0]
            sequence.append(g.node[parentID])
            if parentID in seen:
                break
            seen.add(parentID)
            parents = g.predecessors(parentID)

        if len(sequence) > 1:
            sequences.append(sequence)

    segments = []
    for sequence in sorted(sequences, key=len, reverse=True):
        segments.append({
            'id': len(segments),
            'sequence': sequence,
            'status': '%.2f' % (100.0 * sum(1 for node in sequence
                                            if node['id'] in reviewed) /
                                len(sequence)),
            'nr_nodes': len(sequence)
        })
    return segments
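
The helper returns plain dictionaries, so callers can summarize review coverage directly; a sketch assuming the function above is importable:

# Hypothetical usage sketch of the return value of _export_review_skeleton.
segments = _export_review_skeleton(project_id=1, skeleton_id=42)
for segment in segments:
    print('segment %s: %s nodes, %s%% reviewed'
          % (segment['id'], segment['nr_nodes'], segment['status']))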
Example 4
def link_roi_to_class_instance(request, project_id=None, relation_id=None,
        stack_id=None, ci_id=None):
    """ With the help of this method one can link a region of interest
    (ROI) to a class instance. The information about the ROI is passed
    as POST variables.
    """
    # Try to get all needed POST parameters
    x_min = float(request.POST['x_min'])
    x_max = float(request.POST['x_max'])
    y_min = float(request.POST['y_min'])
    y_max = float(request.POST['y_max'])
    z = float(request.POST['z'])
    zoom_level = int(request.POST['zoom_level'])
    rotation_cw = int(request.POST['rotation_cw'])

    # Get related objects
    project = Project.objects.get(id=project_id)
    stack = Stack.objects.get(id=stack_id)
    ci = ClassInstance.objects.get(id=ci_id)
    rel = Relation.objects.get(id=relation_id)

    # Calculate ROI center and extent
    cx = (x_max + x_min) * 0.5
    cy = (y_max + y_min) * 0.5
    cz = z
    width = abs(x_max - x_min)
    height = abs(y_max - y_min)

    # Create a new ROI class instance
    roi = RegionOfInterest()
    roi.user = request.user
    roi.editor = request.user
    roi.project = project
    roi.stack = stack
    roi.zoom_level = zoom_level
    roi.location = Double3D(cx, cy, cz)
    roi.width = width
    roi.height = height
    roi.rotation_cw = rotation_cw
    roi.save()

    # Link ROI and class instance
    roi_ci = RegionOfInterestClassInstance()
    roi_ci.user = request.user
    roi_ci.project = project
    roi_ci.relation = rel
    roi_ci.region_of_interest = roi
    roi_ci.class_instance = ci
    roi_ci.save()

    # Create cropped image, if wanted
    if settings.ROI_AUTO_CREATE_IMAGE:
        file_name, file_path = create_roi_path(roi.id)
        create_roi_image(request.user, project_id, roi.id, file_path)

    # Build result data set
    status = {'status': "Created new ROI with ID %s." % roi.id}

    return HttpResponse(json.dumps(status))
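
A hedged request sketch using Django's test client; the URL pattern below is an assumption, the real route is defined in the project's urls.py:

# Hypothetical request sketch; the URL pattern is an assumption.
from django.test import Client

c = Client()
c.login(username='test', password='test')
response = c.post('/1/roi/link/2/stack/3/ci/4/', {
    'x_min': 100.0, 'x_max': 300.0,
    'y_min': 150.0, 'y_max': 250.0,
    'z': 45.0,
    'zoom_level': 0,
    'rotation_cw': 0,
})
print(response.content)  # {"status": "Created new ROI with ID ..."}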
Example 5
def _create_interpolated_treenode(request, params, project_id, skip_last):
    """ Create interpolated treenodes between the 'parent_id' and the clicked x,y,z
    coordinate. The skip_last is to prevent the creation of the last node, used by
    the join_skeletons_interpolated. """
    response_on_error = 'Could not create interpolated treenode'
    try:
        parent = Treenode.objects.get(pk=params['parent_id'])
        parent_skeleton_id = parent.skeleton_id
        loc = parent.location
        parent_x = decimal.Decimal(loc.x)
        parent_y = decimal.Decimal(loc.y)
        parent_z = decimal.Decimal(loc.z)

        steps = abs((params['z'] - parent_z) / params['resz']).quantize(
            decimal.Decimal('1'), rounding=decimal.ROUND_FLOOR)
        if steps == decimal.Decimal(0):
            steps = decimal.Decimal(1)

        dx = (params['x'] - parent_x) / steps
        dy = (params['y'] - parent_y) / steps
        dz = (params['z'] - parent_z) / steps

        broken_slices = set(
            int(bs.index)
            for bs in BrokenSlice.objects.filter(stack=params['stack_id']))
        sign = -1 if dz < 0 else 1

        # Loop the creation of treenodes in z resolution steps until target
        # section is reached
        parent_id = params['parent_id']
        atn_slice_index = ((parent_z - params['stack_translation_z']) /
                           params['resz']).quantize(
                               decimal.Decimal('1'),
                               rounding=decimal.ROUND_FLOOR)
        for i in range(1, int(steps) + (0 if skip_last else 1)):
            if (atn_slice_index + i * sign) in broken_slices:
                continue
            response_on_error = 'Error while trying to insert treenode.'
            new_treenode = Treenode()
            new_treenode.user_id = request.user.id
            new_treenode.editor_id = request.user.id
            new_treenode.project_id = project_id
            new_treenode.location = Double3D(float(parent_x + dx * i),
                                             float(parent_y + dy * i),
                                             float(parent_z + dz * i))
            new_treenode.radius = params['radius']
            new_treenode.skeleton_id = parent_skeleton_id
            new_treenode.confidence = params['confidence']
            new_treenode.parent_id = parent_id  # This is not a root node.
            new_treenode.save()

            parent_id = new_treenode.id

        # parent_id contains the ID of the last added node
        return parent_id, parent_skeleton_id

    except Exception as e:
        raise Exception(response_on_error + ':' + str(e))
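
The treenode placement above is plain linear interpolation in z-resolution steps; a self-contained sketch of the same arithmetic:

# Self-contained sketch of the interpolation arithmetic used above.
import decimal

parent = (decimal.Decimal('0'), decimal.Decimal('0'), decimal.Decimal('0'))
target = (decimal.Decimal('30'), decimal.Decimal('15'), decimal.Decimal('27'))
resz = decimal.Decimal('9')  # section thickness

steps = abs((target[2] - parent[2]) / resz).quantize(
    decimal.Decimal('1'), rounding=decimal.ROUND_FLOOR) or decimal.Decimal(1)
dx, dy, dz = ((t - p) / steps for t, p in zip(target, parent))

for i in range(1, int(steps) + 1):
    print(float(parent[0] + dx * i),
          float(parent[1] + dy * i),
          float(parent[2] + dz * i))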
Example 6
def _export_review_skeleton(project_id=None, skeleton_id=None, format=None):
    """ Returns a list of segments for the requested skeleton. Each segment
    contains information about the review status of this part of the skeleton.
    """
    # Get all treenodes of the requested skeleton
    treenodes = Treenode.objects.filter(skeleton_id=skeleton_id).values_list('id', 'location', 'parent_id')
    # Get all reviews for the requested skeleton
    reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])

    # Add each treenode to a networkx graph and attach reviewer information to
    # it.
    g = nx.DiGraph()
    reviewed = set()
    for t in treenodes:
        loc = Double3D.from_str(t[1])
        # While at it, send the reviewer IDs, which is useful to iterate fwd
        # to the first unreviewed node in the segment.
        g.add_node(t[0], {'id': t[0], 'x': loc.x, 'y': loc.y, 'z': loc.z, 'rids': reviews[t[0]]})
        if reviews[t[0]]:
            reviewed.add(t[0])
        if t[2]: # if parent
            g.add_edge(t[2], t[0]) # edge from parent to child
        else:
            root_id = t[0]

    # Create all sequences, as long as possible and always from end towards root
    distances = edge_count_to_root(g, root_node=root_id) # distance in number of edges from root
    seen = set()
    sequences = []
    # Iterate end nodes sorted from highest to lowest distance to root
    endNodeIDs = (nID for nID in g.nodes() if 0 == len(g.successors(nID)))
    for nodeID in sorted(endNodeIDs, key=distances.get, reverse=True):
        sequence = [g.node[nodeID]]
        parents = g.predecessors(nodeID)
        while parents:
            parentID = parents[0]
            sequence.append(g.node[parentID])
            if parentID in seen:
                break
            seen.add(parentID)
            parents = g.predecessors(parentID)

        if len(sequence) > 1:
            sequences.append(sequence)

    # Calculate status

    segments = []
    for sequence in sorted(sequences, key=len, reverse=True):
        segments.append({
            'id': len(segments),
            'sequence': sequence,
            'status': '%.2f' % (100.0 * sum(1 for node in sequence if node['id'] in reviewed) / len(sequence)),
            'nr_nodes': len(sequence)
        })
    return segments
Example 7
def _update(Kind, table, nodes, now, user):
    if not nodes:
        return
    # 0: id
    # 1: X
    # 2: Y
    # 3: Z
    can_edit_all_or_fail(user, (node[0] for node in nodes.itervalues()), table)
    for node in nodes.itervalues():
        Kind.objects.filter(id=int(node[0])).update(
            editor=user,
            edition_time=now,
            location=Double3D(float(node[1]), float(node[2]), float(node[3])))
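
_update expects a mapping whose values are [id, x, y, z] sequences; a hedged call sketch, where the payload values and the user variable are assumptions standing in for what the surrounding view provides:

# Hypothetical usage sketch; Treenode and user come from the surrounding code.
from django.utils import timezone

nodes = {
    '17': [17, '1250.0', '830.5', '90.0'],  # id, x, y, z as sent by the client
    '18': [18, '1260.0', '841.5', '90.0'],
}
_update(Treenode, 'treenode', nodes, timezone.now(), user)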
Example 8
def export_review_skeleton(request, project_id=None, skeleton_id=None, format=None):
    """
    Export the skeleton as a list of sequences of entries, each entry containing
    an id, a sequence of nodes, the percent of reviewed nodes, and the node count.
    """
    treenodes = Treenode.objects.filter(skeleton_id=skeleton_id).values_list('id', 'location', 'parent_id', 'reviewer_id')

    g = nx.DiGraph()
    reviewed = set()
    for t in treenodes:
        loc = Double3D.from_str(t[1])
        # While at it, send the reviewer ID, which is useful to iterate fwd
        # to the first unreviewed node in the segment.
        g.add_node(t[0], {'id': t[0], 'x': loc.x, 'y': loc.y, 'z': loc.z, 'rid': t[3]})
        if -1 != t[3]:
            reviewed.add(t[0])
        if t[2]: # if parent
            g.add_edge(t[2], t[0]) # edge from parent to child
        else:
            root_id = t[0]

    # Create all sequences, as long as possible and always from end towards root
    distances = edge_count_to_root(g, root_node=root_id) # distance in number of edges from root
    seen = set()
    sequences = []
    # Iterate end nodes sorted from highest to lowest distance to root
    endNodeIDs = (nID for nID in g.nodes() if 0 == len(g.successors(nID)))
    for nodeID in sorted(endNodeIDs, key=distances.get, reverse=True):
        sequence = [g.node[nodeID]]
        parents = g.predecessors(nodeID)
        while parents:
            parentID = parents[0]
            sequence.append(g.node[parentID])
            if parentID in seen:
                break
            seen.add(parentID)
            parents = g.predecessors(parentID)

        if len(sequence) > 1:
            sequences.append(sequence)

    segments = []
    for sequence in sorted(sequences, key=len, reverse=True):
        segments.append({
            'id': len(segments),
            'sequence': sequence,
            'status': '%.2f' % (100.0 * sum(1 for node in sequence if node['id'] in reviewed) / len(sequence)),
            'nr_nodes': len(sequence)
        })

    return HttpResponse(json.dumps(segments))
Example 9
    def insert_stack(self):
        s = Stack()
        s.title = "Example Stack"
        s.image_base = "http://incf.ini.uzh.ch/image-stack-fib/"
        s.trakem2_project = False
        s.dimension = Integer3D(x=2048, y=1536, z=460)
        s.resolution = Double3D(x=5.0001, y=5.0002, z=9.0003)
        s.num_zoom_levels = -1
        s.file_extension = 'jpg'
        s.tile_width = 256
        s.tile_height = 256
        s.tile_source_type = 1
        s.save()
        return s
Example 10
    def format_node_data(node):
        '''
        Formats node data for our json output.

        When we start using Django 1.4, we can use prefetch_related instead of using
        .values('treenode__xxx'), and will then be able to access a proper location
        object.
        '''
        location = Double3D.from_str(node['treenode__location'])
        return {
            'id': node['treenode'],
            'x': int(location.x),
            'y': int(location.y),
            'z': int(location.z),
            'skid': node['treenode__skeleton']}
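
Double3D.from_str parses the textual location column; a minimal sketch of the assumed round trip, where the "(x,y,z)" serialized form is an assumption consistent with Example 16 below:

# Hypothetical sketch; assumes the stored string looks like "(1230.5,845.0,90.0)".
loc = Double3D.from_str('(1230.5,845.0,90.0)')
print(int(loc.x), int(loc.y), int(loc.z))  # 1230 845 90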
Example 11
def update_textlabel(request: HttpRequest, project_id=None) -> HttpResponse:
    params = {}
    parameter_names = [
        'tid', 'pid', 'x', 'y', 'z', 'text', 'type', 'r', 'g', 'b', 'a',
        'font_name', 'font_style', 'font_size', 'scaling'
    ]
    for p in parameter_names:
        params[p] = request.POST.get(p, None)

    # Scaling is given 0 or 1 value by the caller, but our models use bool
    if params['scaling'] is not None:
        params['scaling'] = bool(int(params['scaling']))

    # Type must be either bubble or text.
    if params['type'] is not None:
        if (params['type'] != 'bubble'):
            params['type'] = 'text'

    response_on_error = ''
    try:
        response_on_error = 'Failed to find Textlabel with id %s.' % params['tid']
        label = Textlabel.objects.filter(id=params['tid'])[0]

        response_on_error = 'Failed to update Textlabel with id %s.' % params['tid']
        special_parameters = ['x', 'y', 'z', 'r', 'g', 'b', 'a', 'tid']
        # Set new values for label unless they haven't been specified or need
        # special handling.
        # for par in [p for p in parameter_names if p not in special_parameters]:
        for par in set(params.keys()).difference(special_parameters):
            if params[par] is not None:
                setattr(label, par, params[par])
        label.save()

        # If all parameters x, y and z have been specified, change the location
        if all(params[p] is not None for p in ['x', 'y', 'z']):
            response_on_error = 'Failed to update the location of textlabel with id %s' % params['tid']
            TextlabelLocation.objects.filter(textlabel=params['tid']).update(
                location=Double3D(float(params['x']), float(params['y']),
                                  float(params['z'])))

        return HttpResponse(' ')

    except Exception as e:
        raise ValueError(response_on_error + ':' + str(e))
Example 12
    def insert_new_treenode(parent_id=None, skeleton=None):
        """ If parent_id is not None and the parent's skeleton_id does not match
        skeleton.id, the database will raise an error, because skeleton_id is
        defined as a foreign key in the treenode table.
        """
        new_treenode = Treenode()
        new_treenode.user = request.user
        new_treenode.editor = request.user
        new_treenode.project_id = project_id
        new_treenode.location = Double3D(float(params['x']),
                                         float(params['y']),
                                         float(params['z']))
        new_treenode.radius = int(params['radius'])
        new_treenode.skeleton = skeleton
        new_treenode.confidence = int(params['confidence'])
        if parent_id:
            new_treenode.parent_id = parent_id
        new_treenode.save()
        return new_treenode
Example 13
    def insert_stack(self):
        s = Stack()
        s.title = "Example Stack"
        s.dimension = Integer3D(x=2048, y=1536, z=460)
        s.resolution = Double3D(x=5.0001, y=5.0002, z=9.0003)
        s.save()

        sm = StackMirror()
        sm.stack = s
        sm.image_base = "http://incf.ini.uzh.ch/image-stack-fib/"
        sm.file_extension = 'jpg'
        sm.tile_width = 256
        sm.tile_height = 256
        sm.tile_source_type = 1
        sm.save()

        return s
Example 14
def create_textlabel(request, project_id=None):
    print >> sys.stderr, 'creating text label'
    params = {}
    param_defaults = {
        'x': 0,
        'y': 0,
        'z': 0,
        'text': 'Edit this text...',
        'type': 'text',
        'r': 1,
        'g': 0.5,
        'b': 0,
        'a': 1,
        'fontname': False,
        'fontstyle': False,
        'fontsize': False,
        'scaling': False}
    for p in param_defaults.keys():
        params[p] = request.POST.get(p, param_defaults[p])
    if (params['type'] != 'bubble'):
        params['type'] = 'text'

    new_label = Textlabel(
        text=params['text'],
        type=params['type'],
        scaling=params['scaling']
    )
    new_label.project_id = project_id
    if params['fontname']:
        new_label.font_name = params['fontname']
    if params['fontstyle']:
        new_label.font_style = params['fontstyle']
    if params['fontsize']:
        new_label.font_size = params['fontsize']
    new_label.save()

    TextlabelLocation(
        textlabel=new_label,
        location=Double3D(float(params['x']), float(params['y']), float(params['z']))).save()

    return HttpResponse(json.dumps({'tid': new_label.id}))
Example 15
def create_connector(request, project_id=None):
    query_parameters = {}
    default_values = {'x': 0, 'y': 0, 'z': 0, 'confidence': 5}
    for p in default_values.keys():
        query_parameters[p] = request.POST.get(p, default_values[p])

    parsed_confidence = int(query_parameters['confidence'])
    if parsed_confidence < 1 or parsed_confidence > 5:
        return HttpResponse(
            json.dumps({'error': 'Confidence not in range 1-5 inclusive.'}))

    location = Double3D(x=float(query_parameters['x']),
                        y=float(query_parameters['y']),
                        z=float(query_parameters['z']))
    new_connector = Connector(user=request.user,
                              editor=request.user,
                              project=Project.objects.get(id=project_id),
                              location=location,
                              confidence=parsed_confidence)
    new_connector.save()

    return HttpResponse(json.dumps({'connector_id': new_connector.id}))
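
A hedged request sketch for this view with Django's test client, also showing the confidence range check; the URL pattern is an assumption:

# Hypothetical request sketch; the URL pattern is an assumption.
from django.test import Client

c = Client()
c.login(username='test', password='test')
ok = c.post('/1/connector/create', {'x': 1250.0, 'y': 830.5, 'z': 90.0, 'confidence': 5})
bad = c.post('/1/connector/create', {'x': 0, 'y': 0, 'z': 0, 'confidence': 9})
print(ok.content)   # {"connector_id": ...}
print(bad.content)  # {"error": "Confidence not in range 1-5 inclusive."}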
Example 16
def _parse_location(loc):
    return Double3D(*(imap(float, loc[1:-1].split(','))))
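
A minimal round-trip sketch for this parser; in Python 2, imap comes from itertools (in Python 3 the built-in map behaves the same way), and the "(x,y,z)" input format is an assumption:

# Hypothetical usage sketch; assumes the stored column text is "(x,y,z)".
loc = _parse_location('(1230.5,845.0,90.0)')
print(loc.x, loc.y, loc.z)  # 1230.5 845.0 90.0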
Example 17
def list_connector(request, project_id=None):
    stack_id = request.POST.get('stack_id', None)
    skeleton_id = request.POST.get('skeleton_id', None)

    def empty_result():
        return HttpResponse(
            json.dumps({
                'iTotalRecords': 0,
                'iTotalDisplayRecords': 0,
                'aaData': []
            }))

    if not skeleton_id:
        return empty_result()
    else:
        skeleton_id = int(skeleton_id)

    relation_type = int(request.POST.get('relation_type',
                                         0))  # 0: Presyn, 1 Postsyn, 2 Gj
    display_start = int(request.POST.get('iDisplayStart', 0))
    display_length = int(request.POST.get('iDisplayLength', 0))
    sorting_column = int(request.POST.get('iSortCol_0', 0))
    sort_descending = upper(request.POST.get('sSortDir_0', 'DESC')) != 'ASC'

    response_on_error = ''
    try:
        response_on_error = 'Could not fetch relations.'
        relation_map = get_relation_to_id_map(project_id)
        for rel in [
                'presynaptic_to', 'postsynaptic_to', 'gapjunction_with',
                'element_of', 'labeled_as'
        ]:
            if rel not in relation_map:
                raise Exception('Failed to find the required relation %s' %
                                rel)

        if relation_type == 1:
            relation_type_id = relation_map['presynaptic_to']
            inverse_relation_type_id = relation_map['postsynaptic_to']
        elif relation_type == 2:
            relation_type_id = relation_map['gapjunction_with']
            inverse_relation_type_id = relation_map['gapjunction_with']
        else:
            relation_type_id = relation_map['postsynaptic_to']
            inverse_relation_type_id = relation_map['presynaptic_to']

        response_on_error = 'Failed to select connectors.'
        cursor = connection.cursor()
        cursor.execute(
            '''
            SELECT
            connector.id AS connector_id,
            tn_other.user_id AS connector_user_id,
            treenode_user.username AS connector_username,
            connector.location_x AS connector_x,
            connector.location_y AS connector_y,
            connector.location_z AS connector_z,
            tn_other.id AS other_treenode_id,
            tn_other.location_x AS other_treenode_x,
            tn_other.location_y AS other_treenode_y,
            tn_other.location_z AS other_treenode_z,
            tn_other.skeleton_id AS other_skeleton_id,
            tn_this.location_x AS this_treenode_x,
            tn_this.location_y AS this_treenode_y,
            tn_this.location_z AS this_treenode_z,
            tn_this.id AS this_treenode_id,
            tc_this.relation_id AS this_to_connector_relation_id,
            tc_other.relation_id AS connector_to_other_relation_id,
            tc_other.confidence AS confidence,
            to_char(connector.edition_time, 'DD-MM-YYYY HH24:MI') AS last_modified
            FROM
            treenode tn_other,
            treenode_connector tc_other,
            connector,
            "auth_user" treenode_user,
            treenode_connector tc_this,
            treenode tn_this
            WHERE
            treenode_user.id = tn_other.user_id AND
            tn_other.id = tc_other.treenode_id AND
            tc_other.connector_id = connector.id AND
            tc_other.relation_id = %s AND
            tc_this.connector_id = connector.id AND
            tn_this.id = tc_this.treenode_id AND
            tn_this.skeleton_id = %s AND
            tc_this.relation_id = %s
            ORDER BY
            connector_id, other_treenode_id, this_treenode_id
            ''', [inverse_relation_type_id, skeleton_id, relation_type_id])

        connectors = cursor_fetch_dictionary(cursor)
        connected_skeletons = map(lambda con: con['other_skeleton_id'],
                                  connectors)
        connector_ids = map(lambda con: con['connector_id'], connectors)

        response_on_error = 'Failed to find counts of treenodes in skeletons.'
        skel_tn_count = Treenode.objects.filter(skeleton__in=connected_skeletons)\
        .values('skeleton').annotate(treenode_count=Count('skeleton'))
        # .values to group by skeleton_id. See http://tinyurl.com/dj-values-annotate

        skeleton_to_treenode_count = {}
        for s in skel_tn_count:
            skeleton_to_treenode_count[s['skeleton']] = s['treenode_count']

        # Rather than do a LEFT OUTER JOIN to also include the connectors
        # with no partners, just do another query to find the connectors
        # without the conditions:

        response_on_error = 'Failed to select all connectors.'
        cursor.execute(
            '''
            SELECT
            connector.id AS connector_id,
            connector.user_id AS connector_user_id,
            connector_user.username AS connector_username,
            connector.location_x AS connector_x,
            connector.location_y AS connector_y,
            connector.location_z AS connector_z,
            tn_this.id AS this_treenode_id,
            tc_this.relation_id AS this_to_connector_relation_id,
            tc_this.confidence AS confidence,
            to_char(connector.edition_time, 'DD-MM-YYYY HH24:MI') AS last_modified
            FROM
            connector,
            "auth_user" connector_user,
            treenode_connector tc_this,
            treenode tn_this
            WHERE
            connector_user.id = connector.user_id AND
            tc_this.connector_id = connector.id AND
            tn_this.id = tc_this.treenode_id AND
            tn_this.skeleton_id = %s AND
            tc_this.relation_id = %s
            ORDER BY
            connector_id, this_treenode_id
            ''', [skeleton_id, relation_type_id])
        for row in cursor_fetch_dictionary(cursor):
            connector_id = row['connector_id']
            if connector_id not in connector_ids:
                connectors.append(row)
                connector_ids.append(connector_id)

        # For each of the connectors, find all of its labels:
        response_on_error = 'Failed to find the labels for connectors'
        if connector_ids:
            connector_labels = ConnectorClassInstance.objects.filter(
                project=project_id,
                connector__in=connector_ids,
                relation=relation_map['labeled_as']).values(
                    'connector', 'class_instance__name')

            labels_by_connector = {}  # Key: Connector ID, Value: List of labels.
            for label in connector_labels:
                if label['connector'] not in labels_by_connector:
                    labels_by_connector[label['connector']] = [
                        label['class_instance__name']
                    ]
                else:
                    labels_by_connector[label['connector']].append(
                        label['class_instance__name'])
            # Sort labels by name
            for labels in labels_by_connector.values():
                labels.sort(key=upper)

        total_result_count = len(connectors)

        if 0 == total_result_count:
            return empty_result()

        # Paging
        if display_length == 0:
            connectors = connectors[display_start:]
            connector_ids = connector_ids[display_start:]
        else:
            connectors = connectors[display_start:display_start +
                                    display_length]
            connector_ids = connector_ids[display_start:display_start +
                                          display_length]

        response_on_error = 'Could not retrieve resolution and translation parameters for project.'
        if stack_id:
            resolution = get_object_or_404(Stack, id=int(stack_id)).resolution
            translation = get_object_or_404(ProjectStack,
                                            stack=int(stack_id),
                                            project=project_id).translation
        else:
            resolution = Double3D(1.0, 1.0, 1.0)
            translation = Double3D(0.0, 0.0, 0.0)

        # Format output
        aaData_output = []
        for c in connectors:
            response_on_error = 'Failed to format output for connector with ID %s.' % c[
                'connector_id']
            if 'other_skeleton_id' in c:
                connected_skeleton_treenode_count = skeleton_to_treenode_count[
                    c['other_skeleton_id']]
            else:
                c['other_skeleton_id'] = ''
                c['other_treenode_id'] = ''
                c['other_treenode_x'] = c['connector_x']
                c['other_treenode_y'] = c['connector_y']
                c['other_treenode_z'] = c['connector_z']
                connected_skeleton_treenode_count = 0

            if c['connector_id'] in labels_by_connector:
                labels = ', '.join(
                    map(str, labels_by_connector[c['connector_id']]))
            else:
                labels = ''

            row = []
            row.append(c['connector_id'])
            row.append(c['other_skeleton_id'])
            row.append(c['other_treenode_x'])  #('%.2f' % )
            row.append(c['other_treenode_y'])
            z = c['other_treenode_z']
            row.append(z)
            # FIXME: This is the only place we need a stack and this could be
            # done in the client as well. So do we really want to keep this and
            # have a more complicated API?
            row.append(int((z - translation.z) / resolution.z))
            row.append(c['confidence'])
            row.append(labels)
            row.append(connected_skeleton_treenode_count)
            row.append(c['connector_username'])
            row.append(c['other_treenode_id'])
            row.append(c['last_modified'])
            aaData_output.append(row)

        # Sort output
        def fetch_value_for_sorting(row):
            value = row[sorting_column]
            if isinstance(value, str) or isinstance(value, unicode):
                return upper(value)
            return value

        aaData_output.sort(key=fetch_value_for_sorting)

        # Fix excessive decimal precision in coordinates
        for row in aaData_output:
            row[2] = float('%.2f' % row[2])
            row[3] = float('%.2f' % row[3])
            row[4] = float('%.2f' % row[4])

        if sort_descending:
            aaData_output.reverse()

        return HttpResponse(
            json.dumps({
                'iTotalRecords': total_result_count,
                'iTotalDisplayRecords': total_result_count,
                'aaData': aaData_output
            }))

    except Exception as e:
        raise Exception(response_on_error + ':' + str(e))
Example 18
def label_update(request, project_id=None, location_id=None, ntype=None):
    """ location_id is the ID of a treenode or connector.
        ntype is either 'treenode' or 'connector'. """
    labeled_as_relation = Relation.objects.get(project=project_id,
                                               relation_name='labeled_as')
    p = get_object_or_404(Project, pk=project_id)

    # TODO: this will fail when a tag itself contains a comma
    new_tags = request.POST['tags'].split(',')
    delete_existing_labels = request.POST.get('delete_existing',
                                              'true') == 'true'

    kwargs = {
        'relation': labeled_as_relation,
        'class_instance__class_column__class_name': 'label'
    }

    table = get_link_model(ntype)
    if 'treenode' == ntype:
        kwargs['treenode__id'] = location_id
        node = Treenode.objects.get(id=location_id)
    elif 'connector' == ntype:
        kwargs['connector__id'] = location_id
        node = Connector.objects.get(id=location_id)

    if not table:
        raise Http404('Unknown node type: "%s"' % (ntype, ))

    # Get the existing list of tags for the tree node/connector and delete any
    # that are not in the new list.
    existingLabels = table.objects.filter(
        **kwargs).select_related('class_instance__name')
    existing_names = set(ele.class_instance.name for ele in existingLabels)
    labels_to_delete = table.objects.filter(**kwargs).exclude(
        class_instance__name__in=new_tags)

    if delete_existing_labels:
        # Iterate over all labels that should get deleted to check permission
        # on each one. Remember each label that couldn't be deleted in the
        # other_labels array.
        other_labels = []
        deleted_labels = []
        for l in labels_to_delete:
            try:
                can_edit_or_fail(request.user, l.id, table._meta.db_table)
                if remove_label(l.id, ntype):
                    deleted_labels.append(l)
                else:
                    other_labels.append(l)
            except:
                other_labels.append(l)

        # Create change requests for labels associated to the treenode by other users
        for label in other_labels:
            change_request_params = {
                'type': 'Remove Tag',
                'project': p,
                'user': request.user,
                'recipient': node.user,
                'location': Double3D(node.location_x, node.location_y, node.location_z),
                ntype: node,
                'description': "Remove tag '%s'" % label.class_instance.name,
                'validate_action':
                    'from catmaid.control.label import label_exists\n' +
                    'is_valid = label_exists(%s, "%s")' % (str(label.id), ntype),
                'approve_action':
                    'from catmaid.control.label import remove_label\n' +
                    'remove_label(%s, "%s")' % (str(label.id), ntype)
            }
            ChangeRequest(**change_request_params).save()

    # Add any new labels.
    label_class = Class.objects.get(project=project_id, class_name='label')
    kwargs = {
        'user': request.user,
        'project': p,
        'relation': labeled_as_relation,
        ntype: node
    }

    for tag_name in new_tags:
        if len(tag_name) > 0 and tag_name not in existing_names:
            # Make sure the tag instance exists
            existing_tags = tuple(
                ClassInstance.objects.filter(project=p,
                                             name=tag_name,
                                             class_column=label_class))
            if len(existing_tags) < 1:
                tag = ClassInstance(project=p,
                                    name=tag_name,
                                    user=request.user,
                                    class_column=label_class)
                tag.save()
            else:
                tag = existing_tags[0]

            # Associate the tag with the treenode/connector.
            kwargs['class_instance'] = tag
            tci = table(
                **kwargs
            )  # creates new TreenodeClassInstance or ConnectorClassInstance
            tci.save()

            if node.user != request.user:
                # Inform the owner of the node that the tag was added and give them the option of removing it.
                change_request_params = {
                    'type': 'Add Tag',
                    'description': "Added tag '" + tag_name + "'",
                    'project': p,
                    'user': request.user,
                    'recipient': node.user,
                    'location': Double3D(node.location_x, node.location_y,
                                         node.location_z),
                    ntype: node,
                    'validate_action':
                        'from catmaid.control.label import label_exists\n' +
                        'is_valid = label_exists(%s, "%s")' % (str(tci.id), ntype),
                    'reject_action':
                        'from catmaid.control.label import remove_label\n' +
                        'remove_label(%s, "%s")' % (str(tci.id), ntype)
                }
                ChangeRequest(**change_request_params).save()

    return HttpResponse(json.dumps({'message': 'success'}),
                        content_type='application/json')
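
A hedged request sketch; the URL pattern below is an assumption, and tags are sent as one comma-separated string, which is why the TODO above matters:

# Hypothetical request sketch; the URL pattern is an assumption.
from django.test import Client

c = Client()
c.login(username='test', password='test')
response = c.post('/1/label/treenode/17/update', {
    'tags': 'soma,uncertain end',   # comma-separated, see the TODO above
    'delete_existing': 'true',
})
print(response.content)  # {"message": "success"}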
Example 19
def import_projects(user, pre_projects, make_public, tags, permissions,
                    tile_width, tile_height, tile_source_type,
                    cls_graph_ids_to_link):
    """ Creates real CATMAID projects out of the PreProject objects
    and imports them into CATMAID.
    """
    imported = []
    not_imported = []
    for pp in pre_projects:
        try:
            # Create stacks and add them to project
            stacks = []
            for s in pp.stacks:
                stack = Stack.objects.create(title=s.name,
                                             dimension=s.dimension,
                                             resolution=s.resolution,
                                             image_base=s.image_base,
                                             num_zoom_levels=s.num_zoom_levels,
                                             file_extension=s.file_extension,
                                             tile_width=tile_width,
                                             tile_height=tile_height,
                                             tile_source_type=tile_source_type,
                                             metadata=s.metadata)
                stacks.append(stack)
                # Add overlays of this stack
                for o in s.overlays:
                    Overlay.objects.create(title=o.name,
                                           stack=stack,
                                           image_base=o.image_base,
                                           default_opacity=o.default_opacity,
                                           file_extension=o.file_extension,
                                           tile_width=tile_width,
                                           tile_height=tile_height,
                                           tile_source_type=tile_source_type)
            # Create new project
            p = Project.objects.create(title=pp.name, public=make_public)
            # Assign permissions to project
            assigned_permissions = []
            for user_or_group, perm in permissions:
                assigned_perm = assign(perm.codename, user_or_group, p)
                assigned_permissions.append(assigned_perm)
            # Tag the project
            p.tags.add(*tags)
            # Add stacks to project
            for s in stacks:
                trln = Double3D()
                ps = ProjectStack.objects.create(project=p,
                                                 stack=s,
                                                 translation=trln)
            # Make project persistent
            p.save()
            # Link classification graphs
            for cg in cls_graph_ids_to_link:
                workspace = settings.ONTOLOGY_DUMMY_PROJECT_ID
                cgroot = ClassInstance.objects.get(pk=cg)
                link_existing_classification(workspace, user, p, cgroot)
            # Remember created project
            imported.append(pp)
        except Exception as e:
            not_imported.append((pp, e))

    return (imported, not_imported)
Example 20
def label_update(request, project_id=None, location_id=None, ntype=None):
    """ location_id is the ID of a treenode or connector.
        ntype is either 'treenode' or 'connector'. """
    labeled_as_relation = Relation.objects.get(project=project_id,
                                               relation_name='labeled_as')
    p = get_object_or_404(Project, pk=project_id)

    # TODO: this will fail when a tag itself contains a comma
    new_tags = request.POST['tags'].split(',')
    delete_existing_labels = request.POST.get('delete_existing',
                                              'true') == 'true'

    kwargs = {
        'relation': labeled_as_relation,
        'class_instance__class_column__class_name': 'label'
    }

    table = get_link_model(ntype)
    if 'treenode' == ntype:
        kwargs['treenode__id'] = location_id
        node = Treenode.objects.get(id=location_id)
    elif 'connector' == ntype:
        kwargs['connector__id'] = location_id
        node = Connector.objects.get(id=location_id)

    if not table:
        raise Http404('Unknown node type: "%s"' % (ntype, ))

    # Get the existing list of tags for the tree node/connector and delete any
    # that are not in the new list.
    existing_labels = table.objects.filter(
        **kwargs).select_related('class_instance__name')
    existing_names = set(ele.class_instance.name for ele in existing_labels)
    duplicate_labels = table.objects.filter(**kwargs).exclude(
        class_instance__name__in=new_tags).select_related(
            'class_instance__name')

    other_labels = []
    deleted_labels = []
    if delete_existing_labels:
        # Iterate over all labels that should get deleted to check permission
        # on each one. Remember each label that couldn't be deleted in the
        # other_labels array.
        for l in duplicate_labels:
            try:
                can_edit_or_fail(request.user, l.id, table._meta.db_table)
                if remove_label(l.id, ntype):
                    deleted_labels.append(l)
                else:
                    other_labels.append(l)
            except:
                other_labels.append(l)

        # Create change requests for labels associated to the treenode by other users
        for label in other_labels:
            change_request_params = {
                'type': 'Remove Tag',
                'project': p,
                'user': request.user,
                'recipient': node.user,
                'location': Double3D(node.location_x, node.location_y, node.location_z),
                ntype: node,
                'description': "Remove tag '%s'" % label.class_instance.name,
                'validate_action':
                    'from catmaid.control.label import label_exists\n' +
                    'is_valid = label_exists(%s, "%s")' % (str(label.id), ntype),
                'approve_action':
                    'from catmaid.control.label import remove_label\n' +
                    'remove_label(%s, "%s")' % (str(label.id), ntype)
            }
            ChangeRequest(**change_request_params).save()

    # Add any new labels.
    label_class = Class.objects.get(project=project_id, class_name='label')
    kwargs = {
        'user': request.user,
        'project': p,
        'relation': labeled_as_relation,
        ntype: node
    }

    new_labels = []
    for tag_name in new_tags:
        if len(tag_name) > 0 and tag_name not in existing_names:
            # Make sure the tag instance exists
            existing_tags = tuple(
                ClassInstance.objects.filter(project=p,
                                             name=tag_name,
                                             class_column=label_class))
            if len(existing_tags) < 1:
                tag = ClassInstance(project=p,
                                    name=tag_name,
                                    user=request.user,
                                    class_column=label_class)
                tag.save()
            else:
                tag = existing_tags[0]

            # Associate the tag with the treenode/connector.
            kwargs['class_instance'] = tag
            tci = table(
                **kwargs
            )  # creates new TreenodeClassInstance or ConnectorClassInstance
            tci.save()
            new_labels.append(tag_name)

            if node.user != request.user:
                # Inform the owner of the node that the tag was added and give them the option of removing it.
                change_request_params = {
                    'type': 'Add Tag',
                    'description': "Added tag '" + tag_name + "'",
                    'project': p,
                    'user': request.user,
                    'recipient': node.user,
                    'location': Double3D(node.location_x, node.location_y,
                                         node.location_z),
                    ntype: node,
                    'validate_action':
                        'from catmaid.control.label import label_exists\n' +
                        'is_valid = label_exists(%s, "%s")' % (str(tci.id), ntype),
                    'reject_action':
                        'from catmaid.control.label import remove_label\n' +
                        'remove_label(%s, "%s")' % (str(tci.id), ntype)
                }
                ChangeRequest(**change_request_params).save()

    response = {
        'message': 'success',
        'new_labels': new_labels,
        'duplicate_labels': [l.class_instance.name for l in duplicate_labels
                             if l not in deleted_labels],
        'deleted_labels': [l.class_instance.name for l in deleted_labels],
    }

    # Check if any labels on this node violate cardinality restrictions on
    # its skeleton.
    if 'treenode' == ntype:
        limited_labels = {
            l: SKELETON_LABEL_CARDINALITY[l]
            for l in new_tags if l in SKELETON_LABEL_CARDINALITY
        }

        if limited_labels:
            ll_names, ll_maxes = zip(*limited_labels.items())
            cursor = connection.cursor()
            cursor.execute(
                """
                SELECT
                  ll.name,
                  COUNT(tci.treenode_id),
                  ll.max
                FROM
                  class_instance ci,
                  treenode_class_instance tci,
                  treenode tn,
                  unnest(%s::text[], %s::integer[]) AS ll (name, max)
                WHERE ci.name = ll.name
                  AND ci.project_id = %s
                  AND ci.class_id = %s
                  AND tci.class_instance_id = ci.id
                  AND tci.relation_id = %s
                  AND tn.id = tci.treenode_id
                  AND tn.skeleton_id = %s
                GROUP BY
                  ll.name, ll.max
                HAVING
                  COUNT(tci.treenode_id) > ll.max
            """, (list(ll_names), list(ll_maxes), p.id, label_class.id,
                  labeled_as_relation.id, node.skeleton_id))

            if cursor.rowcount:
                response['warning'] = 'The skeleton has too many of the following tags: ' + \
                    ', '.join('{0} ({1}, max. {2})'.format(*row) for row in cursor.fetchall())

    return JsonResponse(response)
Example 21
def list_treenode_table(request, project_id=None):
    stack_id = request.POST.get('stack_id', None)
    specified_skeleton_count = request.POST.get('skeleton_nr', 0)
    display_start = request.POST.get('iDisplayStart', 0)
    display_length = request.POST.get('iDisplayLength', -1)
    should_sort = request.POST.get('iSortCol_0', None)
    filter_nodetype = request.POST.get('sSearch_1', None)
    filter_labels = request.POST.get('sSearch_2', None)

    relation_map = get_relation_to_id_map(project_id)

    response_on_error = ''
    try:

        def search_query_is_empty():
            if specified_skeleton_count == 0:
                return True
            first_skeleton_id = request.POST.get('skeleton_0', None)
            if first_skeleton_id is None:
                return True
            elif upper(first_skeleton_id) in ['NONE', 'NULL']:
                return True
            return False

        if search_query_is_empty():
            return HttpResponse(
                json.dumps({
                    'iTotalRecords': 0,
                    'iTotalDisplayRecords': 0,
                    'aaData': []
                }))
        else:
            response_on_error = 'Could not fetch %s skeleton IDs.' % \
                specified_skeleton_count
            skeleton_ids = [int(request.POST.get('skeleton_%s' % i, 0)) \
                for i in range(int(specified_skeleton_count))]

        if should_sort:
            column_count = int(request.POST.get('iSortingCols', 0))
            sorting_directions = [request.POST.get('sSortDir_%d' % d) \
                for d in range(column_count)]
            sorting_directions = map(lambda d: \
                '-' if upper(d) == 'DESC' else '', sorting_directions)

            fields = [
                'tid', 'type', '"treenode"."labels"', 'confidence', 'x', 'y',
                'z', '"treenode"."section"', 'radius', 'username',
                'last_modified'
            ]
            # TODO type field not supported.
            sorting_index = [int(request.POST.get('iSortCol_%d' % d)) \
                for d in range(column_count)]
            sorting_cols = map(lambda i: fields[i], sorting_index)

        response_on_error = 'Could not get the list of treenodes.'
        t = Treenode.objects \
            .filter(
                project=project_id,
                skeleton_id__in=skeleton_ids) \
            .extra(
                tables=['auth_user'],
                where=[
                    '"treenode"."user_id" = "auth_user"."id"'],
                select={
                    'tid': '"treenode"."id"',
                    'radius': '"treenode"."radius"',
                    'confidence': '"treenode"."confidence"',
                    'parent_id': '"treenode"."parent_id"',
                    'user_id': '"treenode"."user_id"',
                    'edition_time': '"treenode"."edition_time"',
                    'x': '"treenode"."location_x"',
                    'y': '"treenode"."location_y"',
                    'z': '"treenode"."location_z"',
                    'username': '"auth_user"."username"',
                    'last_modified': 'to_char("treenode"."edition_time", \'DD-MM-YYYY HH24:MI\')'
                }) \
            .distinct()
        # Rationale for using .extra():
        # Since we don't use .order_by() for ordering, extra fields are not
        # included in the SELECT statement, and so .distinct() will work as
        # intended. See http://tinyurl.com/dj-distinct
        if should_sort:
            t = t.extra(order_by=[di + col \
                for (di, col) in zip(sorting_directions, sorting_cols)])

        if int(display_length) == -1:
            treenodes = list(t[display_start:])
        else:
            treenodes = list(t[display_start:display_start + display_length])

        # The number of results to be displayed should include items that are
        # filtered out.
        row_count = len(treenodes)

        # Filter out irrelevant treenodes if a label has been specified
        if 'labeled_as' in relation_map:
            response_on_error = 'Could not retrieve labels for project.'
            project_labels = TreenodeClassInstance.objects.filter(
                project=project_id,
                relation=relation_map['labeled_as']).values(
                    'treenode', 'class_instance__name')
            labels_by_treenode = {}  # Key: Treenode ID, Value: List of labels.
            for label in project_labels:
                if label['treenode'] not in labels_by_treenode:
                    labels_by_treenode[label['treenode']] = [
                        label['class_instance__name']
                    ]
                else:
                    labels_by_treenode[label['treenode']].append(
                        label['class_instance__name'])

            if filter_labels:

                def label_filter(treenode):
                    if treenode.id not in labels_by_treenode:
                        return False
                    return upper(filter_labels) in upper(' '.join(
                        labels_by_treenode[treenode.tid]))

                treenodes = filter(label_filter, treenodes)

        # Filter out irrelevant treenodes if a node type has been specified.

        # Count treenode's children to derive treenode types. The number of
        # children a treenode has determines its type. Types:
        # R : root (parent = null)
        # S : slab (has one child)
        # B : branch (has more than one child)
        # L : leaf (has no children)
        # X : undefined (uh oh!)
        if 0 == display_start and -1 == display_length:
            # All nodes are loaded: determine child_count from loaded nodes
            child_count = {}
            for treenode in treenodes:
                if treenode.parent is None:
                    continue
                n_children = child_count.get(treenode.parent_id, 0)
                child_count[treenode.parent_id] = n_children + 1
        else:
            # Query for parents
            response_on_error = 'Could not retrieve treenode parents.'
            child_count_query = Treenode.objects.filter(
                project=project_id, skeleton_id__in=skeleton_ids).annotate(
                    child_count=Count('children'))
            child_count = {}
            for treenode in child_count_query:
                child_count[treenode.id] = treenode.child_count

        # Determine type
        for treenode in treenodes:
            if None == treenode.parent_id:
                treenode.nodetype = 'R'  # Root
                continue
            children = child_count.get(treenode.tid, 0)
            if children == 1:
                treenode.nodetype = 'S'  # Slab
            elif children == 0:
                treenode.nodetype = 'L'  # Leaf
            elif children > 1:
                treenode.nodetype = 'B'  # Branch
            else:
                treenode.nodetype = 'X'  # Unknown, can never happen

        # Now that we've assigned node types, filter based on them:
        if filter_nodetype:
            filter_nodetype = upper(filter_nodetype)
            treenodes = [t for t in treenodes if t.nodetype in filter_nodetype]

        users = dict(User.objects.all().values_list('id', 'username'))
        users[-1] = "None"  # Rather than AnonymousUser

        # Get all reviews for the current treenode set
        treenode_ids = [t.id for t in treenodes]
        treenode_to_reviews = get_treenodes_to_reviews(treenode_ids,
                                                       umap=lambda r: users[r])

        if stack_id:
            response_on_error = 'Could not retrieve resolution and translation ' \
                'parameters for project.'
            resolution = get_object_or_404(Stack, id=int(stack_id)).resolution
            translation = get_object_or_404(ProjectStack,
                                            stack=int(stack_id),
                                            project=project_id).translation
        else:
            resolution = Double3D(1.0, 1.0, 1.0)
            translation = Double3D(0.0, 0.0, 0.0)

        def formatTreenode(tn):
            row = [str(tn.tid)]
            row.append(tn.nodetype)
            if tn.tid in labels_by_treenode:
                row.append(', '.join(map(str, labels_by_treenode[tn.tid])))
            else:
                row.append('')
            row.append(str(tn.confidence))
            row.append('%.2f' % tn.x)
            row.append('%.2f' % tn.y)
            row.append('%.2f' % tn.z)
            row.append(int((tn.z - translation.z) / resolution.z))
            row.append(str(tn.radius))
            row.append(tn.username)
            row.append(tn.last_modified)
            row.append(', '.join(treenode_to_reviews.get(tn.id, ["None"])))
            return row

        result = {
            'iTotalRecords': row_count,
            'iTotalDisplayRecords': row_count
        }
        response_on_error = 'Could not format output.'
        result['aaData'] = map(formatTreenode, treenodes)

        return HttpResponse(json.dumps(result))

    except Exception as e:
        raise Exception(response_on_error + ':' + str(e))
Example 22
    def handle(self, *args, **options):

        if not options['user_id']:
            raise CommandError("You must specify a user ID with --user")

        user = User.objects.get(pk=options['user_id'])

        projects: Dict[str, Dict] = {
            'Default Project': {
                'stacks': []
            },
            'Evaluation data set': {
                'stacks': []
            },
            'Focussed Ion Beam (FIB)': {
                'stacks': []
            },
        }

        # Define the details of a stack for two of these projects:

        projects['Default Project']['stacks'].append({
            'title': 'Original data.',
            'dimension': Integer3D(4096, 4096, 16),
            'resolution': Double3D(3.2614000000000001, 3.2614000000000001, 60),
            'comment': '''<p>&copy;2007 by Stephan Saalfeld.</p>
<p>Rendered with <a href="http://www.povray.org/">POV-Ray&nbsp;v3.6</a>
using this <a href="http://fly.mpi-cbg.de/~saalfeld/download/volume.tar.bz2">scene-file</a>.</p>''',
            'mirrors': [{
                'title': 'Public copy',
                'image_base': 'http://fly.mpi-cbg.de/map/evaluation/original/',
            }]
        })

        projects['Focussed Ion Beam (FIB)']['stacks'].append({
            'title': 'Focussed Ion Beam (FIB) stack of Rat Striatum',
            'dimension': Integer3D(2048, 1536, 460),
            'resolution': Double3D(5, 5, 9),
            'comment': '''
<p>&copy;2009 <a href="http://people.epfl.ch/graham.knott">Graham Knott</a>.</p>
<p>Public INCF data set available at the
<a href="http://www.incf.org/about/nodes/switzerland/data">Swiss INCF Node</a>.</p>''',
            'mirrors': [{
                'title': 'FIB Public copy',
                'image_base': 'http://incf.ini.uzh.ch/image-stack-fib/',
            }]
        })

        # Make sure that each project and its stacks exist, and are
        # linked via ProjectStack:

        for project_title in projects:
            project_object, _ = Project.objects.get_or_create(
                title=project_title)
            for stack_dict in projects[project_title]['stacks']:
                stack, _ = Stack.objects.get_or_create(
                    title=stack_dict['title'],
                    defaults={
                        'dimension': stack_dict['dimension'],
                        'resolution': stack_dict['resolution'],
                    })
                mirrors = list(StackMirror.objects.filter(stack=stack))
                if not mirrors:
                    for m in stack_dict['mirrors']:
                        mirrors.append(
                            StackMirror.objects.create(
                                stack=stack,
                                title=m['title'],
                                image_base=m['image_base']))
                ProjectStack.objects.get_or_create(project=project_object,
                                                   stack=stack)
            projects[project_title]['project_object'] = project_object

        # Also set up the FIB project for tracing with treelines:

        tracing_project = projects['Focussed Ion Beam (FIB)']['project_object']

        call_command('catmaid_setup_tracing_for_project', '--project_id',
                     str(tracing_project.id), '--user', str(user.id))