Example #1
    def get(self, request: HttpRequest, project_id,
            sampler_id) -> JsonResponse:
        """Get details on a particular sampler.
        ---
        parameters:
         - name: project_id
           description: The project to operate in.
           type: integer
           paramType: path
           required: true
         - name: sampler_id
           description: The sampler to return.
           type: integer
           paramType: path
           required: true
         - name: with_domains
           description: Optional flag to include all domains of the sampler.
           type: boolean
           paramType: form
           required: false
           defaultValue: false
         - name: with_intervals
           description: Optional flag to include all intervals of all domains. Implies with_domains.
           type: boolean
           paramType: form
           required: false
           defaultValue: false
        """
        sampler_id = int(sampler_id)
        with_intervals = get_request_bool(request.GET, 'with_intervals', False)
        with_domains = get_request_bool(request.GET, 'with_domains',
                                        False) or with_intervals

        if with_domains:
            sampler = Sampler.objects.prefetch_related(
                'samplerdomain_set').get(pk=sampler_id)
        else:
            sampler = Sampler.objects.get(pk=sampler_id)

        sampler_detail = serialize_sampler(sampler)

        if with_domains:
            domains = []
            domains_and_ends = SamplerDomain.objects.filter(sampler=sampler_id) \
                    .prefetch_related('samplerdomainend_set')
            if with_intervals:
                domains_and_ends = domains_and_ends.prefetch_related(
                    'samplerinterval_set')

            for domain in domains_and_ends:
                domain_data = serialize_domain(domain,
                                               with_ends=True,
                                               with_intervals=with_intervals)
                domains.append(domain_data)
            sampler_detail['domains'] = domains

        return JsonResponse(sampler_detail)
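
For illustration, a hypothetical client call against this endpoint; the URL pattern and credentials are assumptions, not taken from CATMAID's routing table:

import requests

# Hypothetical route and host; the real pattern comes from CATMAID's urls.py.
resp = requests.get(
    'https://example.org/catmaid/3/samplers/17/',
    params={'with_intervals': 'true'},  # implies with_domains on the server
    auth=('user', 'password'),
)
resp.raise_for_status()
detail = resp.json()
print(detail.get('domains', []))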
Example #2
    def post(self, request, project_id):
        """List all available point clouds or optionally a sub set.
        ---
        parameters:
          - name: project_id
            description: Project of the returned point clouds
            type: integer
            paramType: path
            required: true
          - name: simple
            description: Whether only ID and name should be returned
            type: boolean
            paramType: form
            required: false
            defaultValue: false
          - name: with_images
            description: Whether linked images should be returned as well.
            type: boolean
            paramType: form
            required: false
            defaultValue: false
          - name: with_points
            description: Whether linked points should be returned as well.
            type: boolean
            paramType: form
            required: false
            defaultValue: false
          - name: sample_ratio
            description: Number in [0,1] to optionally sample the point cloud
            type: number
            paramType: form
            required: false
          - name: pointcloud_ids
            description: A list of point cloud IDs to which the query is constrained.
            type: array
            paramType: form
            required: false
        """
        with_images = get_request_bool(request.POST, 'with_images', False)
        with_points = get_request_bool(request.POST, 'with_points', False)
        sample_ratio = float(request.POST.get('sample_ratio', '1.0'))
        simple = get_request_bool(request.POST, 'simple', False)
        pointcloud_ids = get_request_list(request.POST,
                'pointcloud_ids', None, map_fn=int)

        pointclouds = list_pointclouds(project_id, request.user.id, simple,
                with_images, with_points, sample_ratio, pointcloud_ids)

        return JsonResponse(pointclouds, safe=False)
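
Several examples also depend on get_request_list. A minimal sketch consistent with its use here (a default value and an optional map_fn applied to every element); the actual helper in CATMAID's request utilities may behave differently:

def get_request_list(data, name, default=None, map_fn=None):
    """Read a list parameter from a QueryDict-like mapping.

    Supports both repeated keys (?name=a&name=b) and indexed keys
    (name[0]=a&name[1]=b), returning `default` if nothing was sent.
    """
    values = data.getlist(name) if hasattr(data, 'getlist') else []
    if not values:
        # Fall back to the indexed form used by some clients.
        values = [v for k, v in data.items() if k.startswith(name + '[')]
    if not values:
        return default
    return [map_fn(v) for v in values] if map_fn else list(values)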
Example #3
def add_relation_to_ontology(request:HttpRequest, project_id=None) -> JsonResponse:
    name = request.POST.get('relname', None)
    uri = request.POST.get('uri', '')
    description = request.POST.get('description', None)
    isreciprocal = bool(request.POST.get('isreciprocal', False))
    silent = get_request_bool(request.POST, 'silent', False)

    if name is None:
        raise Exception("Couldn't find name for new relation.")

    if trim_names:
        name = name.strip()

    if be_strict:
        # Make sure that there isn't already a relation with this name
        num_r = Relation.objects.filter(project_id = project_id,
            relation_name = name).count()
        if num_r > 0:
            if silent:
                return JsonResponse({
                    'already_present': True
                })
            else:
                raise Exception("A relation with the name '%s' already exists." % name)

    r = Relation.objects.create(user=request.user,
        project_id = project_id, relation_name = name, uri = uri,
        description = description, isreciprocal = isreciprocal)

    return JsonResponse({'relation_id': r.id})
Example #4
def add_class_to_ontology(request:HttpRequest, project_id=None) -> JsonResponse:
    name = request.POST.get('classname', None)
    description = request.POST.get('description', None)
    silent = get_request_bool(request.POST, 'silent', False)

    if name is None:
        raise Exception("Couldn't find name for new class.")

    if trim_names:
        name = name.strip()

    if be_strict:
        # Make sure that there isn't already a class with this name
        num_c = Class.objects.filter(project_id = project_id,
            class_name = name).count()
        if num_c > 0:
            if silent:
                return JsonResponse({
                    'already_present': True,
                })
            else:
                raise Exception("A class with the name '%s' already exists." % name)

    c = Class.objects.create(user=request.user,
        project_id = project_id, class_name = name,
        description = description)

    return JsonResponse({'class_id': c.id})
Example #5
def add_class_to_ontology(request, project_id=None):
    name = request.POST.get('classname', None)
    description = request.POST.get('description', None)
    silent = get_request_bool(request.POST, 'silent', False)

    if name is None:
        raise Exception("Couldn't find name for new class.")

    if trim_names:
        name = name.strip()

    if be_strict:
        # Make sure that there isn't already a class with this name
        num_c = Class.objects.filter(project_id = project_id,
            class_name = name).count()
        if num_c > 0:
            if silent:
                return JsonResponse({
                    'already_present': True,
                })
            else:
                raise Exception("A class with the name '%s' already exists." % name)

    c = Class.objects.create(user=request.user,
        project_id = project_id, class_name = name,
        description = description)

    return JsonResponse({'class_id': c.id})
Example #6
def add_relation_to_ontology(request, project_id=None):
    name = request.POST.get('relname', None)
    uri = request.POST.get('uri', '')
    description = request.POST.get('description', None)
    isreciprocal = bool(request.POST.get('isreciprocal', False))
    silent = get_request_bool(request.POST, 'silent', False)

    if name is None:
        raise Exception("Couldn't find name for new relation.")

    if trim_names:
        name = name.strip()

    if be_strict:
        # Make sure that there isn't already a relation with this name
        num_r = Relation.objects.filter(project_id = project_id,
            relation_name = name).count()
        if num_r > 0:
            if silent:
                return JsonResponse({
                    'already_present': True
                })
            else:
                raise Exception("A relation with the name '%s' already exists." % name)

    r = Relation.objects.create(user=request.user,
        project_id = project_id, relation_name = name, uri = uri,
        description = description, isreciprocal = isreciprocal)

    return JsonResponse({'relation_id': r.id})
Example #7
    def post(self, request: Request, project_id) -> Response:
        """Create a new project token.

        The request requires admin permissions in the project.
        ---
        serializer: SimpleProjectTokenSerializer
        """
        project = get_object_or_404(Project, pk=project_id)

        name = request.POST.get('name', '')
        needs_approval = get_request_bool(request.POST, 'needs_approval',
                                          False)
        default_permissions = set(
            get_request_list(request.POST, 'default_permissions', []))
        allowed_permissions = set(
            get_perms_for_model(Project).values_list('codename', flat=True))
        unknown_permissions = default_permissions - allowed_permissions
        if unknown_permissions:
            raise ValueError(
                f'Unknown permissions: {", ".join(unknown_permissions)}')

        token = ProjectToken.objects.create(
            **{
                'name': name,
                'user_id': request.user.id,
                'project_id': project.id,
                'needs_approval': needs_approval,
                'default_permissions': default_permissions,
            })
        if not name:
            token.name = f'Project token {token.id}'
            token.save()

        serializer = SimpleProjectTokenSerializer(token)
        return Response(serializer.data)
Example #8
File: log.py Project: tomka/CATMAID
def list_logs(request, project_id=None):
    if 'user_id' in request.POST:
        user_id = int(request.POST.get('user_id', -1))  # We can see logs for different users
    else:
        user_id = None
    whitelist = get_request_bool(request.POST, 'whitelist', False)
    operation_type = request.POST.get('operation_type', "-1")
    search_freetext = request.POST.get('search_freetext', "")

    display_start = int(request.POST.get('iDisplayStart', 0))
    display_length = int(request.POST.get('iDisplayLength', -1))
    if display_length < 0:
        display_length = 2000  # Default number of result rows

    should_sort = request.POST.get('iSortCol_0', False)
    if should_sort:
        column_count = int(request.POST.get('iSortingCols', 0))
        sorting_directions = [request.POST.get('sSortDir_%d' % d, 'DESC') for d in range(column_count)]
        sorting_directions = map(lambda d: '-' if d.upper() == 'DESC' else '', sorting_directions)

        fields = ['user', 'operation_type', 'creation_time', 'x', 'y', 'z', 'freetext']
        sorting_index = [int(request.POST.get('iSortCol_%d' % d)) for d in range(column_count)]
        sorting_cols = map(lambda i: fields[i], sorting_index)

    log_query = Log.objects.for_user(request.user).filter(project=project_id)
    if user_id:
        log_query = log_query.filter(user=user_id)
    if whitelist:
        log_query = log_query.filter(user_id__in=ReviewerWhitelist.objects.filter(
                project_id=project_id, user_id=request.user.id).values_list('reviewer_id'))
    if not operation_type == "-1":
        log_query = log_query.filter(operation_type=operation_type)
    if not search_freetext == "":
        log_query = log_query.filter(freetext__contains=search_freetext)

    log_query = log_query.extra(tables=['auth_user'], where=['"log"."user_id" = "auth_user"."id"'], select={
        'x': '("log"."location")."x"',
        'y': '("log"."location")."y"',
        'z': '("log"."location")."z"',
        # The scraped listing redacted this value; presumably it selects the
        # username column from the auth_user join above.
        'username': '"auth_user"."username"'
    })
    if should_sort:
        log_query = log_query.extra(order_by=[di + col for (di, col) in zip(sorting_directions, sorting_cols)])

    result = list(log_query[display_start:display_start + display_length])

    response = {'iTotalRecords': len(result), 'iTotalDisplayRecords': len(result), 'aaData': []}
    for log in result:
        response['aaData'] += [[
            log.username,
            log.operation_type,
            str(log.creation_time.isoformat()),
            log.x,
            log.y,
            log.z,
            log.freetext
        ]]

    return JsonResponse(response)
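
The iDisplayStart/iSortCol_0 names follow the legacy DataTables server-side protocol. A hypothetical request body that pages 50 rows and sorts by creation time, newest first (the endpoint URL is an assumption):

payload = {
    'iDisplayStart': 0,      # offset into the result set
    'iDisplayLength': 50,    # page size
    'iSortingCols': 1,       # sort by a single column
    'iSortCol_0': 2,         # index into the fields list: 'creation_time'
    'sSortDir_0': 'DESC',
    'operation_type': '-1',  # no operation filter
}
# e.g. requests.post('https://example.org/catmaid/3/logs/list', data=payload)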
Example #9
    def get(self, request: HttpRequest, project_id) -> JsonResponse:
        """List all available point sets or optionally a sub set.
        ---
        parameters:
          - name: project_id
            description: Project of the returned point sets
            type: integer
            paramType: path
            required: true
          - name: simple
            description: Whether only ID and name should be returned
            type: boolean
            paramType: form
            required: false
            defaultValue: false
          - name: with_points
            description: Whether linked points should be returned as well.
            type: boolean
            paramType: form
            required: false
            defaultValue: false
          - name: pointset_ids
            description: A list of point set IDs to which the query is constrained.
            type: array
            paramType: form
            required: false
          - name: order_by
            description: The field to order the response list by (name, id).
            type: string
            paramType: form
            required: false
            defaultValue: 'id'
        """
        with_points = get_request_bool(request.query_params, 'with_points',
                                       False)
        simple = get_request_bool(request.query_params, 'simple', False)
        pointset_ids = get_request_list(request.query_params,
                                        'pointset_ids',
                                        None,
                                        map_fn=int)
        order_by = request.query_params.get('order_by', 'id')

        pointsets = list_pointsets(project_id, request.user.id, simple,
                                   with_points, pointset_ids, order_by)

        return JsonResponse(pointsets, safe=False)
Example #10
def user_list(request: HttpRequest) -> JsonResponse:
    """List registered users in this CATMAID instance. If accessed by an
    anonymous user, only the anonymous user is returned unless the anonymous
    user has can_browse permissions, which allows it to retrieve all users.

    An administrator can export users including their salted and encrypted
    password. This is meant to import users into other CATMAID instances.
    ---
    parameters:
    - name: with_passwords
      description: |
        Export encrypted passwords. Requires admin access.
      required: false
      type: boolean
      default: false
    """
    with_passwords = get_request_bool(request.GET, 'with_passwords', False)
    if with_passwords:
        # Make sure user is an admin and part of the staff
        if not request.user.is_staff and not request.user.is_superuser:
            raise PermissionError("Superuser permissions required to export "
                                  "encrypted user passwords")

    user = request.user
    can_see_all_users = user.is_authenticated and \
            (user != get_anonymous_user() or user.has_perm('catmaid.can_browse'))

    if can_see_all_users:
        result = []
        for u in User.objects.all().select_related('userprofile') \
                .order_by('last_name', 'first_name'):
            up = u.userprofile
            user_data = {
                "id": u.id,
                "login": u.username,
                "full_name": u.get_full_name(),
                "first_name": u.first_name,
                "last_name": u.last_name,
                "color": (up.color.r, up.color.g, up.color.b),
                "primary_group_id": up.primary_group_id,
            }
            if with_passwords:
                # Append encrypted user password
                user_data['password'] = u.password
            result.append(user_data)
    else:
        up = user.userprofile
        result = [{
            "id": user.id,
            "login": user.username,
            "full_name": user.get_full_name(),
            "first_name": user.first_name,
            "last_name": user.last_name,
            "color": (up.color.r, up.color.g, up.color.b),
            "primary_group_id": up.primary_group_id
        }]

    return JsonResponse(result, safe=False)
Example #11
    def delete(self, request: Request, name=None) -> Response:
        """Delete all key-value store datastores for the client.

        The request user must not be anonymous and must have browse, annotate
        or administer permissions for at least one project.
        ---
        parameters:
        - name: project_id
          description: |
            ID of a project to delete data from, if any.
          required: false
          type: integer
          paramType: form
        - name: ignore_user
          description: |
            Whether to clear data associated with the instance or the request
            user. Only project administrators can do this for project-associated
            instance data, and only super users can do this for global data
            (instance data not associated with any project).
          required: false
          type: boolean
          default: false
          paramType: form
        """
        if request.user == get_anonymous_user() or not request.user.is_authenticated:
            raise PermissionDenied('Unauthenticated or anonymous users '
                                   'can not delete datastores.')

        project_id = request.data.get('project_id', None)
        project = None
        if project_id:
            project_id = int(project_id)
            project = get_object_or_404(Project, pk=project_id)
            if not check_user_role(request.user, project,
                                   [UserRole.Browse, UserRole.Annotate]):
                raise PermissionDenied('User lacks the appropriate ' \
                                       'permissions for this project.')

        ignore_user = get_request_bool(request.data, 'ignore_user', False)
        if ignore_user and not project_id:
            if not request.user.is_superuser:
                raise PermissionDenied('Only super users can delete instance '
                                       'data.')
        elif ignore_user:
            if not check_user_role(request.user, project, [UserRole.Admin]):
                raise PermissionDenied('Only administrators can delete '
                                       'project default data.')
        user = None if ignore_user else request.user

        # Look up the existing datastore rather than instantiating an unsaved
        # one, which would match no rows in the filter below.
        datastore = get_object_or_404(ClientDatastore, name=name)
        n_deleted, _ = ClientData.objects.filter(datastore=datastore,
                                                 project=project,
                                                 user=user).delete()
        return Response({
            'n_deleted': n_deleted,
        })
Example #12
    def get(self, request: Request, project_id) -> Response:
        """List deep-links available to the client.
        ---
        serializer: SimpleDeepLinkSerializer
        """
        only_own = get_request_bool(request.GET, 'only_own', False)
        only_private = get_request_bool(request.GET, 'only_private', False)

        filter_term = (Q(is_public=True)
                       | Q(user_id=request.user.id)) & Q(project_id=project_id)

        if only_own:
            filter_term = filter_term & Q(user_id=request.user.id)

        if only_private:
            filter_term = filter_term & Q(is_public=False)

        deep_links = DeepLink.objects.filter(filter_term)
        serializer = SimpleDeepLinkSerializer(deep_links, many=True)
        return Response(serializer.data)
Example #13
    def test_request_bool_parsing(self):
        q1 = QueryDict('a=true&b=True&c=TRUE')
        self.assertEqual(get_request_bool(q1, 'a', False), True)
        self.assertEqual(get_request_bool(q1, 'b', False), True)
        self.assertEqual(get_request_bool(q1, 'c', False), True)

        q2 = QueryDict('a=false&b=False&c=FALSE')
        self.assertEqual(get_request_bool(q2, 'a', True), False)
        self.assertEqual(get_request_bool(q2, 'b', True), False)
        self.assertEqual(get_request_bool(q2, 'c', True), False)

        q3 = QueryDict()
        self.assertEqual(get_request_bool(q3, 'a', True), True)
        self.assertEqual(get_request_bool(q3, 'b', False), False)
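
These tests pin down the helper's contract: 'true'/'false' in any case parse to booleans, and a missing parameter falls back to the default. A minimal sketch that satisfies them; the actual CATMAID implementation may differ:

def get_request_bool(data, name, default):
    """Parse a boolean parameter from a QueryDict-like mapping."""
    value = data.get(name)
    if value is None:
        return default
    value = value.lower()
    if value == 'true':
        return True
    if value == 'false':
        return False
    return default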
Example #14
def export_nrrd(request, project_id, skeleton_id):
    """Export a skeleton as NRRD file using the NAT R package. To make this
    work, R has to be intalled on the server. Within R the NAT package has to be
    installed and the easiest way to do this is by running the following R code:

    if(!require("devtools")) install.packages("devtools")
    devtools::source_gist("fdd1e5b6e009ff49e66be466a104fd92", filename = "install_flyconnectome_all.R")

    Also, CMTK has to be installed, which can be done either by installing their
    published packages or compiling it from source and making it available from
    /usr/local/lib/cmtk/bin for NAT to pick it up.
    """
    source_ref = request.POST['source_ref']
    target_ref = request.POST['target_ref']
    mirror = get_request_bool(request.POST, 'mirror', False)
    async_export = get_request_bool(request.POST, 'async_export', False)

    # Make sure the output path (defined at module level) can be written to
    if not os.path.exists(output_path) or not os.access(output_path, os.W_OK):
        raise ValueError("The output path is not accessible")

    if async_export:
        export_skeleton_as_nrrd_async.delay(skeleton_id, source_ref,
                                            target_ref, request.user.id,
                                            mirror)

        return JsonResponse({'success': True})
    else:
        result = export_skeleton_as_nrrd(skeleton_id, source_ref, target_ref,
                                         request.user.id, mirror)

        if result['errors']:
            raise RuntimeError(
                "There were errors creating the NRRD file: {}".format(
                    '\n'.join(result['errors'])))

        return CleanUpHTTPResponse(result['nrrd_path'],
                                   result['nrrd_name'],
                                   content_type='application/octet-stream')
Example #15
File: nat.py Project: tomka/CATMAID
def export_nrrd(request, project_id, skeleton_id):
    """Export a skeleton as NRRD file using the NAT R package. To make this
    work, R has to be intalled on the server. Within R the NAT package has to be
    installed and the easiest way to do this is by running the following R code:

    if(!require("devtools")) install.packages("devtools")
    devtools::source_gist("fdd1e5b6e009ff49e66be466a104fd92", filename = "install_flyconnectome_all.R")

    Also, CMTK has to be installed, which can be done either by installing their
    published packages or compiling it from source and making it available from
    /usr/local/lib/cmtk/bin for NAT to pick it up.
    """
    source_ref = request.POST['source_ref']
    target_ref = request.POST['target_ref']
    mirror = get_request_bool(request.POST, 'mirror', False)
    async_export = get_request_bool(request.POST, 'async_export', False)

    # Make sure the output path (defined at module level) can be written to
    if not os.path.exists(output_path) or not os.access(output_path, os.W_OK):
        raise ValueError("The output path is not accessible")

    if async_export:
        export_skeleton_as_nrrd_async.delay(skeleton_id, source_ref, target_ref,
                request.user.id, mirror)

        return JsonResponse({
            'success': True
        })
    else:
        result = export_skeleton_as_nrrd(skeleton_id, source_ref, target_ref,
                request.user.id, mirror)

        if result['errors']:
            raise RuntimeError("There were errors creating the NRRD file: {}".format(
                    '\n'.join(result['errors'])))

        return CleanUpHTTPResponse(result['nrrd_path'], result['nrrd_name'],
                content_type='application/octet-stream')
Example #16
def recompute_similarity(request, project_id, similarity_id):
    """Recompute the similarity matrix of the passed in NBLAST configuration.
    """
    simplify = get_request_bool(request.GET, 'simplify', True)
    required_branches = int(request.GET.get('required_branches', '10'))
    can_edit_or_fail(request.user, similarity_id, 'nblast_similarity')
    task = compute_nblast.delay(project_id, request.user.id, similarity_id,
            remove_target_duplicates=True, simplify=simplify,
            required_branches=required_branches)

    return JsonResponse({
        'status': 'queued',
        'task_id': task.task_id
    })
Example #17
def plot_useranalytics(request: HttpRequest, project_id) -> HttpResponse:
    """ Creates an SVG image containing different plots for analzing the
    performance of individual users over time.
    """
    time_zone = pytz.utc

    userid = request.GET.get('userid', None)
    if not (userid and userid.strip()):
        raise ValueError("Need user ID")
    project = get_object_or_404(Project, pk=project_id) if project_id else None
    all_writes = get_request_bool(request.GET, 'all_writes', False)
    maxInactivity = int(request.GET.get('max_inactivity', 3))

    # Get the start date for the query, defaulting to 7 days ago.
    start_date = request.GET.get('start', None)
    if start_date:
        start_date = dateparser.parse(start_date)
        start_date = time_zone.localize(start_date)
    else:
        with timezone.override(time_zone):
            start_date = timezone.now() - timedelta(7)

    # Get the end date for the query, defaulting to now.
    end_date = request.GET.get('end', None)
    if end_date:
        end_date = dateparser.parse(end_date)
        end_date = time_zone.localize(end_date)
    else:
        with timezone.override(time_zone):
            end_date = timezone.now()

    # The API is inclusive and should return stats for the end date as
    # well. The actual query is easier with an exclusive end and therefore
    # the end date is set to the beginning of the next day.
    end_date = end_date + timedelta(days=1)

    if request.user.is_superuser or \
            project and request.user.has_perm('can_browse', project):
        f = generateReport(userid, project_id, maxInactivity, start_date,
                           end_date, all_writes)
    else:
        f = generateErrorImage('You lack permissions to view this report.')

    # Use raw text rather than SVG fonts or pathing.
    plt.rcParams['svg.fonttype'] = 'none'
    buf = io.BytesIO()
    plt.savefig(buf, format='svg')
    return HttpResponse(buf.getvalue(), content_type='image/svg+xml')
Example #18
def plot_useranalytics(request, project_id):
    """ Creates an SVG image containing different plots for analzing the
    performance of individual users over time.
    """
    time_zone = pytz.utc

    userid = request.GET.get('userid', None)
    if not (userid and userid.strip()):
        raise ValueError("Need user ID")
    project = get_object_or_404(Project, pk=project_id) if project_id else None
    all_writes = get_request_bool(request.GET, 'all_writes', False)
    maxInactivity = int(request.GET.get('max_inactivity', 3))

    # Get the start date for the query, defaulting to 7 days ago.
    start_date = request.GET.get('start', None)
    if start_date:
        start_date = dateparser.parse(start_date)
        start_date = time_zone.localize(start_date)
    else:
        with timezone.override(time_zone):
            start_date = timezone.now() - timedelta(7)

    # Get the end date for the query, defaulting to now.
    end_date = request.GET.get('end', None)
    if end_date:
        end_date = dateparser.parse(end_date)
        end_date = time_zone.localize(end_date)
    else:
        with timezone.override(time_zone):
            end_date = timezone.now()

    # The API is inclusive and should return stats for the end date as
    # well. The actual query is easier with an exclusive end and therefore
    # the end date is set to the beginning of the next day.
    end_date = end_date + timedelta(days=1)

    if request.user.is_superuser or \
            project and request.user.has_perm('can_browse', project):
        f = generateReport( userid, project_id, maxInactivity, start_date,
                end_date, all_writes )
    else:
        f = generateErrorImage('You lack permissions to view this report.')

    # Use raw text rather than SVG fonts or pathing.
    plt.rcParams['svg.fonttype'] = 'none'
    buf = io.BytesIO()
    plt.savefig(buf, format='svg')
    return HttpResponse(buf.getvalue(), content_type='image/svg+xml')
Example #19
def plot_useranalytics(request, project_id):
    """ Creates a PNG image containing different plots for analzing the
    performance of individual users over time.
    """
    time_zone = pytz.utc

    userid = request.GET.get('userid', None)
    if not (userid and userid.strip()):
        raise ValueError("Need user ID")
    project = get_object_or_404(Project, pk=project_id) if project_id else None
    all_writes = get_request_bool(request.GET, 'all_writes', False)
    maxInactivity = int(request.GET.get('max_inactivity', 3))

    # Get the start date for the query, defaulting to 7 days ago.
    start_date = request.GET.get('start', None)
    if start_date:
        start_date = dateparser.parse(start_date)
        start_date = time_zone.localize(start_date)
    else:
        with timezone.override(time_zone):
            start_date = timezone.now() - timedelta(7)

    # Get the end date for the query, defaulting to now.
    end_date = request.GET.get('end', None)
    if end_date:
        end_date = dateparser.parse(end_date)
        end_date = time_zone.localize(end_date)
    else:
        with timezone.override(time_zone):
            end_date = timezone.now()

    # The API is inclusive and should return stats for the end date as
    # well. The actual query is easier with an exclusive end and therefore
    # the end date is set to the beginning of the next day.
    end_date = end_date + timedelta(days=1)

    if request.user.is_superuser or \
            project and request.user.has_perm('can_browse', project):
        f = generateReport(userid, project_id, maxInactivity, start_date,
                           end_date, all_writes)
    else:
        f = generateErrorImage('You lack permissions to view this report.')

    canvas = FigureCanvasAgg(f)
    response = HttpResponse(content_type='image/png')
    canvas.print_png(response)
    return response
Example #20
    def get(self, request, project_id):
        """List all available NBLAST configurations.
        ---
        parameters:
          - name: project_id
            description: Project of the returned configurations
            type: integer
            paramType: path
            required: true
          - name: simple
            description: Whether only ID and name should be returned
            type: boolean
            paramType: form
            required: false
            defaultValue: false
        """
        simple = get_request_bool(request.query_params, 'simple', False)
        return JsonResponse([serialize_config(c, simple) for c in
                NblastConfig.objects.filter(project_id=project_id)], safe=False)
Example #21
def user_list(request: HttpRequest) -> JsonResponse:
    """List registered users in this CATMAID instance. Must be logged in.
    An administrator can export users including their encrypted password. This
    is meant to import users into other CATMAID instances.
    ---
    parameters:
    - name: with_passwords
      description: |
        Export encrypted passwords. Requires admin access.
      required: false
      type: boolean
      default: false
    """
    with_passwords = get_request_bool(request.GET, 'with_passwords', False)
    if with_passwords:
        # Make sure user is an admin and part of the staff
        if not request.user.is_staff and not request.user.is_superuser:
            raise PermissionError("Superuser permissions required to export "
                                  "encrypted user passwords")

    result = []
    for u in User.objects.all().select_related('userprofile') \
            .order_by('last_name', 'first_name'):
        up = u.userprofile
        user_data = {
            "id": u.id,
            "login": u.username,
            "full_name": u.get_full_name(),
            "first_name": u.first_name,
            "last_name": u.last_name,
            "color": (up.color.r, up.color.g, up.color.b),
            "primary_group_id": up.primary_group_id,
        }
        if with_passwords:
            # Append encrypted user password
            user_data['password'] = u.password
        result.append(user_data)

    return JsonResponse(result, safe=False)
Example #22
def user_list(request):
    """List registered users in this CATMAID instance. Requires to be logged in.
    An administrator can export users including their encrpyted password. This
    is meant to import users into other CATMAID instances.
    ---
    parameters:
    - name: with_passwords
      description: |
        Export encrypted passwords. Requires admin access.
      required: false
      type: boolean
      default: false
    """
    with_passwords = get_request_bool(request.GET, 'with_passwords', False)
    if with_passwords:
        # Make sure user is an admin and part of the staff
        if not request.user.is_staff and not request.user.is_superuser:
            raise PermissionError("Superuser permissions required to export "
                    "encrypted user passwords")

    result = []
    for u in User.objects.all().select_related('userprofile') \
            .order_by('last_name', 'first_name'):
        up = u.userprofile
        user_data = {
            "id": u.id,
            "login": u.username,
            "full_name": u.get_full_name(),
            "first_name": u.first_name,
            "last_name": u.last_name,
            "color": (up.color.r, up.color.g, up.color.b)
        }
        if with_passwords:
            # Append encrypted user password
            user_data['password'] = u.password
        result.append(user_data)

    return JsonResponse(result, safe=False)
Example #23
    def post(self, request: Request) -> Response:
        """Apply a project token.

        serializer: SimpleProjectTokenSerializer
        """
        if request.user.is_anonymous:
            raise PermissionError("Anonymous users can't apply tokens")

        token = get_object_or_404(ProjectToken,
                                  token=request.POST.get('token'))
        favorite = get_request_bool(request.POST, 'favorite', True)

        if not token.enabled:
            raise ValueError("Can't apply token")

        for perm in token.default_permissions:
            assign_perm(perm, request.user, token.project)

        upt = UserProjectToken.objects.create(
            **{
                'user': request.user,
                'project_token': token,
                'enabled': not token.needs_approval,
            })

        if favorite:
            FavoriteProject.objects.create(**{
                'project_id': token.project_id,
                'user_id': request.user.id,
            })

        return Response({
            'project_id': token.project_id,
            'project_name': token.project.title,
            'permissions': token.default_permissions,
            'needs_approval': token.needs_approval,
        })
Example #24
def get_tile(request: HttpRequest,
             project_id=None,
             stack_id=None) -> HttpResponse:
    scale = float(request.GET.get('scale', '0'))
    height = int(request.GET.get('height', '0'))
    width = int(request.GET.get('width', '0'))
    x = int(request.GET.get('x', '0'))
    y = int(request.GET.get('y', '0'))
    z = int(request.GET.get('z', '0'))
    col = request.GET.get('col', 'y')
    row = request.GET.get('row', 'x')
    file_extension = request.GET.get('file_extension', 'png')
    basename = request.GET.get('basename', 'raw')
    data_format = request.GET.get('format', 'hdf5')
    upscale = get_request_bool(request.GET, 'upscale', False)

    if data_format == 'hdf5':
        tile = get_hdf5_tile(project_id, stack_id, scale, height, width, x, y,
                             z, col, row, file_extension, basename)
    elif data_format == 'cloudvolume':
        tile = get_cloudvolume_tile(project_id,
                                    stack_id,
                                    scale,
                                    height,
                                    width,
                                    x,
                                    y,
                                    z,
                                    col,
                                    row,
                                    file_extension,
                                    basename,
                                    upscale=upscale)
    else:
        raise ValueError(f'Unknown data format request: {data_format}')

    return tile
Example #25
def update_confidence(request, project_id=None, treenode_id=None):
    """Update confidence of edge between a node to either its parent or its
    connectors.

    The connection between a node and its parent or the connectors it is linked
    to can be rated with a confidence value in the range 1-5. If connector links
    should be updated, one can limit the affected connections to a specific
    connector. Returned is an object, mapping updated partners to their old
    confidences.
    ---
    parameters:
      - name: new_confidence
        description: New confidence, value in range 1-5
        type: integer
        required: true
      - name: to_connector
        description: Whether all linked connectors instead of parent should be updated
        type: boolean
        required: false
      - name: partner_ids
        description: Limit update to a set of connectors if to_connector is true
        type: array
        items: integer
        required: false
      - name: partner_confidences
        description: Set different confidences to connectors in <partner_ids>
        type: array
        items: integer
        required: false
    type:
        message:
            type: string
            required: true
        updated_partners:
            type: object
            required: true
    """
    tnid = int(treenode_id)
    can_edit_treenode_or_fail(request.user, project_id, tnid)
    cursor = connection.cursor()

    state.validate_state(tnid, request.POST.get('state'),
            node=True, lock=True, cursor=cursor)

    to_connector = get_request_bool(request.POST, 'to_connector', False)
    partner_ids = get_request_list(request.POST, 'partner_ids', None, int)
    partner_confidences = get_request_list(request.POST, 'partner_confidences',
            None, int)

    new_confidence = int(request.POST.get('new_confidence', 0))

    # If partner confidences are specified, make sure there are exactly as many
    # as there are partners. Otherwise validate passed in confidence
    if partner_ids and partner_confidences:
        if len(partner_confidences) != len(partner_ids):
            raise ValueError("There have to be as many partner confidences as"
                             "there are partner IDs")
    else:
        if new_confidence < 1 or new_confidence > 5:
            raise ValueError('Confidence not in range 1-5 inclusive.')
        if partner_ids:
            # Prepare new confidences for connector query
            partner_confidences = (new_confidence,) * len(partner_ids)

    if to_connector:
        if partner_ids:
            partner_template = ",".join(("(%s,%s)",) * len(partner_ids))
            partner_data = [p for v in zip(partner_ids, partner_confidences) for p in v]
            cursor.execute('''
                UPDATE treenode_connector tc
                SET confidence = target.new_confidence
                FROM (SELECT x.id, x.confidence AS old_confidence,
                             new_values.confidence AS new_confidence
                      FROM treenode_connector x
                      JOIN (VALUES {}) new_values(cid, confidence)
                      ON x.connector_id = new_values.cid
                      WHERE x.treenode_id = %s) target
                WHERE tc.id = target.id
                RETURNING tc.connector_id, tc.edition_time, target.old_confidence
            '''.format(partner_template), partner_data + [tnid])
        else:
            cursor.execute('''
                UPDATE treenode_connector tc
                SET confidence = %s
                FROM (SELECT x.id, x.confidence AS old_confidence
                      FROM treenode_connector x
                      WHERE treenode_id = %s) target
                WHERE tc.id = target.id
                RETURNING tc.connector_id, tc.edition_time, target.old_confidence
            ''', (new_confidence, tnid))
    else:
        cursor.execute('''
            UPDATE treenode t
            SET confidence = %s, editor_id = %s
            FROM (SELECT x.id, x.confidence AS old_confidence
                  FROM treenode x
                  WHERE id = %s) target
            WHERE t.id = target.id
            RETURNING t.parent_id, t.edition_time, target.old_confidence
        ''', (new_confidence, request.user.id, tnid))

    updated_partners = cursor.fetchall()
    if len(updated_partners) > 0:
        location = Location.objects.filter(id=tnid).values_list(
                'location_x', 'location_y', 'location_z')[0]
        insert_into_log(project_id, request.user.id, "change_confidence",
                location, "Changed to %s" % new_confidence)
        return JsonResponse({
            'message': 'success',
            'updated_partners': {
                r[0]: {
                    'edition_time': r[1],
                    'old_confidence': r[2]
                } for r in updated_partners
            }
        })

    # Else, signal error
    if to_connector:
        raise ValueError('Failed to update confidence between treenode %s and '
                'connector.' % tnid)
    else:
        raise ValueError('Failed to update confidence at treenode %s.' % tnid)
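
A hypothetical client call that raises the confidence of two connector links at once. The URL pattern, the state token, and the list encoding are assumptions; how arrays must be encoded depends on get_request_list:

import requests

payload = {
    'new_confidence': 4,
    'to_connector': 'true',
    'partner_ids': [123, 456],  # requests encodes lists as repeated keys
    'state': '{...}',           # node state token checked by validate_state
}
# Hypothetical route; the real one comes from CATMAID's urls.py.
requests.post('https://example.org/catmaid/3/treenodes/42/confidence',
              data=payload, auth=('user', 'password'))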
Example #26
def fork(request:HttpRequest, project_id) -> JsonResponse:
    """Attempt to create a new project based on the passed in project ID.
    ---
    parameters:
    - name: name
      description: Name of new project
      required: true
      type: string
    - name: copy_volumes
      description: Whether volumes will be copied to the new project
      required: false
      type: boolean
      defaultValue: false
    """
    name = request.POST.get('name')
    if not name:
        raise ValueError('Need new project name')

    copy_volumes = get_request_bool(request.POST, 'copy_volumes', False)

    current_p = get_object_or_404(Project, pk=project_id)
    # Fetch a second instance of the same row so it can be mutated into the
    # fork without touching current_p.
    new_p = get_object_or_404(Project, pk=project_id)

    new_p.id = None
    new_p.title = name
    new_p.save()

    # Copy all project-stack links
    ps_links = ProjectStack.objects.filter(project=current_p)
    for ps in ps_links:
        ps.id = None
        ps.project = new_p
        ps.save()

    # Assign read/write/import permissions for new fork
    assign_perm('can_browse', request.user, new_p)
    assign_perm('can_annotate', request.user, new_p)
    assign_perm('can_import', request.user, new_p)
    assign_perm('can_fork', request.user, new_p)
    assign_perm('can_administer', request.user, new_p)
    assign_perm('delete_project', request.user, new_p)

    # Create basic classes and relations
    validate_project_setup(new_p.id, request.user.id, fix=True)

    # If the source project is a tracing project, make the clone as well one.
    # A local import is used here to avoid a high potential for circular imports.
    from catmaid.control.tracing import check_tracing_setup, setup_tracing
    if check_tracing_setup(project_id):
        setup_tracing(new_p.id)

    if copy_volumes:
        cursor = connection.cursor()
        cursor.execute("""
            INSERT INTO catmaid_volume (user_id, project_id, creation_time,
                    edition_time, editor_id, name, comment, geometry, area,
                    volume, watertight, meta_computed)
            SELECT user_id, %(new_project_id)s, creation_time, edition_time,
                    editor_id, name, comment, geometry, area, volume, watertight,
                    meta_computed
            FROM catmaid_volume
            WHERE project_id = %(project_id)s;
        """, {
            'project_id': project_id,
            'new_project_id': new_p.id
        })

    return JsonResponse({
        'new_project_id': new_p.id,
        'n_copied_stack_links': len(ps_links),
    })
Example #27
def projects(request:HttpRequest) -> JsonResponse:
    """ List projects visible to the requesting user.
    ---
    models:
      project_api_stack_element:
        id: project_api_stack_element
        properties:
          id:
            type: integer
            description: Stack ID
            required: true
          title:
            type: string
            description: Stack title
            required: true
          comment:
            type: string
            description: Comment on stack
            required: true
      project_api_stackgroup_element:
        id: project_api_stackgroup_element
        properties:
          id:
            type: integer
            description: Stack group ID
            required: true
          title:
            type: string
            description: Stack group title
            required: true
          comment:
            type: string
            description: Comment on stack group
            required: true
      project_api_element:
        id: project_api_element
        properties:
          id:
            type: integer
            description: Project ID
            required: true
          title:
            type: string
            description: Project title
            required: true
          stacks:
            type: array
            items:
              $ref: project_api_stack_element
            required: true
          stackgroups:
            type: array
            items:
              $ref: project_api_stackgroup_element
            required: true
    parameters:
    - name: has_tracing_data
      description: Return only projects that have tracing data
      required: false
      defaultValue: false
      type: boolean
    - name: with_mirrors
      description: Whether to include stack mirror data in the response.
      required: false
      defaultValue: false
      type: boolean
    type:
      projects:
        type: array
        items:
          $ref: project_api_element
        required: true
    """

    # Get all projects that are visible to the current user
    projects = get_project_qs_for_user(request.user).order_by('title')
    has_tracing_data = get_request_bool(request.GET, 'has_tracing_data', False)
    with_mirrors = get_request_bool(request.GET, 'with_mirrors', False)

    if has_tracing_data:
        projects = projects.annotate(
            no_locations=~Exists(Location.objects.filter(project=OuterRef('pk')))
        ).filter(
            no_locations=False
        )

    if not projects:
        return JsonResponse([], safe=False)

    cursor = connection.cursor()
    user_project_ids = [p.id for p in projects]

    tracing_data_join = ''
    extra_where = []
    if has_tracing_data:
        tracing_data_join = '''
            INNER JOIN LATERAL (
                SELECT EXISTS (SELECT 1 FROM location WHERE project_id = ps.project_id)
            ) sub(has_tracing_data)
                ON TRUE
        '''
        extra_where.append('''
            sub.has_tracing_data = True
        ''')

    project_stack_mapping:Dict = dict()
    if with_mirrors:
        cursor.execute("""
            SELECT DISTINCT ON (ps.project_id, ps.stack_id) ps.project_id,
                ps.stack_id, s.title, s.comment, s.dimension, sm.mirrors
            FROM project_stack ps
            JOIN UNNEST(%(user_project_ids)s::integer[]) user_project(id)
                ON ps.project_id = user_project.id
            JOIN stack s
                ON ps.stack_id = s.id
            JOIN LATERAL (
                SELECT COALESCE(json_agg(row_to_json(stack_mirror) ORDER BY position ASC), '[]'::json) AS mirrors
                FROM stack_mirror
                WHERE stack_id = s.id
            ) sm
              ON TRUE;
        """, {
            'user_project_ids': user_project_ids,
        })
    else:
        cursor.execute("""
            SELECT DISTINCT ON (ps.project_id, ps.stack_id) ps.project_id,
            ps.stack_id, s.title, s.comment, s.dimension
            FROM project_stack ps
            JOIN UNNEST(%(user_project_ids)s::integer[]) user_project(id)
                ON ps.project_id = user_project.id
            JOIN stack s
                ON ps.stack_id = s.id
        """, {
            'user_project_ids': user_project_ids,
        })

    for row in cursor.fetchall():
        stacks = project_stack_mapping.get(row[0])
        if not stacks:
            stacks = []
            project_stack_mapping[row[0]] = stacks
        stack_data = {
            'id': row[1],
            'title': row[2],
            'comment': row[3],
            'dimensions': [row[4].x, row[4].y, row[4].z],
        }
        if with_mirrors:
            stack_data['mirrors'] = row[5]
        stacks.append(stack_data)

    # Get all stack groups for this project
    project_stack_groups:Dict = dict()
    cursor.execute("""
        SELECT DISTINCT ps.project_id, sg.id, sg.title, sg.comment
        FROM stack_group sg
        JOIN stack_stack_group ssg
          ON ssg.stack_group_id = sg.id
        JOIN project_stack ps
          ON ps.stack_id = ssg.stack_id
        INNER JOIN UNNEST(%(user_project_ids)s::integer[]) user_project(id)
          ON ps.project_id = user_project.id
    """, {
        'user_project_ids': user_project_ids,
    })
    for row in cursor.fetchall():
        groups = project_stack_groups.get(row[0])
        if not groups:
            groups = []
            project_stack_groups[row[0]] = groups
        groups.append({
            'id': row[1],
            'title': row[2],
            'comment': row[3],
        })

    result:List = []
    empty_tuple:Tuple = tuple()
    for p in projects:
        stacks = project_stack_mapping.get(p.id, empty_tuple)
        stackgroups = project_stack_groups.get(p.id, empty_tuple)

        result.append({
            'id': p.id,
            'title': p.title,
            'stacks': stacks,
            'stackgroups': stackgroups
        })

    return JsonResponse(result, safe=False, json_dumps_params={
        'sort_keys': True,
        'indent': 4
    })
Example #28
def skeleton_graph(request, project_id=None):
    """Get a synaptic graph between skeletons compartmentalized by confidence.

    Given a set of skeletons, retrieve presynaptic-to-postsynaptic edges
    between them, annotated with count. If a confidence threshold is
    supplied, compartmentalize the skeletons at edges in the arbor
    below that threshold and report connectivity based on these
    compartments.

    When skeletons are split into compartments, nodes in the graph take a
    string ID like ``{skeleton_id}_{compartment #}``.
    ---
    parameters:
        - name: skeleton_ids[]
          description: IDs of the skeletons to graph
          required: true
          type: array
          items:
            type: integer
          paramType: form
        - name: confidence_threshold
          description: Confidence value below which to segregate compartments
          type: integer
          paramType: form
        - name: bandwidth
          description: Bandwidth in nanometers
          type: number
        - name: cable_spread
          description: Cable spread in nanometers
          type: number
        - name: expand[]
          description: IDs of the skeletons to expand
          type: array
          items:
            type: integer
        - name: link_types[]
          description: IDs of link types to respect
          type: array
          items:
            type: string
    models:
      skeleton_graph_edge:
        id: skeleton_graph_edge
        properties:
        - description: ID of the presynaptic skeleton or compartment
          type: integer|string
          required: true
        - description: ID of the postsynaptic skeleton or compartment
          type: integer|string
          required: true
        - description: number of synapses constituting this edge
          $ref: skeleton_graph_edge_count
          required: true
      skeleton_graph_edge_count:
        id: skeleton_graph_edge_count
        properties:
        - description: Number of synapses with confidence 1
          type: integer
          required: true
        - description: Number of synapses with confidence 2
          type: integer
          required: true
        - description: Number of synapses with confidence 3
          type: integer
          required: true
        - description: Number of synapses with confidence 4
          type: integer
          required: true
        - description: Number of synapses with confidence 5
          type: integer
          required: true
      skeleton_graph_intraedge:
        id: skeleton_graph_intraedge
        properties:
        - description: ID of the presynaptic skeleton or compartment
          type: integer|string
          required: true
        - description: ID of the postsynaptic skeleton or compartment
          type: integer|string
          required: true
    type:
      edges:
        type: array
        items:
          $ref: skeleton_graph_edge
        required: true
      nodes:
        type: array
        items:
          type: integer|string
        required: false
      intraedges:
        type: array
        items:
          $ref: skeleton_graph_intraedge
        required: false
      branch_nodes:
        type: array
        items:
          type: integer|string
        required: false
    """
    compute_risk = 1 == int(request.POST.get('risk', 0))
    if compute_risk:
        # TODO port the last bit: computing the synapse risk
        from graph import skeleton_graph as slow_graph
        return slow_graph(request, project_id)

    project_id = int(project_id)
    skeleton_ids = set(int(v) for k,v in request.POST.items() if k.startswith('skeleton_ids['))
    confidence_threshold = min(int(request.POST.get('confidence_threshold', 0)), 5)
    bandwidth = float(request.POST.get('bandwidth', 0)) # in nanometers
    cable_spread = float(request.POST.get('cable_spread', 2500)) # in nanometers
    path_confluence = int(request.POST.get('path_confluence', 10)) # a count
    expand = set(int(v) for k,v in request.POST.items() if k.startswith('expand['))
    with_overall_counts = get_request_bool(request.POST, 'with_overall_counts', False)
    link_types = get_request_list(request.POST, 'link_types', None)

    graph = _skeleton_graph(project_id, skeleton_ids,
        confidence_threshold, bandwidth, expand, compute_risk, cable_spread,
        path_confluence, with_overall_counts, link_types=link_types)

    if not graph:
        raise ValueError("Could not compute graph")

    return JsonResponse(graph)
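
Note that the array parameters are parsed by scanning POST keys for an indexed prefix, so a client has to encode them as skeleton_ids[0], skeleton_ids[1], and so on. A hypothetical call (endpoint URL assumed):

data = {
    'skeleton_ids[0]': 101,
    'skeleton_ids[1]': 102,
    'confidence_threshold': 3,  # compartmentalize below this confidence
}
# e.g. requests.post('https://example.org/catmaid/3/skeletons/graph', data=data)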
Example #29
def stats_nodecount(request, project_id=None):
    """ Get the total number of created nodes per user.
    ---
    parameters:
    - name: with_imports
      description: |
        Whether data added through imports should be respected.
      required: false
      default: false
      type: boolean
      paramType: form
    """
    cursor = connection.cursor()
    names = dict(User.objects.values_list('id', 'username'))
    with_imports = get_request_bool(request.GET, 'with_imports', False)

    cursor.execute('''
        WITH precomputed AS (
            SELECT user_id,
                MAX(date) AS date,
                SUM(n_treenodes) AS n_treenodes
            FROM catmaid_stats_summary
            WHERE project_id = %(project_id)s
            GROUP BY 1
        ),
        last_precomputation AS (
            SELECT COALESCE(
                date_trunc('hour', MAX(date)) + interval '1 hour',
                NULL) AS max_date
            FROM precomputed
        ),
        result_with_precomputation AS (
            SELECT p.user_id AS user_id,
                p.n_treenodes AS n_treenodes
            FROM precomputed p

            -- We don't expect duplicates when adding rows for nodes traced
            -- after the last precomputation. This branch only contributes
            -- rows if there actually was a precomputation (max_date is not
            -- null).
            UNION ALL
            SELECT t.user_id AS user_id,
                count(*) AS n_treenodes
            FROM treenode t, last_precomputation
            WHERE t.project_id = %(project_id)s
            AND last_precomputation.max_date IS NOT NULL
            AND t.creation_time >= last_precomputation.max_date
            GROUP BY t.user_id
        )
        SELECT user_id, SUM(n_treenodes)::float
        FROM result_with_precomputation, last_precomputation
        WHERE last_precomputation.max_date IS NOT NULL
        GROUP BY user_id

        -- If there was no precomputation (max_date is null), do a simpler
        -- counting that doesn't involve date comparisons. In this case
        -- duplicates are impossible.
        UNION ALL
        SELECT user_id, count(*)::float
        FROM treenode, last_precomputation
        WHERE project_id = %(project_id)s
        AND last_precomputation.max_date IS NULL
        GROUP BY user_id
    ''', dict(project_id=int(project_id)))

    node_stats = dict(cursor.fetchall())

    if not with_imports:
        # In case imports should be excluded, subtract the number of imported
        # nodes from each entry. Otherwise the regular node count doesn't
        # differentiate between imported and created nodes. This flag requires
        # history tracking to be enabled to work reliably.
        cursor.execute('''
            WITH precomputed AS (
                SELECT user_id,
                    date,
                    SUM(n_imported_treenodes) AS n_imported_treenodes
                FROM catmaid_stats_summary
                WHERE project_id = %(project_id)s
                -- This is required to not just take the last available cache
                -- entry, which might not contain a valid precomputed import
                -- cache field.
                AND n_imported_treenodes > 0
                GROUP BY 1, 2
            ),
            last_precomputation AS (
                SELECT COALESCE(
                    -- Select first start date after last precomputed hour/bucket
                    date_trunc('hour', MAX(date)) + interval '1 hour',
                    '-infinity') AS max_date
                FROM precomputed
            ),
            transactions AS (
                SELECT cti.transaction_id, cti.execution_time
                FROM last_precomputation
                JOIN catmaid_transaction_info cti
                    ON cti.execution_time >= last_precomputation.max_date
                WHERE cti.project_id = %(project_id)s
                AND label = 'skeletons.import'
            ),
            all_treenodes AS (
                SELECT p.user_id AS user_id,
                    p.n_imported_treenodes AS n_imported_treenodes
                FROM precomputed p

                -- Don't expect duplicates
                UNION ALL

                SELECT sorted_row_history.user_id AS user_id,
                    1 AS n_imported_treenodes
                FROM (
                    SELECT t.id, t.user_id,
                        ROW_NUMBER() OVER(PARTITION BY t.id ORDER BY t.edition_time) AS n
                    FROM last_precomputation,
                       transactions tx
                    JOIN treenode__with_history t
                    ON t.txid = tx.transaction_id
                    WHERE t.creation_time = tx.execution_time
                    AND t.creation_time >= last_precomputation.max_date
                ) sorted_row_history
                WHERE sorted_row_history.n = 1
            )
            SELECT user_id,
                -- Return float to make python side arithmetic easier
                SUM(n_imported_treenodes)::float AS n_imported_treenodes
            FROM all_treenodes
            GROUP BY user_id
        ''', dict(project_id=int(project_id)))

        for user_id, n_imported_nodes in cursor.fetchall():
            created_nodes = node_stats.get(user_id)
            if created_nodes:
                # The lower boundary of zero shouldn't be needed, but because
                # general node counting doesn't take history into account
                # (deleted nodes are not counted), there are corner cases in
                # which more nodes have been imported than have been created
                # (and are still available).
                node_stats[user_id] = max(0, created_nodes - n_imported_nodes)

    # Both SUM and COUNT are represented as floating point numbers in the
    # response, which works better with JSON than Decimal (which the JSON
    # encoder would convert to a string).
    return JsonResponse(node_stats)
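
The first query follows a cache-plus-delta pattern: per-user counts cached in catmaid_stats_summary are combined with a live count of treenodes created after the last precomputed hour. A minimal in-memory sketch of the same idea, with hypothetical data instead of SQL:

from collections import Counter
from datetime import datetime, timedelta

precomputed = {1: 1000, 2: 250}            # user_id -> cached node count
last_bucket = datetime(2023, 5, 1, 12)     # last precomputed hour
cutoff = last_bucket + timedelta(hours=1)  # everything after this is uncached

# (user_id, creation_time) pairs for nodes created after the cache was built
recent = [(1, cutoff + timedelta(minutes=5)),
          (3, cutoff + timedelta(hours=2))]

totals = Counter(precomputed)
totals.update(user_id for user_id, created in recent if created >= cutoff)
print(dict(totals))  # {1: 1001, 2: 250, 3: 1}
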
Example No. 31
def label_update(request, project_id=None, location_id=None, ntype=None):
    """ location_id is the ID of a treenode or connector.
        ntype is either 'treenode' or 'connector'. """
    labeled_as_relation = Relation.objects.get(project=project_id, relation_name='labeled_as')
    p = get_object_or_404(Project, pk=project_id)

    # TODO will fail when a tag itself contains a comma (a comma-safe parsing
    # sketch follows this example)
    new_tags = request.POST['tags'].split(',')
    delete_existing_labels = get_request_bool(request.POST, 'delete_existing', True)

    kwargs = {'relation': labeled_as_relation,
              'class_instance__class_column__class_name': 'label'}

    table = get_link_model(ntype)
    if 'treenode' == ntype:
        kwargs['treenode__id'] = location_id
        node = Treenode.objects.get(id=location_id)
    elif 'connector' == ntype:
        kwargs['connector__id'] = location_id
        node = Connector.objects.get(id=location_id)

    if not table:
        raise Http404('Unknown node type: "%s"' % (ntype,))

    # Get the existing list of tags for the tree node/connector and delete any
    # that are not in the new list.
    existing_labels = table.objects.filter(**kwargs).select_related('class_instance')
    existing_names = set(ele.class_instance.name for ele in existing_labels)
    duplicate_labels = table.objects.filter(**kwargs).exclude(class_instance__name__in=new_tags).select_related('class_instance')

    other_labels = []
    deleted_labels = []
    if delete_existing_labels:
        # Iterate over all labels that should get deleted to check permission
        # on each one. Remember each label that couldn't be deleted in the
        # other_labels array.
        for l in duplicate_labels:
            try:
                can_edit_or_fail(request.user, l.id, table._meta.db_table)
                if remove_label(l.id, ntype):
                    deleted_labels.append(l)
                else:
                    other_labels.append(l)
            except Exception:
                other_labels.append(l)

        # Create change requests for labels associated to the treenode by other users
        for label in other_labels:
            change_request_params = {
                'type': 'Remove Tag',
                'project': p,
                'user': request.user,
                'recipient': node.user,
                'location': Double3D(node.location_x, node.location_y, node.location_z),
                ntype: node,
                'description': "Remove tag '%s'" % label.class_instance.name,
                'validate_action': 'from catmaid.control.label import label_exists\n' +
                                   'is_valid = label_exists(%s, "%s")' % (str(label.id), ntype),
                'approve_action': 'from catmaid.control.label import remove_label\n' +
                                  'remove_label(%s, "%s")' % (str(label.id), ntype)
            }
            ChangeRequest(**change_request_params).save()

    # Add any new labels.
    label_class = Class.objects.get(project=project_id, class_name='label')
    kwargs = {'user': request.user,
              'project': p,
              'relation': labeled_as_relation,
              ntype: node}

    new_labels = []
    for tag_name in new_tags:
        if len(tag_name) > 0 and tag_name not in existing_names:
            # Make sure the tag instance exists
            existing_tags = tuple(ClassInstance.objects.filter(
                project=p,
                name=tag_name,
                class_column=label_class))
            if len(existing_tags) < 1:
                tag = ClassInstance(
                    project=p,
                    name=tag_name,
                    user=request.user,
                    class_column=label_class)
                tag.save()
            else:
                tag = existing_tags[0]

            # Associate the tag with the treenode/connector.
            kwargs['class_instance'] = tag
            tci = table(**kwargs) # creates new TreenodeClassInstance or ConnectorClassInstance
            tci.save()
            new_labels.append(tag_name)

            if node.user != request.user:
                # Inform the owner of the node that the tag was added and give them the option of removing it.
                change_request_params = {
                    'type': 'Add Tag',
                    'description': 'Added tag \'' + tag_name + '\'',
                    'project': p,
                    'user': request.user,
                    'recipient': node.user,
                    'location': Double3D(node.location_x, node.location_y, node.location_z),
                    ntype: node,
                    'validate_action': 'from catmaid.control.label import label_exists\n' +
                                       'is_valid = label_exists(%s, "%s")' % (str(tci.id), ntype),
                    'reject_action': 'from catmaid.control.label import remove_label\n' +
                                     'remove_label(%s, "%s")' % (str(tci.id), ntype)
                }
                ChangeRequest(**change_request_params).save()

    response = {
        'message': 'success',
        'new_labels': new_labels,
        'duplicate_labels': [l.class_instance.name for l in duplicate_labels
                             if l not in deleted_labels],
        'deleted_labels': [l.class_instance.name for l in deleted_labels],
    }

    # Check if any labels on this node violate cardinality restrictions on
    # its skeleton.
    if 'treenode' == ntype:
        limited_labels = {l: SKELETON_LABEL_CARDINALITY[l] for l in new_tags if l in SKELETON_LABEL_CARDINALITY}

        if limited_labels:
            ll_names, ll_maxes = zip(*limited_labels.items())
            cursor = connection.cursor()
            cursor.execute("""
                SELECT
                  ll.name,
                  COUNT(tci.treenode_id),
                  ll.max
                FROM
                  class_instance ci,
                  treenode_class_instance tci,
                  treenode tn,
                  unnest(%s::text[], %s::integer[]) AS ll (name, max)
                WHERE ci.name = ll.name
                  AND ci.project_id = %s
                  AND ci.class_id = %s
                  AND tci.class_instance_id = ci.id
                  AND tci.relation_id = %s
                  AND tn.id = tci.treenode_id
                  AND tn.skeleton_id = %s
                GROUP BY
                  ll.name, ll.max
                HAVING
                  COUNT(tci.treenode_id) > ll.max
            """, (
                list(ll_names),
                list(ll_maxes),
                p.id,
                label_class.id,
                labeled_as_relation.id,
                node.skeleton_id))

            if cursor.rowcount:
                response['warning'] = 'The skeleton has too many of the following tags: ' + \
                    ', '.join('{0} ({1}, max. {2})'.format(*row) for row in cursor.fetchall())

    return JsonResponse(response)
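
As noted in the TODO above, the naive split(',') breaks for tags that contain commas. One hedged workaround (an illustrative sketch, not CATMAID's actual request format) is to let clients JSON-encode the tag list and keep comma splitting as a legacy fallback:

import json

def parse_tags(raw: str) -> list:
    """Parse tags from a JSON array if possible, else split on commas."""
    try:
        tags = json.loads(raw)
        if isinstance(tags, list):
            return [str(t).strip() for t in tags if str(t).strip()]
    except ValueError:
        pass
    return [t.strip() for t in raw.split(',') if t.strip()]

assert parse_tags('["soma", "uncertain, review"]') == ['soma', 'uncertain, review']
assert parse_tags('soma,axon') == ['soma', 'axon']
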
Example No. 32
def delete_sampler(request, project_id, sampler_id):
    """Delete a sampler if permissions allow it.

    If the sampler was created with the option to create new boundary nodes,
    these nodes are removed by default, provided they have not been modified
    since their insertion. This can optionally be disabled using the
    <delete_created_nodes> parameter.
    ---
    parameters:
     - name: delete_created_nodes
       description: |
         Optional flag to disable automatic removal of untouched
         nodes created for this sampler's intervals.
       type: boolean
       default: true
       paramType: form
       required: false
    """
    can_edit_or_fail(request.user, sampler_id, "catmaid_sampler")
    sampler = Sampler.objects.get(id=sampler_id)

    n_deleted_nodes = 0
    delete_created_nodes = get_request_bool(request.POST, 'delete_created_nodes', True)
    if delete_created_nodes and sampler.create_interval_boundaries:
        labeled_as_relation = Relation.objects.get(project=project_id, relation_name='labeled_as')
        label_class = Class.objects.get(project=project_id, class_name='label')
        label_class_instance = ClassInstance.objects.get(project=project_id,
                class_column=label_class, name=SAMPLER_CREATED_CLASS)
        # If the sampler was parameterized to create interval boundary nodes,
        # these nodes can now be removed if they are still collinear with their
        # child and parent node and have not been touched. These nodes are all
        # nodes that are referenced by intervals of this sampler that have the
        # SAMPLER_CREATED_CLASS tag with their creation time being the same as the
        # edition time. Such nodes can only be sampler interval start/end nodes.
        params = {
            'project_id': project_id,
            'sampler_id': sampler_id,
            'labeled_as_rel': labeled_as_relation.id,
            'label_class': label_class.id,
            'label_class_instance': label_class_instance.id
        }
        cursor = connection.cursor()

        # Get all created sampler interval boundary treenodes that have been
        # created during sampler creation. The result will also contain parent
        # and child locations. We need to set extra_float_digits to get enough
        # precision for the location data to do a collinearity test.
        cursor.execute("""
            SET extra_float_digits = 3;

            WITH sampler_treenode AS (
                -- Get all treenodes linked to intervals of this sampler. Only
                -- select those nodes that are referenced by no other sampler
                -- (using an anti join).
                SELECT DISTINCT all_added_nodes.id
                FROM (
                    SELECT DISTINCT UNNEST(ARRAY[i.start_node_id, i.end_node_id]) AS id
                    FROM catmaid_samplerinterval i
                    JOIN catmaid_samplerdomain d
                        ON i.domain_id = d.id
                    WHERE d.sampler_id = %(sampler_id)s
                ) all_added_nodes
                JOIN catmaid_samplerinterval csi
                    ON csi.start_node_id = all_added_nodes.id
                    OR csi.end_node_id = all_added_nodes.id
                JOIN catmaid_samplerdomain csd
                    ON csd.id = csi.domain_id
                GROUP BY all_added_nodes.id
                HAVING COUNT(DISTINCT csd.sampler_id) = 1
            ), sampler_created_treenode AS (
                -- Find all treenodes that were created by the sampler and are
                -- unmodified.
                SELECT st.id
                FROM sampler_treenode st
                JOIN treenode_class_instance tci
                    ON st.id = tci.treenode_id
                WHERE tci.relation_id = %(labeled_as_rel)s
                AND tci.class_instance_id = %(label_class_instance)s
            )
            SELECT
                t.id, t.location_x, t.location_y, t.location_z,
                c.id, c.location_x, c.location_y, c.location_z,
                p.id, p.location_x, p.location_y, p.location_z
            FROM (
                -- Only keep created nodes that have exactly one child.
                SELECT st.id
                FROM treenode tt
                JOIN sampler_created_treenode st
                    ON tt.parent_id = st.id
                GROUP BY st.id
                HAVING count(*) = 1

            ) non_branch_treenodes(id)
            JOIN treenode t
                ON t.id = non_branch_treenodes.id
            JOIN treenode p
                ON p.id = t.parent_id
            JOIN treenode c
                ON c.parent_id = t.id
            WHERE t.project_id = %(project_id)s;
        """, params)

        created_treenodes = [r for r in cursor.fetchall()]

        if created_treenodes:
            added_node_index = dict((n[0], n) for n in created_treenodes)
            # Find those created treenodes that are collinear with their parent and
            # child node. If they are, remove those nodes. Ideally, we would move
            # the collinearity test into SQL as well.
            nodes_to_remove = []
            parents_to_fix = []
            child, node, parent = Point3D(0, 0, 0), Point3D(0, 0, 0), Point3D(0, 0, 0)
            for n in created_treenodes:
                n_id, node.x, node.y, node.z = n[0], n[1], n[2], n[3]
                c_id, child.x, child.y, child.z = n[4], n[5], n[6], n[7]
                p_id, parent.x, parent.y, parent.z = n[8], n[9], n[10], n[11]

                child_is_original_node = c_id not in added_node_index
                if is_collinear(child, parent, node, True, 1.0):
                    nodes_to_remove.append(n_id)
                    # Only update nodes that don't get deleted anyway
                    if child_is_original_node:
                        parents_to_fix.append((c_id, p_id))
                else:
                    parents_to_fix.append((n_id, p_id))

            # Update parent information for the parent relation updates. If a
            # present parent ID points to a removed node, the next real parent
            # will be used instead.
            parent_update = []
            for c_id, p_id in parents_to_fix:
                parent_is_persistent = p_id not in added_node_index
                if parent_is_persistent:
                    parent_update.append((c_id, p_id))
                else:
                    # Find next existing node upstream
                    new_parent_id = p_id
                    while not parent_is_persistent:
                        parent_is_persistent = new_parent_id not in nodes_to_remove
                        node_data = added_node_index.get(new_parent_id)
                        # An added node would be used if it is not removed,
                        # e.g. due to not being collinear anymore.
                        if node_data and not parent_is_persistent:
                            new_parent_id = node_data[8]
                        else:
                            parent_update.append((c_id, new_parent_id))

            if nodes_to_remove:
                query_parts = []
                params = []
                if parent_update:
                    update_nodes_template = ",".join("(%s, %s)" for _ in parent_update)
                    update_nodes_flattened = list(chain.from_iterable(parent_update))
                    query_parts.append("""
                        UPDATE treenode
                        SET parent_id = nodes_to_update.parent_id
                        FROM (VALUES {}) nodes_to_update(child_id, parent_id)
                        WHERE treenode.id = nodes_to_update.child_id;
                    """.format(update_nodes_template))
                    params = update_nodes_flattened

                delete_nodes_template = ",".join("(%s)" for _ in nodes_to_remove)
                query_parts.append("""
                    DELETE
                    FROM treenode
                    WHERE id IN (
                        SELECT t.id
                        FROM treenode t
                        JOIN (VALUES {}) to_delete(id)
                            ON t.id = to_delete.id
                    )
                    RETURNING id;
                """.format(delete_nodes_template))
                params = params + nodes_to_remove

                cursor.execute("\n".join(query_parts), params)
                deleted_node_ids = [r[0] for r in cursor.fetchall()]
                n_deleted_nodes = len(deleted_node_ids)

    sampler.delete()

    return JsonResponse({
        'deleted_sampler_id': sampler_id,
        'deleted_interval_nodes': n_deleted_nodes
    })
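
The removal above hinges on the collinearity test: a sampler-created node may only be dropped if it still lies on the straight line between its child and parent. The real is_collinear lives elsewhere in CATMAID; the sketch below only illustrates what such a test can look like and makes assumptions about its semantics:

import math

def is_collinear_sketch(a, b, p, between=True, eps=1.0):
    """True if point p lies within eps of the line (or segment) a-b."""
    ab = (b.x - a.x, b.y - a.y, b.z - a.z)
    ap = (p.x - a.x, p.y - a.y, p.z - a.z)
    # The cross product is (near) zero iff the three points are collinear;
    # |ab x ap| / |ab| is the perpendicular distance of p from the line.
    cross = (ab[1] * ap[2] - ab[2] * ap[1],
             ab[2] * ap[0] - ab[0] * ap[2],
             ab[0] * ap[1] - ab[1] * ap[0])
    norm_ab = math.sqrt(sum(c * c for c in ab))
    if math.sqrt(sum(c * c for c in cross)) > eps * norm_ab:
        return False
    if between:
        # p must also project onto the segment itself, not its extension.
        dot = sum(u * v for u, v in zip(ab, ap))
        return 0 <= dot <= norm_ab * norm_ab
    return True
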
Example No. 33
def list_samplers(request, project_id):
    """Get a collection of available samplers.

    Optionally, the "skeleton_ids" parameter can provide a list of skeleton IDs.
    If this is the case, only samplers for the respective skeletons are returned.
    ---
    parameters:
     - name: skeleton_ids
       description: Optional skeleton IDs to constrain result set to.
       type: array
       items:
         type: integer
       paramType: form
       required: false
     - name: with_domains
       description: Optional flag to include all domains of all result sampler results.
       type: boolean
       paramType: form
       required: false
     - name: with_intervals
       description: Optional flag to include all intervals of all domains. Implies with_domains.
       type: boolean
       paramType: form
       required: false
       default: false
    models:
      sampler_entity:
        id: sampler_entity
        description: A result sampler.
        properties:
          id:
            type: integer
            description: Id of sampler
          creation_time:
            type: string
            description: The point in time a sampler was created
            required: true
          edition_time:
            type: string
            description: The last point in time a sampler was edited.
            required: true
          interval_length:
            type: integer
            description: The length of individual sampler intervals for this sampler.
            required: true
          interval_error:
            type: float
            description: The maximum allowed error of a single interval.
            required: true
          state_id:
            type: integer
            description: ID of state the sampler is in.
            required: true
          skeleton_id:
            type: integer
            description: Skeleton this sampler belongs to
            required: true
          user_id:
            type: integer
            description: User ID of sampler creator.
            required: true
    type:
      samplers:
        type: array
        items:
          $ref: sampler_entity
        description: Matching samplers
        required: true
    """
    skeleton_ids = get_request_list(request.GET, 'skeleton_ids', map_fn=int)
    with_intervals = get_request_bool(request.GET, 'with_intervals', False)
    with_domains = with_intervals or (get_request_bool(request.GET, 'with_domains', False))

    samplers = Sampler.objects.all()
    if skeleton_ids:
        samplers = samplers.filter(skeleton_id__in=skeleton_ids)

    domains = defaultdict(list)
    if with_domains:
        domain_query = SamplerDomain.objects.filter(sampler__in=samplers) \
                .prefetch_related('samplerdomainend_set')
        if with_intervals:
            domain_query = domain_query.prefetch_related('samplerinterval_set')

        for domain in domain_query:
            domain_ends = domain.samplerdomainend_set.all()
            domain_data = {
                "id": domain.id,
                "sampler_id": domain.sampler_id,
                "type_id": domain.domain_type_id,
                "parent_interval": domain.parent_interval_id,
                "start_node_id": domain.start_node_id,
                "user_id": domain.user_id,
                "project_id": domain.project_id,
                "ends": [{
                    "id": e.id,
                    "node_id": e.end_node_id
                } for e in domain_ends]
            }
            if with_intervals:
                domain_data['intervals'] = [[
                    i.id, i.start_node_id, i.end_node_id, i.interval_state_id
                ] for i in domain.samplerinterval_set.all()]

            domains[domain.sampler_id].append(domain_data)

    def exportSampler(s):
        s = {
           'id': s.id,
           'creation_time': float(s.creation_time.strftime('%s')),
           'edition_time': float(s.edition_time.strftime('%s')),
           'interval_length': s.interval_length,
           'interval_error': s.interval_error,
           'leaf_segment_handling': s.leaf_segment_handling,
           'merge_limit': s.merge_limit,
           'review_required': s.review_required,
           'create_interval_boundaries': s.create_interval_boundaries,
           'state_id': s.sampler_state_id,
           'skeleton_id': s.skeleton_id,
           'user_id': s.user_id,
        }

        if with_domains:
            s['domains'] = domains.get(s['id'], [])

        return s

    return JsonResponse([exportSampler(s) for s in samplers], safe=False)
Example No. 34
    def get(self, request, project_id, pointcloud_id):
        """Return a point cloud.
        ---
        parameters:
          - name: project_id
            description: Project of the returned point cloud
            type: integer
            paramType: path
            required: true
          - name: simple
            description: Whether only ID and name should be returned
            type: bool
            paramType: form
            required: false
            defaultValue: false
          - name: with_images
            description: Whether linked images should be returned as well.
            type: bool
            paramType: form
            required: false
            defaultValue: false
          - name: with_points
            description: Whether linked points should be returned as well.
            type: bool
            paramType: form
            required: false
            defaultValue: false
        """
        with_images = get_request_bool(request.query_params, 'with_images', False)
        with_points = get_request_bool(request.query_params, 'with_points', False)
        simple = get_request_bool(request.query_params, 'simple', False)
        sample_ratio = float(request.query_params.get('sample_ratio', '1.0'))

        pointcloud = PointCloud.objects.get(pk=pointcloud_id, project_id=project_id)
        pointcloud_data = serialize_pointcloud(pointcloud, simple)

        # Check permissions. If there are no read permission assigned at all,
        # everyone can read.
        if 'can_read' not in get_perms(request.user, pointcloud) and \
                len(get_users_with_perms(pointcloud)) > 0:
            raise PermissionError('User "{}" not allowed to read point cloud #{}'.format(
                    request.user.username, pointcloud.id))

        if with_images:
            images = [serialize_image_data(i) for i in pointcloud.images.all()]
            pointcloud_data['images'] = images

        if with_points:
            if sample_ratio == 1.0:
                points = [serialize_point(p, compact=True) for p in pointcloud.points.all()]
                pointcloud_data['points'] = points
            else:
                n_points = PointCloudPoint.objects.filter(pointcloud_id=pointcloud.id).count()
                n_sample = int(n_points * sample_ratio)
                cursor = connection.cursor()
                # Select a random sample of N points in a repeatable fashion.
                cursor.execute("""
                    SELECT setseed(0);
                    SELECT id, location_x, location_y, location_z
                    FROM point p
                    JOIN (
                        SELECT pcp.point_id
                        FROM pointcloud_point pcp
                        WHERE pcp.pointcloud_id = %(pointcloud_id)s
                        ORDER BY random()
                    ) ordered_points(id)
                        USING(id)
                    LIMIT %(n_sample)s
                """, {
                    'pointcloud_id': pointcloud.id,
                    'n_sample': n_sample
                })
                pointcloud_data['points'] = cursor.fetchall()

        return JsonResponse(pointcloud_data)
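
The SELECT setseed(0) pins PostgreSQL's random() for the session, so the "random" sample is repeatable across requests with the same ratio. The same idea in plain Python:

import random

point_ids = list(range(100))

random.seed(0)                         # analogous to SELECT setseed(0)
sample_a = random.sample(point_ids, 10)
random.seed(0)
sample_b = random.sample(point_ids, 10)
assert sample_a == sample_b            # same seed, same "random" subset
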
Example No. 35
def list_samplers(request: HttpRequest, project_id) -> JsonResponse:
    """Get a collection of available samplers.

    Optionally, the "skeleton_ids" parameter can provide a list of skeleton IDs.
    If this is the case, only samplers for the respective skeletons are returned.
    ---
    parameters:
     - name: skeleton_ids
       description: Optional skeleton IDs to constrain result set to.
       type: array
       items:
         type: integer
       paramType: form
       required: false
     - name: with_domains
       description: Optional flag to include all domains of all result sampler results.
       type: boolean
       paramType: form
       required: false
     - name: with_intervals
       description: Optional flag to include all intervals of all domains. Implies with_domains.
       type: boolean
       paramType: form
       required: false
       default: false
    models:
      sampler_entity:
        id: sampler_entity
        description: A result sampler.
        properties:
          id:
            type: integer
            description: Id of sampler
          creation_time:
            type: string
            description: The point in time a sampler was created
            required: true
          edition_time:
            type: string
            description: The last point in time a sampler was edited.
            required: true
          interval_length:
            type: integer
            description: The length of individual sampler intervals for this sampler.
            required: true
          interval_error:
            type: float
            description: The maximum allowed error of a single interval.
            required: true
          state_id:
            type: integer
            description: ID of state the sampler is in.
            required: true
          skeleton_id:
            type: integer
            description: Skeleton this sampler belongs to
            required: true
          user_id:
            type: integer
            description: User ID of sampler creator.
            required: true
    type:
      samplers:
        type: array
        items:
          $ref: sampler_entity
        description: Matching samplers
        required: true
    """
    skeleton_ids = get_request_list(request.GET, 'skeleton_ids', map_fn=int)
    with_intervals = get_request_bool(request.GET, 'with_intervals', False)
    with_domains = with_intervals or (get_request_bool(request.GET,
                                                       'with_domains', False))

    samplers = Sampler.objects.all()
    if skeleton_ids:
        samplers = samplers.filter(skeleton_id__in=skeleton_ids)

    domains: DefaultDict[Any, List] = defaultdict(list)
    if with_domains:
        domain_query = SamplerDomain.objects.filter(sampler__in=samplers) \
                .prefetch_related('samplerdomainend_set')
        if with_intervals:
            domain_query = domain_query.prefetch_related('samplerinterval_set')

        for domain in domain_query:
            domain_data = serialize_domain(domain,
                                           with_ends=True,
                                           with_intervals=with_intervals)
            domains[domain.sampler_id].append(domain_data)

    def exportSampler(s) -> Dict[str, Any]:
        s = serialize_sampler(s)

        if with_domains:
            s['domains'] = domains.get(s['id'], [])

        return s

    return JsonResponse([exportSampler(s) for s in samplers], safe=False)
Example No. 36
def user_list(request:HttpRequest) -> JsonResponse:
    """List registered users in this CATMAID instance. If accessed by an
    anonymous user, only the anonymous user is returned unless the anonymous
    user has can_browse permissions, which allows it to retrieve all users.

    If the settings.py setting PROJECT_TOKEN_USER_VISIBILITY = True, logged in
    users will only see those users that share project tokens with them.

    An administrator can export users including their salted and encrypted
    passwords. This is meant to support importing users into other CATMAID
    instances.
    ---
    parameters:
    - name: with_passwords
      description: |
        Export encrypted passwords. Requires admin access.
      required: false
      type: boolean
      default: false
    """
    with_passwords = get_request_bool(request.GET, 'with_passwords', False)
    if with_passwords:
        # Make sure the user is a staff member or a superuser
        if not request.user.is_staff and not request.user.is_superuser:
            raise PermissionError("Superuser permissions required to export "
                    "encrypted user passwords")

    user = request.user
    anon_user = get_anonymous_user()

    user_list = []
    # Super users get to see all users, regardless of the backend setting.
    if settings.PROJECT_TOKEN_USER_VISIBILITY and not user.is_superuser:
        cursor = connection.cursor()
        cursor.execute("""
            WITH project_tokens AS (
                SELECT DISTINCT project_token_id AS id
                FROM catmaid_user_project_token
                WHERE user_id = %(user_id)s

                UNION

                SELECT id
                FROM catmaid_project_token
                WHERE user_id = %(user_id)s
            )
            SELECT DISTINCT ON (au.id) au.id, au.username, au.first_name,
                au.last_name, (up.color).r, (up.color).g, (up.color).b,
                up.primary_group_id
            FROM project_tokens pt
            JOIN catmaid_user_project_token upt
                ON pt.id = upt.project_token_id
            JOIN auth_user au
                ON au.id = upt.user_id
            JOIN catmaid_userprofile up
                ON up.user_id = au.id

            UNION

            SELECT au.id, au.username, au.first_name, au.last_name,
                (up.color).r, (up.color).g, (up.color).b, up.primary_group_id
            FROM auth_user au
            JOIN catmaid_userprofile up
                ON up.user_id = au.id
            WHERE au.id = %(user_id)s OR au.id = %(anon_user_id)s
        """, {
            'user_id': user.id,
            'anon_user_id': anon_user.id,
        })
        user_list = list(map(lambda u: {
            "id": u[0],
            "login": u[1],
            "full_name": f'{u[2]} {u[3]}',
            "first_name": u[2],
            "last_name": u[3],
            "color": (u[4], u[5], u[6]),
            "primary_group_id": u[7],
        }, cursor.fetchall()))
    else:
        can_see_all_users = user.is_authenticated and \
                (user != anon_user or user.has_perm('catmaid.can_browse'))

        if can_see_all_users:
            for u in User.objects.all().select_related('userprofile') \
                    .order_by('last_name', 'first_name'):
                up = u.userprofile
                user_data = {
                    "id": u.id,
                    "login": u.username,
                    "full_name": u.get_full_name(),
                    "first_name": u.first_name,
                    "last_name": u.last_name,
                    "color": (up.color.r, up.color.g, up.color.b),
                    "primary_group_id": up.primary_group_id,
                }
                if with_passwords:
                    # Append encrypted user password
                    user_data['password'] = u.password
                user_list.append(user_data)

    if not user_list:
        up = user.userprofile
        user_list = [{
            "id": user.id,
            "login": user.username,
            "full_name": user.get_full_name(),
            "first_name": user.first_name,
            "last_name": user.last_name,
            "color": (up.color.r, up.color.g, up.color.b),
            "primary_group_id": up.primary_group_id
        }]

    return JsonResponse(user_list, safe=False)
Example No. 37
    def put(self, request, name=None, format=None):
        """Create or replace a key-value data entry for the client.

        Each entry is associated with a datastore, an optional project, an
        optional user, and a key. Creating a request that duplicates this
        quadruple will replace rather than create the value in the key-value
        pair.

        Entries associated with neither a project nor user are considered
        global; those associated with a project but no user are project-
        default; those associated with a user but no project are user-default;
        and those associated with both a project and a user are user-project
        specific. When listing key-value data, all four of these values, if
        existing, will be returned.
        ---
        parameters:
        - name: name
          description: |
            String key for the **datastore** with which this key-value entry is
            associated.
          required: true
          type: string
          paramType: path
        - name: project_id
          description: |
            ID of a project to associate this data with, if any.
          required: false
          type: integer
          paramType: form
        - name: ignore_user
          description: |
            Whether to associate this key-value entry with the instance rather
            than the request user. Only project administrators can do this
            for project-associated instance data, and only super users can do
            this for global data (instance data not associated with any
            project).
          required: false
          type: boolean
          default: false
          paramType: form
        - name: key
          description: A key for this entry.
          required: true
          type: string
          paramType: form
        - name: value
          description: A value for this entry. Must be valid JSON.
          required: true
          type: string
          paramType: form
        response_serializer: ClientDataSerializer
        """
        if request.user == get_anonymous_user() or not request.user.is_authenticated:
            raise PermissionDenied('Unauthenticated or anonymous users ' \
                                   'can not create data.')
        datastore = get_object_or_404(ClientDatastore, name=name)

        key = request.data.get('key', None)
        if not key:
            raise ValidationError('A key for the data must be provided.')

        value = request.data.get('value', None)
        if not value:
            raise ValidationError('A value for the data must be provided.')
        # Validate JSON by reserializing.
        try:
            value = json.loads(value)
        except ValueError as exc:
            raise ValidationError('Data value is invalid JSON: ' + str(exc))

        project_id = request.data.get('project_id', None)
        project = None
        if project_id:
            project_id = int(project_id)
            project = get_object_or_404(Project, pk=project_id)
            if not check_user_role(request.user,
                                   project,
                                   [UserRole.Browse, UserRole.Annotate]):
                raise PermissionDenied('User lacks the appropriate ' \
                                       'permissions for this project.')

        ignore_user = get_request_bool(request.data, 'ignore_user', False)
        if ignore_user and not project_id:
            if not request.user.is_superuser:
                raise PermissionDenied('Only super users can create instance ' \
                                       'data.')
        if ignore_user and project_id:
            if not check_user_role(request.user,
                                   project,
                                   [UserRole.Admin]):
                raise PermissionDenied('Only administrators can create ' \
                                       'project default data.')
        user = None if ignore_user else request.user

        try:
            data = ClientData.objects.get(datastore=datastore,
                                          key=key,
                                          project=project,
                                          user=user)
            data.value = value
            data.full_clean()
            data.save()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except ClientData.DoesNotExist:
            data = ClientData(datastore=datastore,
                              key=key,
                              value=value,
                              project=project,
                              user=user)
            data.full_clean()
            data.save()
            serializer = ClientDataSerializer(data)
            return Response(serializer.data)
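
The four scopes described in the docstring (global, project-default, user-default, user-project) imply a precedence when clients read values back. The ordering below is an assumption made for illustration, not CATMAID's documented behavior, and the entry structure is hypothetical:

def effective_value(entries, project_id, user_id):
    """Pick the most specific matching entry from key-value 'entries'.

    Each entry is a dict with 'project', 'user' and 'value' keys, where
    project/user may be None (hypothetical structure for this sketch).
    """
    def specificity(e):
        # user+project (3) > user-default (2) > project-default (1) > global (0)
        return (e['user'] is not None) * 2 + (e['project'] is not None)

    candidates = [e for e in entries
                  if e['project'] in (None, project_id)
                  and e['user'] in (None, user_id)]
    return max(candidates, key=specificity)['value'] if candidates else None

entries = [
    {'project': None, 'user': None, 'value': 'global'},
    {'project': 1, 'user': None, 'value': 'project default'},
    {'project': 1, 'user': 7, 'value': 'user-project'},
]
assert effective_value(entries, 1, 7) == 'user-project'
assert effective_value(entries, 2, 9) == 'global'
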
Example No. 38
    def post(self, request: Request, project_id) -> Response:
        """Create a deep-link.

        The request user must not be anonymous and must have annotate
        permissions.
        ---
        serializer: DeepLinkSerializer
        """
        if request.user == get_anonymous_user() or not request.user.is_authenticated:
            raise PermissionError('Unauthenticated or anonymous users ' \
                                   'can not create persistent deep links.')

        project_id = int(project_id)

        alias = request.POST.get('alias')
        if alias:
            if not re.match(r'^[a-zA-Z0-9-_\.]+$', alias):
                raise ValueError(
                    "Only alphanumeric characters, '-', '_' and '.' allowed")
        else:
            alias = make_unique_id()

        params = {
            'project_id': project_id,
            'user': request.user,
            'alias': alias,
        }

        if 'is_public' in request.POST:
            params['is_public'] = get_request_bool(request.POST, 'is_public')

        if 'location_x' in request.POST:
            params['location_x'] = float(request.POST['location_x'])

        if 'location_y' in request.POST:
            params['location_y'] = float(request.POST['location_y'])

        if 'location_z' in request.POST:
            params['location_z'] = float(request.POST['location_z'])

        if 'active_treenode_id' in request.POST:
            params['active_treenode_id'] = int(
                request.POST['active_treenode_id'])

        if 'active_connector_id' in request.POST:
            params['active_connector_id'] = int(
                request.POST['active_connector_id'])

        if 'active_skeleton_id' in request.POST:
            params['active_skeleton_id'] = int(
                request.POST['active_skeleton_id'])

        if 'closest_node_to_location' in request.POST:
            params['closest_node_to_location'] = get_request_bool(
                request.POST, 'closest_node_to_location')

        if 'follow_id_history' in request.POST:
            params['follow_id_history'] = get_request_bool(
                request.POST, 'follow_id_history')

        if 'layered_stacks' in request.POST:
            params['layered_stacks'] = get_request_bool(
                request.POST, 'layered_stacks')

        if 'layout' in request.POST:
            params['layout'] = request.POST['layout']

        if 'tool' in request.POST:
            params['tool'] = request.POST['tool']

        if 'show_help' in request.POST:
            params['show_help'] = get_request_bool(request.POST, 'show_help')

        if 'password' in request.POST:
            params['password'] = make_password(request.POST['password'])

        if 'message' in request.POST:
            params['message'] = request.POST['message']

        # TBA: data_view

        deeplink = DeepLink(**params)
        deeplink.save()
        serializer = DeepLinkSerializer(deeplink)

        # Stacks
        stacks = get_request_list(request.POST, 'stacks', map_fn=float)
        if stacks:
            # Nested lists of 2-tuples: [[stack_id, scale_level]]
            for s in stacks:
                stack_link = DeepLinkStack(
                    **{
                        'project_id': project_id,
                        'user_id': request.user.id,
                        'deep_link': deeplink,
                        'stack_id': int(s[0]),
                        'zoom_level': s[1],
                    })
                stack_link.save()

        # Stack groups
        if 'stack_group' in request.POST:
            sg_id = int(request.POST['stack_group'])
            sg_zoom_levels = get_request_list(request.POST,
                                              'stack_group_scale_levels',
                                              map_fn=float)
            sg_link = DeepLinkStackGroup(
                **{
                    'project_id': project_id,
                    'user_id': request.user.id,
                    'deeplink': deeplink,
                    'stack_group_id': sg_id,
                    'zoom_levels': sg_zoom_levels,
                })
            sg_link.save()

        return Response(serializer.data)
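
A hedged example of creating a link through this endpoint; the URL pattern ('/1/links/') and the credentials are placeholders and may differ per deployment:

from django.test import Client

client = Client()
client.login(username='demo', password='demo')
response = client.post('/1/links/', {
    'alias': 'soma-overview',
    'location_x': 10230.5,
    'location_y': 8740.0,
    'location_z': 4120.0,
    'active_skeleton_id': 235,
    'is_public': 'true',
})
print(response.json())  # serialized DeepLink, including the alias
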
Example No. 39
def compare_skeletons(request, project_id):
    """Compare two sets of objects (skeletons or point clouds) and return an
    NBLAST scoring based on an existing NBLAST configuration.
    ---
    parameters:
      - name: project_id
        description: Project to operate in
        type: integer
        paramType: path
        required: true
      - name: config_id
        description: ID of the NBLAST configuration to use
        type: integer
        paramType: form
        required: true
      - name: query_ids
        description: Set of objects (skeletons or point clouds) to query similarity for.
        type: array
        paramType: form
        required: true
      - name: target_ids
        description: Set of objects (skeletons or point clouds) to compare against.
        type: array
        paramType: form
        required: true
      - name: target_type
        description: Type of target objects, 'skeleton' or 'pointcloud'.
        type: string
        paramType: form
        required: false
        defaultValue: 'skeleton'
      - name: name
        description: Name for the similarity lookup task
        type: string
        paramType: form
        required: false
      - name: normalized
        description: Whether and how scores should be normalized.
        type: string
        enum: [raw, normalized, mean]
        paramType: form
        required: false
        defaultValue: mean
      - name: use_alpha
        description: Whether to consider local directions in the similarity computation
        type: boolean
        paramType: form
        required: false
        defaultValue: false
      - name: query_type_id
        description: Type of query data
        enum: [skeleton, point-cloud]
        type: string
        paramType: form
        defaultValue: skeleton
        required: false
      - name: target_type_id
        description: Type of target data
        enum: [skeleton, point-cloud]
        type: string
        paramType: form
        defaultValue: skeleton
        required: false
      - name: query_meta
        description: Extra data for the selected query type. A JSON encoded string is expected.
        type: string
        paramType: form
        required: false
      - name: target_meta
        description: Extra data for the selected target type. A JSON encoded string is expected.
        type: string
        paramType: form
        required: false
      - name: remove_target_duplicates
        description: Remove all target objects that appear also in the query.
        type: boolean
        required: false
        defaultValue: true
      - name: simplify
        description: Whether or not to simplify neurons and remove parts below a specified branch point level.
        type: boolean
        required: false
        defaultValue: true
      - name: required_branches
        description: The required branch levels if neurons should be simplified.
        type: integer
        required: false
        defaultValue: 10
    """
    name = request.POST.get('name', None)
    if not name:
        n_similarity_tasks = NblastSimilarity.objects.filter(
                project_id=project_id).count()
        name = 'Task {}'.format(n_similarity_tasks + 1)

    config_id = request.POST.get('config_id', None)
    if not config_id:
        raise ValueError("Need NBLAST configuration ID")
    else:
        config_id = int(config_id)

    simplify = get_request_bool(request.POST, 'simplify', True)
    required_branches = int(request.POST.get('required_branches', '10'))

    valid_type_ids = ('skeleton', 'pointcloud', 'pointset')

    query_type_id = request.POST.get('query_type_id', 'skeleton')
    if query_type_id not in valid_type_ids:
        raise ValueError("Need valid query type id ({})".format(', '.join(valid_type_ids)))

    target_type_id = request.POST.get('target_type_id', 'skeleton')
    if target_type_id not in valid_type_ids:
        raise ValueError("Need valid target type id ({})".format(', '.join(valid_type_ids)))

    # Read potential query and target IDs. In case of skeletons and point
    # clouds, no IDs need to be provided, in which case all skeletons and point
    # clouds, respectively, will be used.
    query_ids = get_request_list(request.POST, 'query_ids', [], map_fn=int)
    if not query_ids and query_type_id not in ('skeleton', 'pointcloud'):
        raise ValueError("Need set of query objects (skeletons or point clouds) to compare")

    target_ids = get_request_list(request.POST, 'target_ids', map_fn=int)
    if not target_ids and target_type_id not in ('skeleton', 'pointcloud'):
        raise ValueError("Need set of target objects (skeletons or point clouds) to compare against")

    config = NblastConfig.objects.get(project_id=project_id, pk=config_id)

    if config.status != 'complete':
        raise ValueError("NBLAST config #{} isn't marked as complete".format(config.id))

    # Make sure we have a scoring matrix
    if not config.scoring:
        raise ValueError("NBLAST config #" + config.id +
            " doesn't have a computed scoring.")

    # Load potential query or target meta data
    query_meta = request.POST.get('query_meta')
    if query_meta:
        if not query_type_id == 'pointset':
            raise ValueError("Did not expect 'query_meta' parameter with {} query type".format(query_type_id))
        query_meta = json.loads(query_meta)
    target_meta = request.POST.get('target_meta')
    if target_meta:
        if not target_type_id == 'pointset':
            raise ValueError("Did not expect 'query_meta' parameter with {} target type".format(query_type_id))
        target_meta = json.loads(target_meta)

    # Other parameters
    normalized = request.POST.get('normalized', 'mean')
    use_alpha = get_request_bool(request.POST, 'use_alpha', False)
    remove_target_duplicates = get_request_bool(request.POST,
            'remove_target_duplicates', True)

    with transaction.atomic():
        # In case of a pointset, new pointset model objects need to be created
        # before the similarity query is created.
        if query_type_id == 'pointset':
            created_ids = []
            for pointset_id in query_ids:
                pointset_data = query_meta.get(str(pointset_id))
                if not pointset_data:
                    raise ValueError("Could not find data for pointset {}".format(pointset_id))
                flat_points = list(chain.from_iterable(pointset_data['points']))
                pointset = PointSet.objects.create(project_id=project_id,
                        user=request.user, name=pointset_data['name'],
                        description=pointset_data.get('description'),
                        points=flat_points)
                created_ids.append(pointset.id)
            query_ids = created_ids
        if target_type_id == 'pointset':
            created_ids = []
            for pointset_id in target_ids:
                pointset_data = target_meta.get(str(pointset_id))
                if not pointset_data:
                    raise ValueError("Could not find data for pointset {}".format(pointset_id))
                flat_points = list(chain.from_iterable(pointset_data['points']))
                pointset = PointSet.objects.create(project_id=project_id,
                        user=request.user, name=pointset_data['name'],
                        description=pointset_data.get('description'),
                        points=flat_points)
                created_ids.append(pointset.id)
            target_ids = created_ids

        similarity = NblastSimilarity.objects.create(project_id=project_id,
                user=request.user, name=name, status='queued', config_id=config_id,
                query_objects=query_ids, target_objects=target_ids,
                query_type_id=query_type_id, target_type_id=target_type_id,
                normalized=normalized, use_alpha=use_alpha)

    task = compute_nblast.delay(project_id, request.user.id, similarity.id,
            remove_target_duplicates, simplify, required_branches)

    return JsonResponse({
        'task_id': task.task_id,
        'similarity': serialize_similarity(similarity),
    })
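
For 'pointset' queries the view reads a name, an optional description and nested points from query_meta. Inferred from those reads (this is not an authoritative schema), a matching payload could look like:

import json

# Keyed by the placeholder pointset ID sent in query_ids, as a string.
query_meta = json.dumps({
    '123': {
        'name': 'Landmark set A',
        'description': 'optional free text',
        'points': [[10.0, 20.0, 30.0],
                   [40.0, 50.0, 60.0]],  # flattened server-side
    }
})
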
Ejemplo n.º 40
0
def skeleton_graph(request, project_id=None):
    """Get a synaptic graph between skeletons compartmentalized by confidence.

    Given a set of skeletons, retrieve presynaptic-to-postsynaptic edges
    between them, annotated with count. If a confidence threshold is
    supplied, compartmentalize the skeletons at edges in the arbor
    below that threshold and report connectivity based on these
    compartments.

    When skeletons are split into compartments, nodes in the graph take an
    string ID like ``{skeleton_id}_{compartment #}``.
    ---
    parameters:
        - name: skeleton_ids[]
          description: IDs of the skeletons to graph
          required: true
          type: array
          items:
            type: integer
          paramType: form
        - name: confidence_threshold
          description: Confidence value below which to segregate compartments
          type: integer
          paramType: form
        - name: bandwidth
          description: Bandwidth in nanometers
          type: number
        - name: cable_spread
          description: Cable spread in nanometers
          type: number
        - name: expand[]
          description: IDs of the skeletons to expand
          type: array
          items:
            type: integer
        - name: link_types[]
          description: IDs of link types to respect
          type: array
          items:
            type: string
    models:
      skeleton_graph_edge:
        id: skeleton_graph_edge
        properties:
        - description: ID of the presynaptic skeleton or compartment
          type: integer|string
          required: true
        - description: ID of the postsynaptic skeleton or compartment
          type: integer|string
          required: true
        - description: number of synapses constituting this edge
          $ref: skeleton_graph_edge_count
          required: true
      skeleton_graph_edge_count:
        id: skeleton_graph_edge_count
        properties:
        - description: Number of synapses with confidence 1
          type: integer
          required: true
        - description: Number of synapses with confidence 2
          type: integer
          required: true
        - description: Number of synapses with confidence 3
          type: integer
          required: true
        - description: Number of synapses with confidence 4
          type: integer
          required: true
        - description: Number of synapses with confidence 5
          type: integer
          required: true
      skeleton_graph_intraedge:
        id: skeleton_graph_intraedge
        properties:
        - description: ID of the presynaptic skeleton or compartment
          type: integer|string
          required: true
        - description: ID of the postsynaptic skeleton or compartment
          type: integer|string
          required: true
    type:
      edges:
        type: array
        items:
          $ref: skeleton_graph_edge
        required: true
      nodes:
        type: array
        items:
          type: integer|string
        required: false
      intraedges:
        type: array
        items:
          $ref: skeleton_graph_intraedge
        required: false
      branch_nodes:
        type: array
        items:
          type: integer|string
        required: false
    """
    compute_risk = 1 == int(request.POST.get('risk', 0))
    if compute_risk:
        # TODO port the last bit: computing the synapse risk
        from graph import skeleton_graph as slow_graph
        return slow_graph(request, project_id)

    project_id = int(project_id)
    skeleton_ids = set(int(v) for k,v in request.POST.items() if k.startswith('skeleton_ids['))
    confidence_threshold = min(int(request.POST.get('confidence_threshold', 0)), 5)
    bandwidth = float(request.POST.get('bandwidth', 0)) # in nanometers
    cable_spread = float(request.POST.get('cable_spread', 2500)) # in nanometers
    path_confluence = int(request.POST.get('path_confluence', 10)) # a count
    expand = set(int(v) for k,v in request.POST.items() if k.startswith('expand['))
    with_overall_counts = get_request_bool(request.POST, 'with_overall_counts', False)
    link_types = get_request_list(request.POST, 'link_types', None)

    graph = _skeleton_graph(project_id, skeleton_ids,
        confidence_threshold, bandwidth, expand, compute_risk, cable_spread,
        path_confluence, with_overall_counts, link_types=link_types)

    if not graph:
        raise ValueError("Could not compute graph")

    return JsonResponse(graph)
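
Because compartmentalized skeletons appear in the result as string IDs of the form ``{skeleton_id}_{compartment #}`` while whole skeletons keep their integer IDs, clients usually need to normalize both. A small helper sketch (the function name is illustrative, not part of the API):

def split_graph_node_id(node_id):
    """Return (skeleton_id, compartment) for a skeleton graph node ID.

    Whole skeletons are plain integers; compartmentalized skeletons are
    strings like '1234_2'. Compartment is None for whole skeletons.
    """
    if isinstance(node_id, int):
        return node_id, None
    skeleton_id, _, compartment = str(node_id).partition('_')
    return int(skeleton_id), int(compartment) if compartment else None

# split_graph_node_id('1234_2') -> (1234, 2)
# split_graph_node_id(1234)     -> (1234, None)
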
Ejemplo n.º 41
0
def list_logs(request: HttpRequest, project_id=None) -> JsonResponse:
    if 'user_id' in request.POST:
        user_id: Optional[int] = int(request.POST.get(
            'user_id', -1))  # logs for different users
    else:
        user_id = None
    whitelist = get_request_bool(request.POST, 'whitelist', False)
    operation_type = request.POST.get('operation_type', "-1")
    search_freetext = request.POST.get('search_freetext', "")

    display_start = int(request.POST.get('iDisplayStart', 0))
    display_length = int(request.POST.get('iDisplayLength', -1))
    if display_length < 0:
        display_length = 2000  # Default number of result rows

    should_sort = request.POST.get('iSortCol_0', False)
    if should_sort:
        column_count = int(request.POST.get('iSortingCols', 0))
        sorting_directions = [
            request.POST.get('sSortDir_%d' % d, 'DESC')
            for d in range(column_count)
        ]
        sorting_directions = ['-' if d.upper() == 'DESC' else ''
                              for d in sorting_directions]

        fields = [
            'user', 'operation_type', 'creation_time', 'x', 'y', 'z',
            'freetext'
        ]
        sorting_index = [
            int(request.POST.get('iSortCol_%d' % d))
            for d in range(column_count)
        ]
        sorting_cols = map(lambda i: fields[i], sorting_index)

    log_query = Log.objects.for_user(request.user).filter(project=project_id)
    if user_id:
        log_query = log_query.filter(user=user_id)
    if whitelist:
        log_query = log_query.filter(
            user_id__in=ReviewerWhitelist.objects.filter(
                project_id=project_id, user_id=request.user.id).values_list(
                    'reviewer_id'))
    if operation_type != "-1":
        log_query = log_query.filter(operation_type=operation_type)
    if search_freetext != "":
        log_query = log_query.filter(freetext__contains=search_freetext)

    log_query = log_query.extra(tables=['auth_user'],
                                where=['"log"."user_id" = "auth_user"."id"'],
                                select={
                                    'x': '("log"."location")."x"',
                                    'y': '("log"."location")."y"',
                                    'z': '("log"."location")."z"',
                                    'username': '"auth_user"."username"'
                                })
    if should_sort:
        log_query = log_query.extra(order_by=[
            di + col for (di, col) in zip(sorting_directions, sorting_cols)
        ])

    result = list(log_query[display_start:display_start + display_length])

    response: Dict[str, Any] = {
        'iTotalRecords': len(result),
        'iTotalDisplayRecords': len(result),
        'aaData': []
    }
    for log in result:
        response['aaData'] += [[
            log.username, log.operation_type,
            str(log.creation_time.isoformat()), log.x, log.y, log.z,
            log.freetext
        ]]

    return JsonResponse(response)
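
The view speaks the legacy DataTables server-side protocol: paging via ``iDisplayStart``/``iDisplayLength`` and sorting via ``iSortingCols``, ``iSortCol_N`` and ``sSortDir_N``, where the sort column index refers to the ``fields`` list in the code. A sketch of a matching form payload (only the field names are taken from the code above; the values are examples):

form = {
    'iDisplayStart': 0,      # offset of the first returned row
    'iDisplayLength': 50,    # page size (negative falls back to 2000)
    'iSortingCols': 1,       # number of sort columns
    'iSortCol_0': 2,         # index into fields: 2 -> 'creation_time'
    'sSortDir_0': 'DESC',    # newest entries first
    'operation_type': '-1',  # '-1' disables the operation type filter
    'search_freetext': '',   # empty string disables freetext filtering
}
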
Ejemplo n.º 42
0
def crop(request: HttpRequest, project_id=None) -> JsonResponse:
    """ Crops out the specified region of the stack. The region is expected to
    be given in terms of real world units (e.g. nm).
    """
    stack_ids = get_request_list(request.POST, "stack_ids", [], int)
    x_min = float(request.POST['min_x'])
    y_min = float(request.POST['min_y'])
    z_min = float(request.POST['min_z'])
    x_max = float(request.POST['max_x'])
    y_max = float(request.POST['max_y'])
    z_max = float(request.POST['max_z'])
    zoom_level = float(request.POST['zoom_level'])
    single_channel = get_request_bool(request.POST, 'single_channel', False)
    rotation_cw = float(request.POST.get('rotationcw', 0.0))

    # Make sure tmp dir exists and is writable
    if not os.path.exists(crop_output_path) or not os.access(
            crop_output_path, os.W_OK):
        if request.user.is_superuser:
            err_message = "Please make sure your output folder (%s) exists " \
                    "is writable." % crop_output_path
        else:
            err_message = "Sorry, the output path for the cropping tool " \
                    "isn't set up correctly. Please contact an administrator."
        return json_error_response(err_message)

    # Use first reachable stack mirrors
    stack_mirror_ids = []
    for sid in stack_ids:
        reachable = False
        stack_mirrors = StackMirror.objects.select_related('stack').filter(
            stack_id=sid)
        for sm in stack_mirrors:
            # If mirror is reachable use it right away
            tile_source = get_tile_source(sm.tile_source_type)
            try:
                req = requests.head(tile_source.get_canary_url(sm),
                                    allow_redirects=True,
                                    verify=verify_ssl)
                reachable = req.status_code == 200
            except Exception as e:
                logger.error(e)
                reachable = False
            if reachable:
                stack_mirror_ids.append(sm.id)
                break
        if not reachable:
            raise ValueError(
                "Can't find reachable stack mirror for stack {}".format(sid))

    # Create a new cropping job
    job = CropJob(request.user, project_id, stack_mirror_ids, x_min, x_max,
                  y_min, y_max, z_min, z_max, rotation_cw, zoom_level,
                  single_channel)

    # Parameter check
    errors = sanity_check(job)
    if errors:
        err_message = "Some problems with the cropping parameters were " \
                "found: " + ", ".join("{}. {}".format(n + 1, errtxt)
                                      for n, errtxt in enumerate(errors))
        return json_error_response(err_message)

    result = start_asynch_process(job)
    return result
Ejemplo n.º 43
0
def connectors_in_bounding_box(request:HttpRequest, project_id:Union[int,str]) -> JsonResponse:
    """Get a list of all connector nodes that intersect with the passed in
    bounding box.
    ---
    parameters:
    - name: limit
      description: |
        Limit the number of returned nodes.
      required: false
      type: integer
      defaultValue: 0
      paramType: form
    - name: minx
      description: |
        Minimum world space X coordinate
      required: true
      type: float
      paramType: form
    - name: miny
      description: |
        Minimum world space Y coordinate
      required: true
      type: float
      paramType: form
    - name: minz
      description: |
        Minimum world space Z coordinate
      required: true
      type: float
      paramType: form
    - name: maxx
      description: |
        Maximum world space X coordinate
      required: true
      type: float
      paramType: form
    - name: maxy
      description: |
        Maximum world space Y coordinate
      required: true
      type: float
      paramType: form
    - name: maxz
      description: |
        Maximum world space Z coordinate
      required: true
      type: float
      paramType: form
    - name: with_locations
      description: |
        Whether to return the location of each connector.
      required: false
      type: bool
      defaultValue: false
      paramType: form
    - name: with_links
      description: |
        Whether to return every individual link or null for unlinked connectors
        (if part of response).
      required: false
      type: bool
      defaultValue: false
      paramType: form
    - name: only_linked
      description: |
        Whether to return only connectors with linked treenodes. By default all
        connectors are returned and link information is null for unlinked nodes.
      required: false
      type: bool
      defaultValue: false
      paramType: form
    - name: skeleton_ids
      description: Skeletons linked to connectors
      type: array
      items:
        type: integer
      paramType: form
      required: false
    type:
        - type: array
          items:
            type: integer
          description: array of skeleton IDs or links
          required: true
    """
    project_id = int(project_id)
    data = request.GET if request.method == 'GET' else request.POST

    params = {
        'project_id': project_id,
        'limit': data.get('limit', 0),
        'with_locations': get_request_bool(data, 'with_locations', False),
        'with_links': get_request_bool(data, 'with_links', False),
    }
    for p in ('minx', 'miny', 'minz', 'maxx', 'maxy', 'maxz'):
        params[p] = float(data.get(p, 0))
    params['halfzdiff'] = abs(params['maxz'] - params['minz']) * 0.5
    params['halfz'] = params['minz'] + (params['maxz'] - params['minz']) * 0.5

    skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)
    if skeleton_ids:
        params['skeleton_ids'] = skeleton_ids

    params['only_linked'] = get_request_bool(data, 'only_linked', False)

    connector_ids = get_connectors_in_bb_postgis3d(params)
    return JsonResponse(connector_ids, safe=False)
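
The Z range is handed to the PostGIS 3D lookup as a center plane plus half-extent rather than as min/max. A worked example of the arithmetic above:

# Worked example of the Z-range reduction: [minz, maxz] becomes a center
# plane ('halfz') plus a half-extent ('halfzdiff').
minz, maxz = 100.0, 180.0
halfzdiff = abs(maxz - minz) * 0.5   # 40.0, half the Z extent
halfz = minz + (maxz - minz) * 0.5   # 140.0, the Z center plane
assert (halfz - halfzdiff, halfz + halfzdiff) == (minz, maxz)
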
Ejemplo n.º 44
0
def list_connectors(request:HttpRequest, project_id=None) -> JsonResponse:
    """Get a collection of connectors.

    The `connectors` field of the returned object contains a list of all result
    nodes, each represented as a list of the form:

    `[id, x, y, z, confidence, creator_id, editor_id, creation_time, edition_time]`

    Both edition time and creation time are returned as UTC epoch values. If
    tags are requested, the `tags` field of the response object will contain a
    mapping of connector IDs versus tag lists. If partners are requested, the
    `partners` field of the response object will contain a mapping of connector
    IDs versus lists of partner links. Each partner link is an array of the
    following format:

    `[link_id, treenode_id, skeleton_id, relation_id, confidence]`

    If both `skeleton_ids` and `relation_type` are used, the linked skeletons
    need to be linked by the specified relation. Without `relation_type`,
    linked skeletons can have any relation, and without `skeleton_ids` a
    connector needs to have at least one link with the specified relation.
    ---
    parameters:
      - name: project_id
        description: Project of connectors
        type: integer
        paramType: path
        required: true
      - name: skeleton_ids
        description: Skeletons linked to connectors
        type: array
        items:
          type: integer
        paramType: form
        required: false
      - name: tags
        description: Require a set of tags
        type: array
        items:
          type: string
        paramType: form
        required: false
      - name: relation_type
        description: Relation of linked skeletons to connector.
        type: string
        paramType: form
        required: false
      - name: without_relation_types
        description: |
            Relations to linked skeletons that connectors must not have.
        type: array
        items:
          type: string
        paramType: form
        required: false
      - name: with_tags
        description: If connector tags should be fetched
        type: boolean
        paramType: form
        defaultValue: true
        required: false
      - name: with_partners
        description: If partner node and link information should be fetched
        type: boolean
        paramType: form
        defaultValue: false
        required: false
    type:
      connectors:
        type: array
        items:
          type: array
          items:
            type: string
        description: Matching connector links
        required: true
      tags:
         type: array
      partners:
         type: array
    """
    project_id = int(project_id)
    skeleton_ids = get_request_list(request.POST, 'skeleton_ids', map_fn=int)
    tags = get_request_list(request.POST, 'tags')
    relation_type = request.POST.get('relation_type')
    without_relation_types = get_request_list(request.POST, 'without_relation_types')
    with_tags = get_request_bool(request.POST, 'with_tags', True)
    with_partners = get_request_bool(request.POST, 'with_partners', False)

    cursor = connection.cursor()
    class_map = get_class_to_id_map(project_id, cursor=cursor)
    relation_map = get_relation_to_id_map(project_id, cursor=cursor)

    # Query connectors
    constraints = []
    extra_where = []
    params:Dict = {
        'project_id': project_id,
    }

    if relation_type:
        relation_id = relation_map.get(relation_type)
        if not relation_id:
            raise ValueError("Unknown relation: " + relation_type)
        params['relation_id'] = relation_id

    if skeleton_ids:
        constraints.append('''
            JOIN treenode_connector tc
                ON tc.connector_id = c.id
            JOIN UNNEST(%(skeleton_ids)s::bigint[]) q_skeleton(id)
                ON tc.skeleton_id = q_skeleton.id
        ''')
        params['skeleton_ids'] = skeleton_ids
        if relation_type:
            constraints.append('''
                AND tc.relation_id = %(relation_id)s
            ''')
    elif relation_type:
        constraints.append('''
            JOIN treenode_connector tc_rel
                ON tc_rel.connector_id = c.id
                AND tc_rel.relation_id = %(relation_id)s
        ''')

    if without_relation_types:
        # Only connectors without the passed in relations. This is done through
        # an anti-join.
        try:
            wo_rel_ids = list(map(lambda x: relation_map[x], without_relation_types))
        except KeyError:
            missing_relations = ", ".join(filter(lambda x: x not in relation_map, without_relation_types))
            raise ValueError(f'Unknown relation: {missing_relations}')
        constraints.append('''
            LEFT JOIN treenode_connector tc_wo
                ON tc_wo.connector_id = c.id
                AND tc_wo.relation_id  = ANY (%(wo_rel_ids)s::bigint[])
        ''')
        extra_where.append('''
            tc_wo.id IS NULL
        ''')

        params['wo_rel_ids'] = wo_rel_ids

    if tags:
        constraints.append('''
            JOIN connector_class_instance cci
                ON cci.connector_id = c.id
            JOIN class_instance label
                ON label.id = class_instance_id
                AND cci.relation_id = %(labeled_as)s
            JOIN (
                SELECT class_instance.id
                FROM class_instance
                JOIN UNNEST(%(tag_names)s::text[]) tag(name)
                    ON tag.name = class_instance.name
                WHERE project_id = %(project_id)s
                    AND class_id = %(label)s
            ) q_label(id) ON label.id = q_label.id
        ''')
        params['labeled_as'] = relation_map['labeled_as']
        params['tag_names'] = tags
        params['label'] = class_map['label']

    constlines = "\n".join(constraints)
    extra_where_lines = ("AND " + " AND ".join(extra_where)) if extra_where else ""
    cursor.execute(f'''
        SELECT DISTINCT ON (c.id) c.id, c.location_x, c.location_y, c.location_z, c.confidence,
            c.user_id, c.editor_id, EXTRACT(EPOCH FROM c.creation_time),
            EXTRACT(EPOCH FROM c.edition_time)
        FROM connector c
        {constlines}
        WHERE c.project_id = %(project_id)s
        {extra_where_lines}
        ORDER BY c.id
    ''', params)

    connectors = cursor.fetchall()

    connector_ids = [c[0] for c in connectors]
    tags = defaultdict(list)
    if connector_ids and with_tags:
        c_template = ",".join("(%s)" for _ in connector_ids)
        cursor.execute(f'''
            SELECT cci.connector_id, ci.name
            FROM connector_class_instance cci
            JOIN (VALUES {c_template}) q_connector(id)
                ON cci.connector_id = q_connector.id
            JOIN (VALUES (%s)) q_relation(id)
                ON cci.relation_id = q_relation.id
            JOIN class_instance ci
                ON cci.class_instance_id = ci.id
        ''', connector_ids + [relation_map['labeled_as']])

        for row in cursor.fetchall():
            tags[row[0]].append(row[1])

        # Sort labels by name
        for connector_id, labels in tags.items():
            labels.sort(key=lambda k: k.upper())

    partners:DefaultDict[Any, List] = defaultdict(list)
    if connector_ids and with_partners:
        c_template = ",".join("(%s)" for _ in connector_ids)
        cursor.execute(f'''
            SELECT tc.connector_id, tc.id, tc.treenode_id, tc.skeleton_id,
                tc.relation_id, tc.confidence, tc.user_id,
                EXTRACT(EPOCH FROM tc.creation_time),
                EXTRACT(EPOCH FROM tc.edition_time)
            FROM treenode_connector tc
            JOIN (VALUES {c_template}) c(id)
                ON tc.connector_id = c.id
        ''', connector_ids)

        for row in cursor.fetchall():
            partners[row[0]].append(row[1:])

    return JsonResponse({
        "connectors": connectors,
        "tags": tags,
        "partners": partners
    }, safe=False)
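
Since each connector arrives as a positional list in the documented field order, a thin client-side wrapper keeps downstream code readable. A sketch (the dataclass is illustrative, not part of the API):

from dataclasses import dataclass
from typing import List

@dataclass
class ConnectorRow:
    """One entry of the 'connectors' array, in the documented field order."""
    id: int
    x: float
    y: float
    z: float
    confidence: int
    creator_id: int
    editor_id: int
    creation_time: float  # UTC epoch seconds
    edition_time: float   # UTC epoch seconds

def parse_connectors(payload) -> List[ConnectorRow]:
    return [ConnectorRow(*row) for row in payload['connectors']]
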
Ejemplo n.º 45
0
def list_connector_links(request:HttpRequest, project_id=None) -> JsonResponse:
    """Get connectors linked to a set of skeletons.

    The result data set includes information about linked connectors on a given
    input set of skeletons. These links are further constrained by relation
    type, with support currently available for: postsynaptic_to,
    presynaptic_to, abutting, gapjunction_with, tightjunction_with,
    desmosome_with.

    Returned is an object containing an array of links to connectors and a set
    of tags for all connectors found (if not disabled). The link array contains
    one array per connector link with the following content: [Linked skeleton ID,
    Connector ID, Connector X, Connector Y, Connector Z, Link confidence, Link
    creator ID, Linked treenode ID, Link creation time, Link edit time].

    A POST handler is able to accept large lists of skeleton IDs.
    ---
    parameters:
      - name: skeleton_ids
        description: Skeletons to list connectors for
        type: array
        items:
          type: integer
        paramType: form
        required: true
      - name: relation_type
        description: Relation of listed connector links
        type: string
        paramType: form
        required: true
      - name: with_tags
        description: If connector tags should be fetched
        type: boolean
        paramType: form
        defaultValue: true
        required: false
    type:
      links:
        type: array
        items:
          type: array
          items:
            type: string
        description: Matching connector links
        required: true
      tags:
         type: array
    """
    data = request.POST if request.method == 'POST' else request.GET
    skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)

    if not skeleton_ids:
        raise ValueError("At least one skeleton ID required")

    relation_type = data.get('relation_type', 'presynaptic_to')
    with_tags = get_request_bool(data, 'with_tags', True)

    cursor = connection.cursor()
    relation_map = get_relation_to_id_map(project_id, cursor=cursor)
    relation_id = relation_map.get(relation_type)
    if not relation_id:
        raise ValueError("Unknown relation: " + relation_type)
    sk_template = ",".join(("(%s)",) * len(skeleton_ids))

    cursor.execute(f'''
        SELECT tc.skeleton_id, c.id, c.location_x, c.location_y, c.location_z,
              tc.confidence, tc.user_id, tc.treenode_id, tc.creation_time,
              tc.edition_time
        FROM treenode_connector tc
        JOIN (VALUES {sk_template}) q_skeleton(id)
            ON tc.skeleton_id = q_skeleton.id
        JOIN (VALUES (%s)) q_relation(id)
            ON tc.relation_id = q_relation.id
        JOIN connector c
            ON tc.connector_id = c.id
    ''', skeleton_ids + [relation_id])

    links = []
    for row in cursor.fetchall():
        lst = list(row)
        lst[8] = lst[8].isoformat()
        lst[9] = lst[9].isoformat()
        links.append(lst)

    connector_ids = [link[1] for link in links]
    tags:DefaultDict[Any, List] = defaultdict(list)
    if connector_ids and with_tags:
        c_template = ",".join(("(%s)",) * len(connector_ids))
        cursor.execute(f'''
            SELECT cci.connector_id, ci.name
            FROM connector_class_instance cci
            JOIN (VALUES {c_template}) q_connector(id)
                ON cci.connector_id = q_connector.id
            JOIN (VALUES (%s)) q_relation(id)
                ON cci.relation_id = q_relation.id
            JOIN class_instance ci
                ON cci.class_instance_id = ci.id
        ''', connector_ids + [relation_map['labeled_as']])

        for row in cursor.fetchall():
            tags[row[0]].append(row[1])

        # Sort labels by name
        for connector_id, labels in tags.items():
            labels.sort(key=lambda k: k.upper())

    return JsonResponse({
        "links": links,
        "tags": tags
    }, safe=False)
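
The link rows are positional as well, with the two timestamps serialized via ``isoformat()``. A hedged parsing sketch (the key names are illustrative):

from datetime import datetime

LINK_FIELDS = ('skeleton_id', 'connector_id', 'x', 'y', 'z', 'confidence',
               'creator_id', 'treenode_id', 'creation_time', 'edition_time')

def link_to_dict(row):
    """Map one link row onto named fields and parse its ISO timestamps."""
    link = dict(zip(LINK_FIELDS, row))
    link['creation_time'] = datetime.fromisoformat(link['creation_time'])
    link['edition_time'] = datetime.fromisoformat(link['edition_time'])
    return link
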
Ejemplo n.º 46
0
def list_connectors(request, project_id=None):
    """Get a collection of connectors.

    The `connectors` field of the returned object contains a list of all result
    nodes, each represented as a list of the form:

    `[id, x, y, z, confidence, creator_id, editor_id, creation_time, edition_time]`

    Both edition time and creation time are returned as UTC epoch values. If
    tags are requested, the `tags` field of the response object will contain a
    mapping of connector IDs versus tag lists. If partners are requested, the
    `partners` field of the response object will contain a mapping of connector
    IDs versus lists of partner links. Each partner link is an array of the
    following format:

    `[link_id, treenode_id, skeleton_id, relation_id, confidence]`

    If both `skeleton_ids` and `relation_type` are used, the linked skeletons
    need to be linked by the specified relation. Without `relation_type`,
    linked skeletons can have any relation, and without `skeleton_ids` a
    connector needs to have at least one link with the specified relation.
    ---
    parameters:
      - name: project_id
        description: Project of connectors
        type: integer
        paramType: path
        required: true
      - name: skeleton_ids
        description: Skeletons linked to connectors
        type: array
        items:
          type: integer
        paramType: form
        required: false
      - name: tags
        description: Require a set of tags
        type: array
        items:
          type: string
        paramType: form
        required: false
      - name: relation_type
        description: Relation of linked skeletons to connector.
        type: string
        paramType: form
        required: false
      - name: with_tags
        description: If connector tags should be fetched
        type: boolean
        paramType: form
        defaultValue: true
        required: false
      - name: with_partners
        description: If partner node and link information should be fetched
        type: boolean
        paramType: form
        defaultValue: false
        required: false
    type:
      connectors:
        type: array
        items:
          type: array
          items:
            type: string
        description: Matching connector links
        required: true
      tags:
         type: array
      partners:
         type: array
    """
    project_id = int(project_id)
    skeleton_ids = get_request_list(request.POST, 'skeleton_ids', map_fn=int)
    tags = get_request_list(request.POST, 'tags')
    relation_type = request.POST.get('relation_type')
    with_tags = get_request_bool(request.POST, 'with_tags', True)
    with_partners = get_request_bool(request.POST, 'with_partners', False)

    cursor = connection.cursor()
    class_map = get_class_to_id_map(project_id, cursor=cursor)
    relation_map = get_relation_to_id_map(project_id, cursor=cursor)

    if relation_type:
        relation_id = relation_map.get(relation_type)
        if not relation_id:
            raise ValueError("Unknown relation: " + relation_type)

    # Query connectors
    constraints = []
    params = []

    if skeleton_ids:
        sk_template = ",".join("(%s)" for _ in skeleton_ids)
        constraints.append('''
            JOIN treenode_connector tc
                ON tc.connector_id = c.id
            JOIN (VALUES {}) q_skeleton(id)
                ON tc.skeleton_id = q_skeleton.id
        '''.format(sk_template))
        params.extend(skeleton_ids)
        if relation_type:
            constraints.append('''
                AND tc.relation_id = %s
            ''')
            params.append(relation_id)
    elif relation_type:
        constraints.append('''
            JOIN treenode_connector tc
                ON tc.connector_id = c.id
                AND tc.relation_id = %s
        ''')
        params.append(relation_id)

    if tags:
        tag_template = ",".join("%s" for _ in tags)
        constraints.append('''
            JOIN connector_class_instance cci
                ON cci.connector_id = c.id
            JOIN class_instance label
                ON label.id = class_instance_id
                AND cci.relation_id = %s
            JOIN (
                SELECT id
                FROM class_instance
                WHERE name IN ({})
                    AND project_id = %s
                    AND class_id = %s
            ) q_label(id) ON label.id = q_label.id
        '''.format(tag_template))
        params.append(relation_map['labeled_as'])
        params.extend(tags)
        params.append(project_id)
        params.append(class_map['label'])

    query = '''
        SELECT DISTINCT c.id, c.location_x, c.location_y, c.location_z, c.confidence,
            c.user_id, c.editor_id, EXTRACT(EPOCH FROM c.creation_time),
            EXTRACT(EPOCH FROM c.edition_time)
        FROM connector c
        {}
        WHERE c.project_id = %s
        ORDER BY c.id
    '''.format('\n'.join(constraints))
    params.append(project_id)

    cursor.execute(query, params)

    connectors = cursor.fetchall()

    connector_ids = [c[0] for c in connectors]
    tags = defaultdict(list)
    if connector_ids and with_tags:
        c_template = ",".join("(%s)" for _ in connector_ids)
        cursor.execute('''
            SELECT cci.connector_id, ci.name
            FROM connector_class_instance cci
            JOIN (VALUES {}) q_connector(id)
                ON cci.connector_id = q_connector.id
            JOIN (VALUES (%s)) q_relation(id)
                ON cci.relation_id = q_relation.id
            JOIN class_instance ci
                ON cci.class_instance_id = ci.id
        '''.format(c_template), connector_ids + [relation_map['labeled_as']])

        for row in cursor.fetchall():
            tags[row[0]].append(row[1])

        # Sort labels by name
        for connector_id, labels in tags.items():
            labels.sort(key=lambda k: k.upper())

    partners = defaultdict(list)
    if connector_ids and with_partners:
        c_template = ",".join("(%s)" for _ in connector_ids)
        cursor.execute('''
            SELECT tc.connector_id, tc.id, tc.treenode_id, tc.skeleton_id,
                tc.relation_id, tc.confidence, tc.user_id,
                EXTRACT(EPOCH FROM tc.creation_time),
                EXTRACT(EPOCH FROM tc.edition_time)
            FROM treenode_connector tc
            JOIN (VALUES {}) c(id)
                ON tc.connector_id = c.id
        '''.format(c_template), connector_ids)

        for row in cursor.fetchall():
            partners[row[0]].append(row[1:])

    return JsonResponse({
        "connectors": connectors,
        "tags": tags,
        "partners": partners
    }, safe=False)
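
This older variant and Ejemplo n.º 44 solve list binding differently: here one ``(%s)`` placeholder is interpolated per element into a ``VALUES`` list, so the SQL text grows with the input, while the newer version binds a single named array parameter and ``UNNEST``s it, keeping the statement text constant. Side by side:

skeleton_ids = [101, 102, 103]

# Older style (this example): the statement text depends on the list length.
values_template = ",".join("(%s)" for _ in skeleton_ids)   # "(%s),(%s),(%s)"
values_join = f"JOIN (VALUES {values_template}) q_skeleton(id) ON ..."

# Newer style (Ejemplo n.º 44): one array parameter, constant statement text.
unnest_join = "JOIN UNNEST(%(skeleton_ids)s::bigint[]) q_skeleton(id) ON ..."
params = {'skeleton_ids': skeleton_ids}
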
Ejemplo n.º 47
0
    def put(self,
            request: Request,
            name: Optional[str] = None,
            format=None) -> Response:
        """Create or replace a key-value data entry for the client.

        Each entry is associated with a datastore, an optional project, an
        optional user, and a key. Creating a request that duplicates this
        quadruple will replace rather than create the value in the key-value
        pair.

        Entries associated with neither a project nor user are considered
        global; those associated with a project but no user are project-
        default; those associated with a user but no project are user-default;
        and those associated with both a project and a user are user-project
        specific. When listing key-value data, all four of these values, if
        existing, will be returned.
        ---
        parameters:
        - name: name
          description: |
            String key for the **datastore** with which this key-value entry is
            associated.
          required: true
          type: string
          paramType: path
        - name: project_id
          description: |
            ID of a project to associate this data with, if any.
          required: false
          type: integer
          paramType: form
        - name: ignore_user
          description: |
            Whether to associate this key-value entry with the instance rather
            than the request user. Only project administrators can do this
            for project-associated instance data, and only super users can do
            this for global data (instance data not associated with any
            project).
          required: false
          type: boolean
          default: false
          paramType: form
        - name: key
          description: A key for this entry.
          required: true
          type: string
          paramType: form
        - name: value
          description: A value for this entry. Must be valid JSON.
          required: true
          type: string
          paramType: form
        - name: format
          description: This function parameter is ignored
          required: false
          type: Any
          default: None
        response_serializer: ClientDataSerializer
        """
        if request.user == get_anonymous_user() \
                or not request.user.is_authenticated:
            raise PermissionDenied('Unauthenticated or anonymous users ' \
                                   'can not create data.')
        datastore = get_object_or_404(ClientDatastore, name=name)

        key = request.data.get('key', None)
        if not key:
            raise ValidationError('A key for the data must be provided.')

        value = request.data.get('value', None)
        if not value:
            raise ValidationError('A value for the data must be provided.')
        # Validate JSON by reserializing.
        try:
            value = json.loads(value)
        except ValueError as exc:
            raise ValidationError('Data value is invalid JSON: ' + str(exc))

        project_id = request.data.get('project_id', None)
        project = None
        if project_id:
            project_id = int(project_id)
            project = get_object_or_404(Project, pk=project_id)
            if not check_user_role(request.user, project,
                                   [UserRole.Browse, UserRole.Annotate]):
                raise PermissionDenied('User lacks the appropriate ' \
                                       'permissions for this project.')

        ignore_user = get_request_bool(request.data, 'ignore_user', False)
        if ignore_user and not project_id:
            if not request.user.is_superuser:
                raise PermissionDenied('Only super users can create instance ' \
                                       'data.')
        if ignore_user and project_id:
            if not check_user_role(request.user, project, [UserRole.Admin]):
                raise PermissionDenied('Only administrators can create ' \
                                       'project default data.')
        user = None if ignore_user else request.user

        try:
            data = ClientData.objects.get(datastore=datastore,
                                          key=key,
                                          project=project,
                                          user=user)
            data.value = value
            data.full_clean()
            data.save()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except ClientData.DoesNotExist:
            data = ClientData(datastore=datastore,
                              key=key,
                              value=value,
                              project=project,
                              user=user)
            data.full_clean()
            data.save()
            serializer = ClientDataSerializer(data)
            return Response(serializer.data)
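
A hypothetical client call for this handler: PUT a JSON-serialized value under a key of a named datastore. Datastore name, URL and auth header are illustrative assumptions, not part of the code above:

import json
import requests

payload = {
    'project_id': 1,                 # optional project association
    'key': 'layout',                 # required key
    'value': json.dumps({'panels': ['3d', 'tracing']}),  # must be valid JSON
}
# requests.put('https://example.org/catmaid/client/datastores/settings/',
#              data=payload, headers={'X-Authorization': 'Token <api-token>'})
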
Ejemplo n.º 48
0
def list_connector_links(request, project_id=None):
    """Get connectors linked to a set of skeletons.

    The result data set includes information about linked connectors on a given
    input set of skeletons. These links are further constrained by relation
    type, with support currently available for: postsynaptic_to,
    presynaptic_to, abutting, gapjunction_with, tightjunction_with,
    desmosome_with.

    Returned is an object containing an array of links to connectors and a set
    of tags for all connectors found (if not disabled). The link array contains
    one array per connector link with the following content: [Linked skeleton ID,
    Connector ID, Connector X, Connector Y, Connector Z, Link confidence, Link
    creator ID, Linked treenode ID, Link creation time, Link edit time].

    A POST handler is able to accept large lists of skeleton IDs.
    ---
    parameters:
      - name: skeleton_ids
        description: Skeletons to list connectors for
        type: array
        items:
          type: integer
        paramType: form
        required: true
      - name: relation_type
        description: Relation of listed connector links
        type: string
        paramType: form
        required: true
      - name: with_tags
        description: If connector tags should be fetched
        type: boolean
        paramType: form
        defaultValue: true
        required: false
    type:
      links:
        type: array
        items:
          type: array
          items:
            type: string
        description: Matching connector links
        required: true
      tags:
         type: array
    """
    data = request.POST if request.method == 'POST' else request.GET
    skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)

    if not skeleton_ids:
        raise ValueError("At least one skeleton ID required")

    relation_type = data.get('relation_type', 'presynaptic_to')
    with_tags = get_request_bool(data, 'with_tags', True)

    cursor = connection.cursor()
    relation_map = get_relation_to_id_map(project_id, cursor=cursor)
    relation_id = relation_map.get(relation_type)
    if not relation_id:
        raise ValueError("Unknown relation: " + relation_type)
    sk_template = ",".join(("(%s)",) * len(skeleton_ids))

    cursor.execute('''
        SELECT tc.skeleton_id, c.id, c.location_x, c.location_y, c.location_z,
              tc.confidence, tc.user_id, tc.treenode_id, tc.creation_time,
              tc.edition_time
        FROM treenode_connector tc
        JOIN (VALUES {}) q_skeleton(id)
            ON tc.skeleton_id = q_skeleton.id
        JOIN (VALUES (%s)) q_relation(id)
            ON tc.relation_id = q_relation.id
        JOIN connector c
            ON tc.connector_id = c.id
    '''.format(sk_template), skeleton_ids + [relation_id])

    links = []
    for row in cursor.fetchall():
        link = list(row)
        link[8] = link[8].isoformat()
        link[9] = link[9].isoformat()
        links.append(link)

    connector_ids = [link[1] for link in links]
    tags = defaultdict(list)
    if connector_ids and with_tags:
        c_template = ",".join(("(%s)",) * len(connector_ids))
        cursor.execute('''
            SELECT cci.connector_id, ci.name
            FROM connector_class_instance cci
            JOIN (VALUES {}) q_connector(id)
                ON cci.connector_id = q_connector.id
            JOIN (VALUES (%s)) q_relation(id)
                ON cci.relation_id = q_relation.id
            JOIN class_instance ci
                ON cci.class_instance_id = ci.id
        '''.format(c_template), connector_ids + [relation_map['labeled_as']])

        for row in cursor.fetchall():
            tags[row[0]].append(row[1])

        # Sort labels by name
        for connector_id, labels in tags.items():
            labels.sort(key=lambda k: k.upper())

    return JsonResponse({
        "links": links,
        "tags": tags
    }, safe=False)