Code example #1
File: user_evaluation.py  Project: tomka/CATMAID
def _evaluate(project_id, user_id, start_date, end_date, max_gap, min_nodes):

    # Obtain neurons that are fully reviewed at the moment
    # and to which the user contributed nodes within the date range.

    # 1. Find out skeleton_ids towards which the user contributed
    #    within the date range
    ts = (
        Treenode.objects.filter(user_id=user_id, creation_time__range=(start_date, end_date))
        .values_list("skeleton")
        .annotate(Count("skeleton"))
    )

    # Pick only skeletons to which the user contributed more than min_nodes nodes
    skeleton_ids = set(skid for skid, count in ts if count > min_nodes)

    if not skeleton_ids:
        return None

    # Find the subset of fully reviewed skeletons
    ts = (
        Treenode.objects.filter(skeleton__in=skeleton_ids)
        .values_list("skeleton", "reviewer_id")
        .annotate(Count("reviewer_id"))
    )

    review_status = defaultdict(partial(defaultdict, int))
    for skid, reviewer_id, count in ts:
        review_status[skid][reviewer_id] = count

    not_fully_reviewed = set()
    for skid, reviewers in review_status.iteritems():
        if -1 in reviewers:
            not_fully_reviewed.add(skid)

    skeleton_ids = skeleton_ids - not_fully_reviewed

    if not skeleton_ids:
        return None

    relations = dict(
        Relation.objects.filter(
            project_id=project_id, relation_name__in=["presynaptic_to", "postsynaptic_to"]
        ).values_list("relation_name", "id")
    )

    # 2. Load each fully reviewed skeleton one at a time
    evaluations = {
        skid: _evaluate_arbor(user_id, skid, tree, relations, max_gap)
        for skid, tree in lazy_load_trees(
            skeleton_ids,
            ("location", "creation_time", "user_id", "reviewer_id", "review_time", "editor_id", "edition_time"),
        )
    }

    # 3. Extract evaluations for the user_id over time
    # Each evaluation contains an instance of EpochOps namedtuple, with members:
    # 'review_date_range', 'creation_date_range', 'user_node_counts',
    # 'splits', 'merges', 'appended', 'node_count'

    # The X axis is the last (user) creation date within the review epoch
    # The Y axis is multiple, and includes:
    #  * skeleton_id
    #  * reviewer_id
    #  * time of the last node created by the user_id in skeleton_id
    #  * nodes contributed by the user that were reviewed within the epoch
    #  * number of nodes missed by the user (which were added by the reviewer)
    #  * splits onto the user's nodes
    #  * merges onto the user's nodes
    #  * additions by the reviewer onto nodes of this user (another form of merges)
    #  * total number of presynaptic relations of skeleton_id
    #  * total number of postsynaptic relations of skeleton_id
    #  * number of presynaptic_to relations created by the reviewer within the review period onto treenodes created by user_id
    #  * number of postsynaptic_to relations created by the reviewer within the review period onto treenodes created by user_id
    #  * newer_synapses: number of synapses created by someone else onto treenodes created by user_id, after the creation of the treenode

    d = []

    for skid, arbor_epoch_ops in evaluations.iteritems():
        for epoch_ops in arbor_epoch_ops:
            if 0 == epoch_ops.user_node_counts[user_id]:
                # user did not contribute at all to this chunk
                continue
            appended = epoch_ops.appended[user_id]
            print appended
            d.append(
                {
                    "skeleton_id": skid,
                    "reviewer_id": epoch_ops.reviewer_id,
                    "timepoint": epoch_ops.creation_date_range[user_id]["end"].strftime("%Y-%m-%d"),
                    "n_created_nodes": epoch_ops.user_node_counts[user_id],
                    "n_nodes": epoch_ops.node_count,
                    "n_missed_nodes": sum(appended),
                    "n_splits": epoch_ops.splits[user_id],
                    "n_merges": epoch_ops.merges[user_id] + len(appended),
                    "n_pre": epoch_ops.n_pre,
                    "n_post": epoch_ops.n_post,
                    "reviewer_n_pre": epoch_ops.reviewer_n_pre.get(user_id, 0),
                    "reviewer_n_post": epoch_ops.reviewer_n_post.get(user_id, 0),
                    "newer_pre": epoch_ops.newer_pre_count.get(user_id, 0),
                    "newer_post": epoch_ops.newer_post_count.get(user_id, 0),
                }
            )

    return d
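The returned value is a plain list of per-epoch dictionaries (or None when no skeleton qualifies), so a thin view can hand it straight to the client. Below is a minimal caller sketch, assuming a hypothetical view name and query-parameter handling that are not part of the excerpt above; only _evaluate itself comes from the code shown.

# Hypothetical caller sketch; the real CATMAID endpoint and its parameters may differ.
import json
from datetime import datetime

from django.http import HttpResponse


def evaluate_user(request, project_id):
    user_id = int(request.GET['user_id'])
    start_date = datetime.strptime(request.GET['start_date'], '%Y-%m-%d')
    end_date = datetime.strptime(request.GET['end_date'], '%Y-%m-%d')
    max_gap = int(request.GET.get('max_gap', 3))          # assumed default
    min_nodes = int(request.GET.get('min_nodes', 100))    # assumed default

    result = _evaluate(project_id, user_id, start_date, end_date, max_gap, min_nodes)

    # _evaluate returns None when no fully reviewed skeleton passes the filters.
    return HttpResponse(json.dumps(result if result is not None else []),
                        content_type='application/json')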
Code example #2
File: user_evaluation.py  Project: catsop/CATMAID
def _evaluate(project_id, user_id, start_date, end_date, max_gap, min_nodes):

    # Obtain neurons that are fully reviewed at the moment
    # and to which the user contributed nodes within the date range.

    # 1. Find out skeleton_ids towards which the user contributed
    #    within the date range
    ts = Treenode.objects.filter(
            user_id=user_id,
            creation_time__range = (start_date, end_date)) \
         .values_list('skeleton') \
         .annotate(Count('skeleton'))

    # Pick only skeletons to which the user contributed more than min_nodes nodes
    skeleton_ids = set(skid for skid, count in ts if count > min_nodes)

    if not skeleton_ids:
        return None

    # Find the subset of fully reviewed (union without evaluated user) skeletons
    review_status = get_review_status(skeleton_ids)

    not_fully_reviewed = set()
    for skid, status in review_status.iteritems():
        if status != 100:
            not_fully_reviewed.add(skid)

    skeleton_ids = skeleton_ids - not_fully_reviewed

    if not skeleton_ids:
        return None

    # Get review information and organize it by skeleton ID and treenode ID
    reviews = defaultdict(lambda: defaultdict(list))
    for r in Review.objects.filter(skeleton_id__in=skeleton_ids):
        reviews[r.skeleton_id][r.treenode_id].append(r)

    # Sort all reviews of all treenodes by review time, most recent first
    for skid, tid_to_rs in reviews.iteritems():
        for tid, rs in tid_to_rs.iteritems():
            rs.sort(key=lambda r: r.review_time)
            rs.reverse()

    relations = dict(Relation.objects.filter(
            project_id=project_id,
            relation_name__in=['presynaptic_to', 'postsynaptic_to'])
        .values_list('relation_name', 'id'))

    # 2. Load each fully reviewed skeleton one at a time
    evaluations = {skid: _evaluate_arbor(user_id, skid, tree, reviews[skid], relations, max_gap) \
        for skid, tree in lazy_load_trees(skeleton_ids, ('location', 'creation_time', 'user_id', 'editor_id', 'edition_time'))}

    # 3. Extract evaluations for the user_id over time
    # Each evaluation contains an instance of EpochOps namedtuple, with members:
    # 'review_date_range', 'creation_date_range', 'user_node_counts',
    # 'splits', 'merges', 'appended', 'node_count'

    # The X axis is the last (user) creation date within the review epoch
    # The Y axis is multiple, and includes:
    #  * skeleton_id
    #  * reviewer_id
    #  * time of the last node created by the user_id in skeleton_id
    #  * nodes contributed by the user that were reviewed within the epoch
    #  * number of nodes missed by the user (which were added by the reviewer)
    #  * splits onto the user's nodes
    #  * merges onto the user's nodes
    #  * additions by the reviewer onto nodes of this user (another form of merges)
    #  * total number of presynaptic relations of skeleton_id
    #  * total number of postsynaptic relations of skeleton_id
    #  * number of presynaptic_to relations created by the reviewer within the review period onto treenodes created by user_id
    #  * number of postsynaptic_to relations created by the reviewer within the review period onto treenodes created by user_id
    #  * newer_synapses: number of synapses created by someone else onto treenodes created by user_id, after the creation of the treenode

    d = []

    for skid, arbor_epoch_ops in evaluations.iteritems():
        for epoch_ops in arbor_epoch_ops:
            if 0 == epoch_ops.user_node_counts[user_id]:
                # user did not contribute at all to this chunk
                continue
            appended = epoch_ops.appended[user_id]
            print appended
            d.append({'skeleton_id': skid,
                      'reviewer_id': epoch_ops.reviewer_id,
                      'timepoint': epoch_ops.creation_date_range[user_id]['end'].strftime('%Y-%m-%d'),
                      'n_created_nodes': epoch_ops.user_node_counts[user_id],
                      'n_nodes': epoch_ops.node_count,
                      'n_missed_nodes': sum(appended),
                      'n_splits': epoch_ops.splits[user_id],
                      'n_merges': epoch_ops.merges[user_id] + len(appended),
                      'n_pre': epoch_ops.n_pre,
                      'n_post': epoch_ops.n_post,
                      'reviewer_n_pre': epoch_ops.reviewer_n_pre.get(user_id, 0),
                      'reviewer_n_post': epoch_ops.reviewer_n_post.get(user_id, 0),
                      'newer_pre': epoch_ops.newer_pre_count.get(user_id, 0),
                      'newer_post': epoch_ops.newer_post_count.get(user_id, 0)})

    return d
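Unlike example #1, this variant decides "fully reviewed" via get_review_status (keeping only skeletons at 100%) and pulls review history from a separate Review table, grouped per skeleton and per treenode with the newest review first. The grouping and sorting step can be illustrated in isolation; the sketch below uses a namedtuple as a stand-in for the Review model and made-up rows, so nothing in it is CATMAID's actual ORM.

# Standalone sketch of the review-grouping step, with hypothetical data.
from collections import defaultdict, namedtuple
from datetime import datetime

FakeReview = namedtuple('FakeReview', ['skeleton_id', 'treenode_id', 'reviewer_id', 'review_time'])

rows = [
    FakeReview(1, 10, 5, datetime(2013, 1, 2)),
    FakeReview(1, 10, 6, datetime(2013, 2, 1)),
    FakeReview(1, 11, 5, datetime(2013, 1, 3)),
]

# Group by skeleton ID, then by treenode ID, as in the reviews defaultdict above.
reviews = defaultdict(lambda: defaultdict(list))
for r in rows:
    reviews[r.skeleton_id][r.treenode_id].append(r)

# Sort each treenode's reviews by review time, most recent first.
for tid_to_rs in reviews.values():
    for rs in tid_to_rs.values():
        rs.sort(key=lambda r: r.review_time, reverse=True)

assert reviews[1][10][0].reviewer_id == 6  # the February review comes first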
Code example #3
File: user_evaluation.py  Project: fferen/CATMAID
def _evaluate(project_id, user_id, start_date, end_date, max_gap, min_nodes):

    # Obtain neurons that are fully reviewed at the moment
    # and to which the user contributed nodes within the date range.

    # 1. Find out skeleton_ids towards which the user contributed
    #    within the date range
    ts = Treenode.objects.filter(
            user_id=user_id,
            creation_time__range = (start_date, end_date)) \
         .values_list('skeleton') \
         .annotate(Count('skeleton'))

    # Pick only skeletons to which the user contributed more than min_nodes nodes
    skeleton_ids = set(skid for skid, count in ts if count > min_nodes)

    if not skeleton_ids:
        return None

    # Find the subset of fully reviewed skeletons
    ts = Treenode.objects.filter(skeleton__in=skeleton_ids) \
         .values_list('skeleton', 'reviewer_id') \
         .annotate(Count('reviewer_id'))

    review_status = defaultdict(partial(defaultdict, int))
    for skid, reviewer_id, count in ts:
        review_status[skid][reviewer_id] = count

    not_fully_reviewed = set()
    for skid, reviewers in review_status.iteritems():
        if -1 in reviewers:
            not_fully_reviewed.add(skid)

    skeleton_ids = skeleton_ids - not_fully_reviewed

    if not skeleton_ids:
        return None

    relations = dict(Relation.objects.filter(
            project_id=project_id,
            relation_name__in=['presynaptic_to', 'postsynaptic_to'])
        .values_list('relation_name', 'id'))

    # 2. Load each fully reviewed skeleton one at a time
    evaluations = {skid: _evaluate_arbor(user_id, skid, tree, relations, max_gap)
                   for skid, tree in lazy_load_trees(
                       skeleton_ids,
                       ('location', 'creation_time', 'user_id', 'reviewer_id',
                        'review_time', 'editor_id', 'edition_time'))}

    # 3. Extract evaluations for the user_id over time
    # Each evaluation contains an instance of EpochOps namedtuple, with members:
    # 'review_date_range', 'creation_date_range', 'user_node_counts',
    # 'splits', 'merges', 'appended', 'node_count'

    # The X axis is the last (user) creation date within the review epoch
    # The Y axis is multiple, and includes:
    #  * skeleton_id
    #  * reviewer_id
    #  * time of the last node created by the user_id in skeleton_id
    #  * nodes contributed by the user that were reviewed within the epoch
    #  * number of nodes missed by the user (which were added by the reviewer)
    #  * splits onto the user's nodes
    #  * merges onto the user's nodes
    #  * additions by the reviewer onto nodes of this user (another form of merges)
    #  * total number of presynaptic relations of skeleton_id
    #  * total number of postsynaptic relations of skeleton_id
    #  * number of presynaptic_to relations created by the reviewer within the review period onto treenodes created by user_id
    #  * number of postsynaptic_to relations created by the reviewer within the review period onto treenodes created by user_id
    #  * newer_synapses: number of synapses created by someone else onto treenodes created by user_id, after the creation of the treenode

    d = []

    for skid, arbor_epoch_ops in evaluations.iteritems():
        for epoch_ops in arbor_epoch_ops:
            if 0 == epoch_ops.user_node_counts[user_id]:
                # user did not contribute at all to this chunk
                continue
            appended = epoch_ops.appended[user_id]
            print appended
            d.append({'skeleton_id': skid,
                      'reviewer_id': epoch_ops.reviewer_id,
                      'timepoint': epoch_ops.creation_date_range[user_id]['end'].strftime('%Y-%m-%d'),
                      'n_created_nodes': epoch_ops.user_node_counts[user_id],
                      'n_nodes': epoch_ops.node_count,
                      'n_missed_nodes': sum(appended),
                      'n_splits': epoch_ops.splits[user_id],
                      'n_merges': epoch_ops.merges[user_id] + len(appended),
                      'n_pre': epoch_ops.n_pre,
                      'n_post': epoch_ops.n_post,
                      'reviewer_n_pre': epoch_ops.reviewer_n_pre.get(user_id, 0),
                      'reviewer_n_post': epoch_ops.reviewer_n_post.get(user_id, 0),
                      'newer_pre': epoch_ops.newer_pre_count.get(user_id, 0),
                      'newer_post': epoch_ops.newer_post_count.get(user_id, 0)})

    return d
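Examples #1 and #3 use the older schema in which each treenode carries a reviewer_id column and -1 marks an unreviewed node, so a skeleton counts as fully reviewed only when no node still holds the -1 sentinel. The sketch below replays that filter on hypothetical (skeleton, reviewer_id, count) rows in place of the Django aggregation query.

# Standalone sketch of the full-review filter from examples #1 and #3, with made-up rows.
from collections import defaultdict
from functools import partial

rows = [
    (1, 5, 120),   # skeleton 1: 120 nodes reviewed by user 5
    (2, 5, 80),    # skeleton 2: 80 nodes reviewed by user 5 ...
    (2, -1, 7),    # ... and 7 nodes still unreviewed (reviewer_id == -1)
]

review_status = defaultdict(partial(defaultdict, int))
for skid, reviewer_id, count in rows:
    review_status[skid][reviewer_id] = count

# Keep only skeletons with no -1 entry, i.e. no unreviewed nodes.
fully_reviewed = set(skid for skid, reviewers in review_status.items()
                     if -1 not in reviewers)
assert fully_reviewed == set([1])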