import os

import requests

from aggregation_api import AggregationAPI


def aggregate(project_id, token, href, metadata, environment):
    # run the aggregation engine for this project and export the results as a compressed CSV tarball
    project = AggregationAPI(project_id, environment=environment)
    project.__aggregate__()
    tarpath = project.__csv_output__(compress=True)

    # notify that the upload is starting and get back the signed URL to upload to
    # (send_uploading and send_finished are assumed to be defined elsewhere in this module)
    response = send_uploading(metadata, token, href)
    url = response.json()["media"][0]["src"]

    # upload the gzipped tarball, remove the local copy, then mark the export as finished
    with open(tarpath, 'rb') as tarball:
        requests.put(url, headers={'Content-Type': 'application/x-gzip'}, data=tarball)
    os.remove(tarpath)

    send_finished(metadata, token, href)
#!/usr/bin/env python
import sys
sys.path.append("/home/greg/github/reduction/engine")
sys.path.append("/home/ggdhines/PycharmProjects/reduction/engine")
__author__ = 'greg'
from aggregation_api import AggregationAPI
import numpy

workflow_id = 6

# run aggregation for the wildebeest workflow without storing the results back to the database
wildebeest = AggregationAPI(6)
aggregations = wildebeest.__aggregate__(workflows=[6], store_values=False)

# the workflow has a single marking task; grab it along with its tool definitions and labels
marking_task = list(wildebeest.workflows[workflow_id][1].keys())[0]
tools = wildebeest.workflows[workflow_id][1][marking_task]

workflows, versions, instructions, updated_at_timestamps = wildebeest.__get_workflow_details__(workflow_id)
tools_labels = instructions[workflow_id][marking_task]["tools"]

# for every subject, count how many markings each classification made with each tool
for j, subject_id in enumerate(aggregations):
    overall_votes = {int(t_index): [] for t_index in range(len(tools))}

    for annotation in wildebeest.__get_raw_classifications__(subject_id, workflow_id):
        tool_votes = {int(t_index): 0 for t_index in range(len(tools))}

        for task in annotation:
            if task["task"] == marking_task:
                for marking in task["value"]:
                    tool_votes[int(marking["tool"])] += 1

        for t_index in tool_votes:
            overall_votes[t_index].append(tool_votes[t_index])
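# --- illustrative sketch, not part of the original script ---
# A minimal way to summarise the vote counts collected above, assuming overall_votes
# still holds the per-tool lists built for the last subject in the loop: report the
# mean and spread of the number of markings made with each tool.
for t_index, counts in overall_votes.items():
    if counts:
        print("tool %d: mean %.2f, std %.2f markings per classification" % (t_index, numpy.mean(counts), numpy.std(counts)))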