Example No. 1
# Imports assumed for this excerpt; get_inputs, select_dict_el and json_loads may be project helpers.
import requests

from json import loads as json_loads


def process(specific_info, data, *args):
    inputs = get_inputs(data, specific_info)

    tfidf_worker_id = inputs.get('tfidf_worker_id')
    queue = inputs.get('__read_from_queue')
    connection = args[0].get('connection')
    all_words = {}

    all_data = [] #for bioportal

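    # Read every JSON payload stored in the Redis sorted set and collect per-word TF-IDF scores.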
    if queue:
        queue_values = connection.zrange(queue, 0, -1)
        for queue_raw_data in queue_values:
            queue_data = json_loads(queue_raw_data)
            all_data.append(queue_data) #for bioportal

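            # The upstream worker's TF-IDF output is expected to be an iterable of (word, score) pairs.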
            tfidf = select_dict_el(queue_data, 'workers_output.%s.tfidf' % tfidf_worker_id)
            for word, value in tfidf:
                if not all_words.get(word):
                    all_words[word] = []
                all_words[word].append(value)

    # Score each word: 0.65 weight on its mean TF-IDF plus 0.35 on how widely it
    # appears across documents, capped at 1 (assumes at least one queue item was read).
    max_appearance = max([len(values) for (word, values) in all_words.iteritems()]) / 5
    tfidf_results = [
        (word, 1.0 * sum(values) / len(values) * 0.65 + 0.35 * min(len(values) / max_appearance, 1))
        for (word, values) in all_words.iteritems()
    ]
    tfidf_results.sort(key=lambda tup: -tup[1])

    # Bioportal: merge the per-document annotations into one record per ontology term.
    bioportal_worker_id = inputs.get('bioportal_worker_id')
    bioportal_mesh_names_url = inputs.get('bioportal_mesh_names_url')
    # The URL is expected to return a JSON mapping of ontology ids to MeSH names.
    mesh_names = json_loads(requests.get(bioportal_mesh_names_url).content)
    bioportal_merged = {}
    for queue_data in all_data:
        bioportal_annotated = select_dict_el(queue_data, 'workers_output.%s.bioportal_annotated' % bioportal_worker_id)
        for mesh_data in (bioportal_annotated or {}).get('data', []):
            ontology_id = mesh_data.get('ontology_quote_id')
            if not bioportal_merged.get(ontology_id):
                if not mesh_names.get(ontology_id):
                    continue
                bioportal_merged[ontology_id] = {
                    'ontology_quote_id': ontology_id,
                    'matched_terms': [],
                    'total_frequency': 0,
                    'included_in_documents': 0,
                    'name': mesh_names.get(ontology_id)
                }
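            # Accumulate frequency, document count and matched terms for this ontology term.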
            bioportal_merged[ontology_id]['total_frequency'] += mesh_data.get('frequency')
            bioportal_merged[ontology_id]['included_in_documents'] += 1
            bioportal_merged[ontology_id]['matched_terms'] = list(set(mesh_data.get('matched_terms')+bioportal_merged[ontology_id]['matched_terms']))
    to_return_bioportal = sorted(
        bioportal_merged.values(), key=lambda k: k['included_in_documents'], reverse=True
    )
    return [{'group_tfidf': tfidf_results, 'bioportal_merged': to_return_bioportal},]

Example No. 2
# Assumed import for this excerpt; get_inputs, select_dict_el and to_unicode are project helpers.
from bson import ObjectId


def process(specific_info, data, *args):
    inputs = get_inputs(data, specific_info)
    mongo_connection = args[0].get('mongo_connection')

    data_filter = inputs.get('data_filter', {})
    fields = inputs.get('fields', [])
    names = inputs.get('names', [])
    collection = inputs.get('collection', [])
    target_file = inputs.get('target_file')
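    # Mongo projection limiting the query to the requested fields.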
    data_selector = dict([(field, 1) for field in fields])

    matrix_id = inputs.get('matrix_id')

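    # When a matrix id is given, restrict the export to documents referenced by that matrix.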
    if matrix_id:
        matrix_documents = set()
        matrix = mongo_connection.matrix.find_one({'_id': ObjectId(inputs.get('matrix_id'))})
        matrix_documents |= set([
            matrix_el.get('id') for matrix_el in select_dict_el(matrix, 'matrix_dict.matrix', [])
        ])
        matrix_documents |= set([
            matrix_el.get('id')
            for matrix_el in select_dict_el(matrix, 'matrix_dict.studies_order', [])
        ])
        data_filter.update({"id": {"$in": list(matrix_documents)}})

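    # With a target file, rows are written as tab-separated text; otherwise documents are yielded downstream.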
    if target_file:
        target_file = open(target_file, 'w')
        target_file.write("\t".join(names)+"\n")

    for episte_data in mongo_connection[collection].find(data_filter, data_selector):
        if target_file:
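            # Build one tab-separated row, stripping newlines and tabs from the field values.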
            text = u"\t".join([
                to_unicode(
                    select_dict_el(episte_data, field) or ''
                ).replace('\r\n', ' ').replace('\n', '').replace('\t', ' ') for field in fields
            ]).encode('utf-8')
            target_file.write(text+"\n")
        else:
            yield dict(
                [
                    (names[i], (select_dict_el(episte_data, field) or ''))
                    for i, field in enumerate(fields)
                ]
            )
    if target_file:
        target_file.close()
        yield {'episte_data_target_file': inputs.get('target_file')}