예제 #1
0
def process_method(method, properties, body) -> dict:
    """Consume a queued analysis job: load its inputs from S3 and run it.

    Decodes the message *body* as JSON metadata, downloads the meta and
    counts tables from S3, builds an optional subsampler, resolves the
    requested database version and dispatches to the statistical or
    non-statistical analysis.  Returns the analysis response dict.
    """
    metadata = json.loads(body.decode('utf-8'))
    job_id = metadata['job_id']
    app_logger.info('New Job Queued: {}'.format(job_id))

    meta = read_data_from_s3(metadata['file_meta'], s3_bucket_name, index_column_first=False)
    counts = read_data_from_s3(metadata['file_counts'], s3_bucket_name, index_column_first=True)

    if metadata.get('subsampling', False):
        num_cells = int(metadata['num_cells']) if metadata.get('num_cells', False) else None
        subsampler = Subsampler(bool(metadata['log']), int(metadata['num_pc']), num_cells)
    else:
        subsampler = None

    # Fall back to 'latest' when the requested version is not installed locally.
    database_version = metadata.get('database_version', 'latest')
    if database_version not in list_local_versions() + ['latest']:
        database_version = 'latest'

    app = cpdb_app.create_app(verbose=False, database_file=find_database_for(database_version))

    analysis = statistical_analysis if metadata['iterations'] else non_statistical_analysis
    return analysis(app, meta, counts, job_id, metadata, subsampler)
예제 #2
0
def process_plot(method, properties, body) -> dict:
    """Consume a queued plot job and dispatch it by plot type.

    Decodes the message *body* as JSON metadata and delegates to the plot
    builder matching ``metadata['type']``.  An unrecognized plot type
    produces an error response dict rather than raising.
    """
    metadata = json.loads(body.decode('utf-8'))
    job_id = metadata['job_id']
    app_logger.info('New Plot Queued: {}'.format(job_id))

    plot_type = metadata.get('type', None)

    if plot_type == 'dot_plot':
        means = metadata.get('file_means')
        pvalues = metadata.get('file_pvalues')
        rows = metadata.get('file_rows', None)
        columns = metadata.get('file_columns', None)
        return dot_plot_results(means, pvalues, rows, columns, job_id)

    if plot_type == 'heatmaps_plot':
        return heatmaps_plot_results(metadata.get('file_meta'),
                                     metadata.get('file_pvalues'),
                                     job_id)

    # No matching builder: report the unsupported type back to the caller.
    return {
        'job_id': job_id,
        'success': False,
        'error': {
            'id': 'UnknownPlotType',
            'message': 'Given plot type does not exist: {}'.format(plot_type)
        }
    }
예제 #3
0
        def wrapper(namefile='', data_path=''):
            """Load the CSV for *method_name* and feed it to the collector.

            Defaults to '<method_name>.csv' inside the configured data dir
            when no explicit file or path is given.
            """
            app_logger.info('Collecting {}'.format(method_name))

            effective_name = namefile or '{}.csv'.format(method_name)
            effective_path = data_path or data_dir

            data = pd.read_csv('{}/{}'.format(effective_path, effective_name))

            getattr(cellphonedb_app.cellphonedb.collect, method_name)(data)
예제 #4
0
def process_job(method, properties, body) -> dict:
    """Consume a queued analysis job: load its inputs from S3 and run it.

    Decodes the message *body* as JSON metadata, downloads the meta and
    counts tables from S3, coerces the counts to float64 and dispatches to
    the statistical or non-statistical analysis.  Returns the response dict.
    """
    metadata = json.loads(body.decode('utf-8'))
    job_id = metadata['job_id']
    app_logger.info('New Job Queued: {}'.format(job_id))
    meta = read_data_from_s3(metadata['file_meta'], s3_bucket_name)
    counts = read_data_from_s3(metadata['file_counts'], s3_bucket_name)
    # 'pd.np' was deprecated in pandas 1.0 and removed in 2.0; the dtype
    # string resolves to numpy.float64 and is equivalent on all versions.
    counts = counts.astype(dtype='float64', copy=False)

    if metadata['iterations']:
        response = statistical_analysis(meta, counts, job_id, metadata)
    else:
        response = non_statistical_analysis(meta, counts, job_id, metadata)

    return response
        def wrapper(namefile='', data_path=''):
            """Read the collector input table and run the collect method.

            Defaults to '<method_name>_input.csv' inside the configured
            data dir when no explicit file or path is given.
            """
            app_logger.info('Collecting {}'.format(method_name))

            effective_name = namefile or '{}_input.csv'.format(method_name)
            effective_path = data_path or data_dir

            data = utils.read_data_table_from_file('{}/{}'.format(
                effective_path, effective_name))

            # A custom database file means collecting through a freshly
            # created app; otherwise use the shared application instance.
            if self.database_file:
                collector = create_app(True, self.database_file, True).collect
            else:
                collector = cellphonedb_app.cellphonedb.collect

            getattr(collector, method_name)(data)
def process_job(method, properties, body) -> dict:
    """Consume a queued analysis job, normalizing the uploaded meta table.

    Decodes the message *body* as JSON metadata, downloads the meta table
    from S3 and keeps only its first column as 'cell_type', then downloads
    the counts and dispatches to the statistical or non-statistical
    analysis.  Returns the response dict.

    Raises:
        ParseMetaException: if the meta table cannot be reduced to the
            single 'cell_type' column.
    """
    metadata = json.loads(body.decode('utf-8'))
    job_id = metadata['job_id']
    app_logger.info('New Job Queued: {}'.format(job_id))
    meta_raw = read_data_from_s3(metadata['file_meta'], s3_bucket_name)
    try:
        # Only the first column of the uploaded meta table is used: it holds
        # the cell type assigned to each row index.
        meta = pd.DataFrame(index=meta_raw.index)
        meta['cell_type'] = meta_raw.iloc[:, 0]

    except Exception as e:
        # A bare 'except:' would also swallow KeyboardInterrupt/SystemExit;
        # narrow it and chain the original cause for debugging.
        raise ParseMetaException from e

    counts = read_data_from_s3(metadata['file_counts'], s3_bucket_name)

    if metadata['iterations']:
        response = statistical_analysis(meta, counts, job_id, metadata)
    else:
        response = non_statistical_analysis(meta, counts, job_id, metadata)

    return response
예제 #7
0
jobs_runned = 0

# Poll the job queue and process up to three jobs before exiting.
while jobs_runned < 3:
    job = channel.basic_get(queue=jobs_queue_name, no_ack=True)

    if all(job):
        try:
            job_response = process_job(*job)
            # TODO: Find more elegant solution
            connection = create_rabbit_connection()
            channel = connection.channel()
            channel.basic_qos(prefetch_count=1)

            channel.basic_publish(exchange='', routing_key=result_queue_name, body=json.dumps(job_response))
            app_logger.info('JOB %s PROCESSED' % job_response['job_id'])
        except Exception as e:
            error_response = {
                'job_id': json.loads(job[2].decode('utf-8'))['job_id'],
                'success': False,
                'error': {
                    'id': 'unknown_error',
                    'message': ''
                }
            }
            # traceback.print_exc() writes to the given stream and returns
            # None; wrapping it in print() only emitted a stray "None" line.
            traceback.print_exc(file=sys.stdout)
            app_logger.error('[-] ERROR DURING PROCESSING JOB %s' % error_response['job_id'])
            if connection.is_closed:
                connection = create_rabbit_connection()
                channel = connection.channel()
                channel.basic_qos(prefetch_count=1)
        # Without this increment the 'jobs_runned < 3' guard never becomes
        # false and the loop busy-polls forever; count each consumed job.
        jobs_runned += 1
예제 #8
0
    def __getattribute__(self, name):
        """Return attribute *name*, logging the access when it is callable."""
        attribute = object.__getattribute__(self, name)
        # Log method launches only; plain data attributes pass through silently.
        if hasattr(attribute, '__call__'):
            app_logger.info('Launching Method {}'.format(name))

        return attribute
예제 #9
0
def signal_handler(sig, frame):
    """Stop consuming new jobs once a termination signal is received."""
    global consume_more_jobs
    app_logger.info(
        '{} signal received. No more jobs will be consumed.'.format(sig))
    consume_more_jobs = False