Example #1
    def test_fcs(self):
        """test FCS parsing"""
        # store test files and expectations
        test_files = {
            'normal.fcs': {
                'columns_string':
                'FSC-A,SSC-A,FL1-A,FL2-A,FL3-A,FL4-A,FSC-H,SSC-H,FL1-H,FL2-H,FL3-H,FL4-H,Width,Time',
                'in_body': '<th>FL3-H</th>',
                'in_meta_keys': '#P1MaxUsefulDataChannel',
                'in_meta_values': '491519',
                'has_warnings': False,
            },
            'meta_only.fcs': {
                'in_meta_keys': '_channel_names_',
                'in_meta_values':
                'Compensation Controls_G710 Stained Control.fcs',
                'has_warnings': True,
            },
        }
        for file in test_files:
            in_file = os.path.join(BASE_DIR, 'fcs', file)

            with open(in_file, mode='rb') as fcs:
                body, info = extract_fcs(fcs)
                if body != "":
                    assert test_files[file]['in_body'] in body
                    assert not test_files[file].get('has_warnings')
                else:
                    assert test_files[file]['has_warnings']
                    assert info['warnings']
                assert test_files[file]['in_meta_keys'] in info[
                    'metadata'].keys()
                assert test_files[file]['in_meta_values'] in info[
                    'metadata'].values()
                # when there's a body, also check that columns-only output (as_html=False) works
                if test_files[file].get('in_body'):
                    # seek back to the start so the file-like object can be read a second time
                    fcs.seek(0)
                    body, info = extract_fcs(fcs, as_html=False)
                    assert body == test_files[file]['columns_string']
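A minimal usage sketch of the extract_fcs call exercised above, outside the test harness. The helper name and file path are hypothetical; extract_fcs and its (body, info) return shape are taken straight from the test:

def print_fcs_summary(path):
    """Print the column string and metadata keys of an FCS file."""
    with open(path, mode='rb') as fcs:
        # as_html=False yields a comma-separated column string instead of an HTML table
        columns, info = extract_fcs(fcs, as_html=False)
        print(columns)
        print(sorted(info['metadata'].keys()))
        if info.get('warnings'):
            print('warnings:', info['warnings'])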
Example #2
def maybe_get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):
    """get the byte contents of a file if it's a target for deep indexing"""
    if ext.endswith('.gz'):
        compression = 'gz'
        ext = ext[:-len('.gz')]
    else:
        compression = None

    content = ""
    inferred_ext = infer_extensions(key, ext)
    if inferred_ext in CONTENT_INDEX_EXTS:
        if inferred_ext == ".fcs":
            obj = retry_s3("get",
                           bucket,
                           key,
                           size,
                           etag=etag,
                           s3_client=s3_client,
                           version_id=version_id)
            body, info = extract_fcs(get_bytes(obj["Body"], compression),
                                     as_html=False)
            # be smart and just send column names to ES (instead of bloated full schema)
            # if this is not an HTML/catalog preview
            content = trim_to_bytes(f"{body}\n{info}", ELASTIC_LIMIT_BYTES)
        elif inferred_ext == ".ipynb":
            content = trim_to_bytes(
                # we have no choice but to fetch the entire notebook, because we
                # are going to parse it
                # warning: huge notebooks could spike memory here
                get_notebook_cells(bucket,
                                   key,
                                   size,
                                   compression,
                                   etag=etag,
                                   s3_client=s3_client,
                                   version_id=version_id),
                ELASTIC_LIMIT_BYTES)
        elif inferred_ext == ".parquet":
            if size >= get_available_memory():
                print(
                    f"{bucket}/{key} too large to deserialize; skipping contents"
                )
                # at least index the key and other stats, but don't overrun memory
                # and fail indexing altogether
                return ""
            obj = retry_s3("get",
                           bucket,
                           key,
                           size,
                           etag=etag,
                           s3_client=s3_client,
                           version_id=version_id)
            body, info = extract_parquet(get_bytes(obj["Body"], compression),
                                         as_html=False,
                                         skip_rows=(inferred_ext
                                                    in SKIP_ROWS_EXTS))
            # be smart and just send column names to ES (instead of bloated full schema)
            # if this is not an HTML/catalog preview
            columns = ','.join(list(info['schema']['names']))
            content = trim_to_bytes(f"{columns}\n{body}", ELASTIC_LIMIT_BYTES)
        else:
            content = get_plain_text(bucket,
                                     key,
                                     size,
                                     compression,
                                     etag=etag,
                                     s3_client=s3_client,
                                     version_id=version_id)

    return content
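A hypothetical call site for maybe_get_contents, assuming a boto3 S3 client; the bucket, key, etag, and size values below are illustrative and do not come from the example itself:

import boto3

s3 = boto3.client("s3")
content = maybe_get_contents(
    "example-bucket",            # hypothetical bucket
    "data/measurements.fcs",     # hypothetical key
    ".fcs",
    etag="0123456789abcdef",
    version_id=None,
    s3_client=s3,
    size=2048,
)
# content is "" for extensions outside CONTENT_INDEX_EXTS (and for Parquet files
# larger than available memory); otherwise it is trimmed to ELASTIC_LIMIT_BYTES.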
Example #3
def lambda_handler(request):
    """
    dynamically handle preview requests for bytes in S3
    caller must specify input_type (since there may be no file extension)

    Returns:
        JSON response
    """
    url = request.args['url']
    input_type = request.args.get('input')
    compression = request.args.get('compression')
    separator = request.args.get('sep') or ','
    exclude_output = request.args.get('exclude_output') == 'true'
    try:
        max_bytes = int(request.args.get('max_bytes', CATALOG_LIMIT_BYTES))
    except ValueError as error:
        return make_json_response(400, {
            'title': 'Unexpected max_bytes= value',
            'detail': str(error)
        })

    parsed_url = urlparse(url, allow_fragments=False)
    if not (parsed_url.scheme == 'https'
            and parsed_url.netloc.endswith(S3_DOMAIN_SUFFIX)
            and parsed_url.username is None and parsed_url.password is None):
        return make_json_response(
            400, {'title': 'Invalid url=. Expected S3 virtual-host URL.'})

    try:
        line_count = _str_to_line_count(
            request.args.get('line_count', str(CATALOG_LIMIT_LINES)))
    except ValueError as error:
        # format https://jsonapi.org/format/1.1/#error-objects
        return make_json_response(400, {
            'title': 'Unexpected line_count= value',
            'detail': str(error)
        })

    # stream=True avoids buffering the whole response body (roughly the file size) in memory
    resp = requests.get(url, stream=True)
    if resp.ok:
        content_iter = resp.iter_content(CHUNK)
        if input_type == 'csv':
            html, info = extract_csv(
                get_preview_lines(content_iter, compression, line_count,
                                  max_bytes), separator)
        elif input_type == 'excel':
            html, info = extract_excel(get_bytes(content_iter, compression))
        elif input_type == 'fcs':
            html, info = extract_fcs(get_bytes(content_iter, compression))
        elif input_type == 'ipynb':
            html, info = extract_ipynb(get_bytes(content_iter, compression),
                                       exclude_output)
        elif input_type == 'parquet':
            html, info = extract_parquet(get_bytes(content_iter, compression))
        elif input_type == 'vcf':
            html, info = extract_vcf(
                get_preview_lines(content_iter, compression, line_count,
                                  max_bytes))
        elif input_type in TEXT_TYPES:
            html, info = extract_txt(
                get_preview_lines(content_iter, compression, line_count,
                                  max_bytes))
        else:
            assert False, f'unexpected input_type: {input_type}'

        assert isinstance(html, str), 'expected html parameter as string'
        assert isinstance(info, dict), 'expected info metadata to be a dict'

        ret_val = {
            'info': info,
            'html': html,
        }
    else:
        ret_val = {
            'error': resp.reason,
            'text': resp.text,
        }

    return make_json_response(resp.status_code, ret_val)
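For reference, an illustrative set of query parameters this handler reads; the values are examples only, and the url must pass the https/S3-domain check above (assuming S3_DOMAIN_SUFFIX covers the standard amazonaws.com hosting domain):

example_args = {
    'url': 'https://example-bucket.s3.amazonaws.com/notebooks/report.ipynb',
    'input': 'ipynb',          # csv, excel, fcs, ipynb, parquet, vcf, or one of TEXT_TYPES
    'compression': 'gz',       # optional, for gzip-compressed objects
    'sep': ',',                # csv only; defaults to ','
    'exclude_output': 'true',  # ipynb only; drops cell outputs from the preview
    'max_bytes': '1048576',    # optional override of CATALOG_LIMIT_BYTES
    'line_count': '512',       # optional override of CATALOG_LIMIT_LINES
}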
Example #4
def maybe_get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):
    """get the byte contents of a file if it's a target for deep indexing"""
    logger_ = get_quilt_logger()

    if ext.endswith('.gz'):
        compression = 'gz'
        ext = ext[:-len('.gz')]
    else:
        compression = None
    logger_.debug(
        "Entering maybe_get_contents (could run out of mem.) %s %s %s", bucket, key, version_id
    )
    content = ""
    inferred_ext = infer_extensions(key, ext)
    if inferred_ext in get_content_index_extensions(bucket_name=bucket):
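        # fetch the object body via retry_s3 on demand; shared by every branch
        # below that needs the raw bytes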
        def _get_obj():
            return retry_s3(
                "get",
                bucket,
                key,
                size,
                etag=etag,
                s3_client=s3_client,
                version_id=version_id,
            )

        if inferred_ext == ".fcs":
            obj = _get_obj()
            body, info = extract_fcs(get_bytes(obj["Body"], compression), as_html=False)
            # be smart and just send column names to ES (instead of bloated full schema)
            # if this is not an HTML/catalog preview
            content = trim_to_bytes(f"{body}\n{info}", get_content_index_bytes(bucket_name=bucket))
        elif inferred_ext == ".ipynb":
            content = trim_to_bytes(
                # we have no choice but to fetch the entire notebook, because we
                # are going to parse it
                # warning: huge notebooks could spike memory here
                get_notebook_cells(
                    bucket,
                    key,
                    size,
                    compression,
                    etag=etag,
                    s3_client=s3_client,
                    version_id=version_id
                ),
                get_content_index_bytes(bucket_name=bucket),
            )
        elif inferred_ext == ".parquet":
            if size >= get_available_memory():
                print(f"{bucket}/{key} too large to deserialize; skipping contents")
                # at least index the key and other stats, but don't overrun memory
                # and fail indexing altogether
                return ""
            obj = _get_obj()
            body, info = extract_parquet(
                get_bytes(obj["Body"], compression),
                as_html=False,
                skip_rows=(inferred_ext in SKIP_ROWS_EXTS),
                max_bytes=get_content_index_bytes(bucket_name=bucket),
            )
            # be smart and just send column names to ES (instead of bloated full schema)
            # if this is not an HTML/catalog preview
            columns = ','.join(list(info['schema']['names']))
            content = trim_to_bytes(f"{columns}\n{body}", get_content_index_bytes(bucket_name=bucket))
        elif inferred_ext == ".pdf":
            obj = _get_obj()
            content = trim_to_bytes(
                extract_pdf(get_bytes(obj["Body"], compression)),
                get_content_index_bytes(bucket_name=bucket),
            )
        elif inferred_ext in (".xls", ".xlsx"):
            obj = _get_obj()
            body, _ = extract_excel(get_bytes(obj["Body"], compression), as_html=False)
            content = trim_to_bytes(
                body,
                get_content_index_bytes(bucket_name=bucket),
            )
        elif inferred_ext == ".pptx":
            obj = _get_obj()
            content = extract_pptx(get_bytes(obj["Body"], compression), get_content_index_bytes(bucket_name=bucket))
        else:
            content = get_plain_text(
                bucket,
                key,
                size,
                compression,
                etag=etag,
                s3_client=s3_client,
                version_id=version_id
            )

    return content