def filecrawl(path=None):
    source_name = request.args.get("source")
    sources = (
        filter(
            lambda x: x.name == urllib.parse.unquote_plus(source_name),
            item_sources,
        )
        if source_name
        else item_sources
    )
    # loop over all data sources
    rendered_sources = [
        render_item_source(item_source, path) for item_source in sources
    ]  # will we need to make this async in the page?
    rendered_html = "\n".join(rendered_sources)

    resp = make_response(
        render_template(
            "filecrawl.html",
            extra_scripts=get_extra_scripts(),
            rendered_html=rendered_html,
            path=path,
        )
    )
    set_no_cache(resp)
    return resp
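# A minimal sketch of the set_no_cache helper used above. It presumably sets
# the same no-cache headers that later examples inline by hand; the real
# helper may do more.
def set_no_cache(resp):
    resp.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    resp.headers["Pragma"] = "no-cache"
    resp.headers["Expires"] = "0"
    return resp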
def test_GIVEN_bare_string_THEN_throws_Exception(self):
    with self.assertRaises(Exception) as context:
        self.assertEqual(get_extra_scripts(), [])
    self.assertEqual(
        'Error parsing GATEWAY_EXTRA_SCRIPTS, expected JSON array e.g. ["https://example.com/path/to/script.js"]',
        str(context.exception),
    )
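# A sketch of get_extra_scripts consistent with the tests around it: it
# presumably reads the GATEWAY_EXTRA_SCRIPTS environment variable, returns []
# when the variable is unset or empty, and raises when the value is not a
# JSON array. The environment-variable lookup is an assumption.
import json
import os

def get_extra_scripts():
    raw = os.environ.get("GATEWAY_EXTRA_SCRIPTS", "")
    if not raw:
        return []
    try:
        scripts = json.loads(raw)
        if not isinstance(scripts, list):
            raise ValueError("not a JSON array")
    except ValueError:
        raise Exception(
            "Error parsing GATEWAY_EXTRA_SCRIPTS, expected JSON array "
            'e.g. ["https://example.com/path/to/script.js"]'
        )
    return scripts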
def index():
    return render_template(
        "index.html",
        ip=env.ip,
        cellxgene_data=env.cellxgene_data,
        extra_scripts=get_extra_scripts(),
    )
Example #4
def do_view(path, source_name=None):
    source = matching_source(source_name)
    match = cache.check_path(source, path)

    if match is None:
        lookup = source.lookup(path)
        if lookup is None:
            raise CellxgeneException(
                f"Could not find item for path {path} in source {source.name}",
                404,
            )
        key = CacheKey.for_lookup(source, lookup)
        print(
            f"view path={path}, source_name={source_name}, dataset={key.file_path}, "
            f"annotation_file={key.annotation_file_path}, key={key.descriptor}, source={key.source_name}"
        )
        with entry_lock:
            match = cache.check_entry(key)
            if match is None:
                uascripts = get_extra_scripts()
                match = cache.create_entry(key, uascripts)

    match.timestamp = current_time_stamp()

    if match.status in (CacheEntryStatus.loaded, CacheEntryStatus.loading):
        return match.serve_content(path)
    elif match.status == CacheEntryStatus.error:
        raise ProcessException.from_cache_entry(match)
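# The entry_lock used above to make the cache check-then-create atomic across
# request threads is presumably a module-level lock (a sketch):
import threading

entry_lock = threading.Lock()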
Example #5
def filecrawl():
    entries = recurse_dir(env.cellxgene_data)
    rendered_html = render_entries(entries)
    resp = make_response(
        render_template(
            "filecrawl.html",
            extra_scripts=get_extra_scripts(),
            rendered_html=rendered_html,
        ))
    return set_no_cache(resp)
def handle_invalid_usage(error):
    message = f"{error.http_status} Error: {error.message}"

    return (
        render_template(
           "cellxgene_error.html",
            extra_scripts=get_extra_scripts(),
            message=message,
        ),
        error.http_status,
    )
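# A minimal CellxgeneException matching how these examples use it: raised with
# a message and an HTTP status (see do_view and do_filecrawl) and read back as
# error.message / error.http_status in handle_invalid_usage. A sketch; the
# real class may carry more state.
class CellxgeneException(Exception):
    def __init__(self, message, http_status):
        super().__init__(message)
        self.message = message
        self.http_status = http_status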
Example #7
def do_filecrawl(path):
    filecrawl_path = os.path.join(env.cellxgene_data, path)
    if not os.path.isdir(filecrawl_path):
        raise CellxgeneException("Path is not directory: " + filecrawl_path,
                                 status.HTTP_400_BAD_REQUEST)
    entries = recurse_dir(filecrawl_path)
    rendered_html = render_entries(entries)
    return render_template(
        "filecrawl.html",
        extra_scripts=get_extra_scripts(),
        rendered_html=rendered_html,
        path=path,
    )
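# recurse_dir and render_entries are not shown in these examples. A
# hypothetical sketch of the shape they imply (walk a directory tree, then
# render the entries as HTML); the real implementations surely differ:
import os

def recurse_dir(path):
    # Collect paths of all files under path, relative to it.
    entries = []
    for root, _dirs, files in os.walk(path):
        for name in files:
            entries.append(os.path.relpath(os.path.join(root, name), path))
    return entries

def render_entries(entries):
    # Render each entry as a list item; the real renderer builds richer markup.
    return "\n".join(f"<li>{entry}</li>" for entry in entries)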
Example #8
def index():
    users = [
        name for name in os.listdir(env.cellxgene_data)
        if os.path.isdir(os.path.join(env.cellxgene_data, name))
    ]
    return render_template(
        "index.html",
        ip=env.ip,
        cellxgene_data=env.cellxgene_data,
        extra_scripts=get_extra_scripts(),
        users=users,
        enable_upload=env.enable_upload,
    )
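# These view functions are plain callables; wiring them into a Flask app
# presumably looks something like the sketch below (the route paths are
# assumptions):
from flask import Flask

app = Flask(__name__)
app.add_url_rule("/", "index", index)
app.add_url_rule("/filecrawl.html", "filecrawl", filecrawl)
app.add_url_rule("/view/<path:path>", "do_view", do_view)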
Example #9
def filecrawl():
    entries = recurse_dir(env.cellxgene_data)
    rendered_html = render_entries(entries)
    resp = make_response(
        render_template(
            "filecrawl.html",
            extra_scripts=get_extra_scripts(),
            rendered_html=rendered_html,
        ))
    resp.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    resp.headers["Pragma"] = "no-cache"
    resp.headers["Expires"] = "0"
    resp.headers['Cache-Control'] = 'public, max-age=0'
    return resp
Example #10
def do_view(path):
    key = get_key(path)
    print(
        f"view path={path}, dataset={key.dataset}, "
        f"annotation_file={key.annotation_file}, key={key.pathpart}"
    )
    with entry_lock:
        match = cache.check_entry(key)
        if match is None:
            uascripts = get_extra_scripts()
            match = cache.create_entry(key, uascripts)

    match.timestamp = current_time_stamp()

    if match.status in ("loaded", "loading"):
        return match.serve_content(path)
    elif match.status == "error":
        raise ProcessException.from_cache_entry(match)
def handle_invalid_process(error):
    return (
        render_template(
            "process_error.html",
            extra_scripts=get_extra_scripts(),
            message=error.message,
            http_status=error.http_status,
            stdout=error.stdout,
            stderr=error.stderr,
            relaunch_url=error.key.relaunch_url(),
            annotation_file=error.key.annotation_descriptor,
        ),
        error.http_status,
    )
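# A sketch of ProcessException consistent with handle_invalid_process above
# and with ProcessException.from_cache_entry(match) in do_view: it carries a
# message, an HTTP status, the captured stdout/stderr, and the cache key. The
# field names read off the cache entry are assumptions.
class ProcessException(Exception):
    def __init__(self, message, http_status, stdout, stderr, key):
        super().__init__(message)
        self.message = message
        self.http_status = http_status
        self.stdout = stdout
        self.stderr = stderr
        self.key = key

    @classmethod
    def from_cache_entry(cls, entry):
        # Attribute names on the cache entry are assumptions.
        return cls(entry.message, entry.http_status, entry.stdout,
                   entry.stderr, entry.key)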
Example #12
def filecrawl(path=None):
    source_name = request.args.get("source")
    sources = (filter(
        lambda x: x.name == urllib.parse.unquote_plus(source_name),
        item_sources,
    ) if source_name else item_sources)
    # loop over all data sources
    rendered_sources = [
        render_item_source(item_source, path) for item_source in sources
    ]  # will we need to make this async in the page?
    rendered_html = "\n".join(rendered_sources)

    resp = make_response(
        render_template(
            "filecrawl.html",
            extra_scripts=get_extra_scripts(),
            rendered_html=rendered_html,
            path=path,
        ))
    resp.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    resp.headers["Pragma"] = "no-cache"
    resp.headers["Expires"] = "0"
    resp.headers["Cache-Control"] = "public, max-age=0"
    return resp
def do_GET_status():
    return render_template(
        "cache_status.html",
        entry_list=cache.entry_list,
        extra_scripts=get_extra_scripts(),
    )
def test_GIVEN_two_scripts_THEN_returns_two_strings(self):
    self.assertEqual(get_extra_scripts(), ["abc", "def"])

def test_GIVEN_empty_string_THEN_returns_empty_array(self):
    self.assertEqual(get_extra_scripts(), [])
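# The tests above presumably drive get_extra_scripts through the
# GATEWAY_EXTRA_SCRIPTS environment variable; a sketch of the setup they
# imply (the patching style is an assumption):
import os
import unittest
from unittest import mock

class TestGetExtraScripts(unittest.TestCase):
    def test_GIVEN_two_scripts_THEN_returns_two_strings(self):
        with mock.patch.dict(os.environ, {"GATEWAY_EXTRA_SCRIPTS": '["abc", "def"]'}):
            self.assertEqual(get_extra_scripts(), ["abc", "def"])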