Example #1
def gn_gen(mode):
    os.environ["DENO_BUILD_MODE"] = mode

    # Rather than using gn gen --args we write directly to the args.gn file.
    # This is to avoid quoting/escaping complications when passing overrides as
    # command-line arguments.
    args_filename = os.path.join(build_path(), "args.gn")

    # Check if args.gn exists, and if it was auto-generated or handcrafted.
    existing_gn_args, hand_edited = read_gn_args(args_filename)

    # If args.gn was handcrafted, keep it as-is; otherwise regenerate it.
    if hand_edited:
        print "%s: Using gn options from hand edited '%s'." % (mode,
                                                               args_filename)
        gn_args = existing_gn_args
    else:
        print "%s: Writing gn options to '%s'." % (mode, args_filename)
        gn_args = generate_gn_args(mode)
        if gn_args != existing_gn_args:
            write_gn_args(args_filename, gn_args)

    for line in gn_args:
        print "  " + line

    run([third_party.gn_path, "gen", build_path()],
        env=third_party.google_env())
Example #2
def ucs(source, target, graph):
    """ Uniform-cost graph search """
    queue = PriorityQueue() # fringe
    queue.put((0, source))

    parent = {source:None}
    visited = {}

    while not queue.empty():
        (d, v_in) = queue.get()

        if v_in not in visited or d < visited[v_in]:

            if v_in == target:
                return (d, build_path(parent, target))

            for v_out in graph.adj(v_in):
                cost = graph.distance(v_in, v_out) + d
                if v_out not in visited:
                    queue.put((cost, v_out))
                    parent[v_out] = v_in

            visited[v_in] = d  # Record the distance at which v_in was settled.

    return None
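The graph-search snippets in this listing all return build_path(parent, target), a helper that is not shown. A minimal sketch of what it presumably does, assuming parent maps each discovered vertex to its predecessor (with the source mapped to None):

def build_path(parent, target):
    # Follow parent links back from target and return the path from the source.
    path = []
    node = target
    while node is not None:
        path.append(node)
        node = parent[node]
    path.reverse()    # parent links run target -> source, so reverse them
    return path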
Example #3
def iterative_deepening(source, target, graph):
    """ 
        Iterative deepening depth-first graph search algorithm
    """
    depth = 0

    while True:
        end = True
        fringe = [(source, 0)]
        parent = {source:None}
        
        while fringe:
            (v_in, curr_depth) = fringe.pop()

            if v_in == target:
                return build_path(parent, target)

            curr_depth += 1
            if curr_depth > depth:
                if graph.adj(v_in):
                    end = False
                continue

            for v_out in graph.adj(v_in):
                if v_out not in parent:
                    fringe.append((v_out, curr_depth))
                    parent[v_out] = v_in

        if end: return None

        depth += 1
Example #4
def astar(source, target, graph, heuristic=null_heuristic):
    """ A* algorithm """
    queue = PriorityQueue()
    queue.put((0, source))
    
    parent = {source:None}
    visited = {}

    while not queue.empty():
        (d, v_in) = queue.get()

        if v_in not in visited or d < visited[v_in]:

            if v_in == target:
                return (d, build_path(parent, target))

            for v_out in graph.adj(v_in):
                cost = graph.distance(v_in, v_out) + d
                fn = cost + heuristic(v_out, graph)  # the only difference relative to UCS
                if v_out not in visited:
                    queue.put((fn, v_out))
                    parent[v_out] = v_in

            visited[v_in] = d  # Record the priority at which v_in was settled.

    return None
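astar defaults its heuristic to null_heuristic, which is not defined in the snippet. Presumably it returns zero for every vertex, so that A* with the default argument behaves exactly like uniform-cost search; a one-line sketch under that assumption:

def null_heuristic(vertex, graph):
    # Trivial heuristic: always zero, which reduces A* to uniform-cost search.
    return 0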
Example #5
def clear_path(graph, reg, loc1, loc2):
    '''
    Check that the path from loc1 to loc2 is clear.
    We have to check that there is no side effect between the two location
    points. We also have to check that the variable `reg` is not redefined
    along any of the possible paths from loc1 to loc2.
    '''
    node1 = graph.get_node_from_loc(loc1)
    node2 = graph.get_node_from_loc(loc2)
    # If both instructions are in the same node, we only have to check that the
    # path is clear inside the node
    if node1 is node2:
        return clear_path_node(graph, reg, loc1, loc2)

    # If instructions are in different nodes, we also have to check the nodes
    # in the path between the two locations.
    # We try to return as early as possible to avoid unnecessary computations.
    if not clear_path_node(graph, reg, loc1, node1.ins_range[1]):
        return False
    path = build_path(graph, node1, node2)
    for node in path:
        locs = node.ins_range
        if not clear_path_node(graph, reg, locs[0], max(loc2, locs[1])):
            return False
    return True
Example #6
def main(argv):
    enable_ansi_colors()

    third_party.fix_symlinks()

    ninja_args = argv[1:]
    if not "-C" in ninja_args:
        if not os.path.isdir(build_path()):
            print("Build directory '%s' does not exist." % build_path(),
                  "Run tools/setup.py")
            sys.exit(1)
        ninja_args = ["-C", build_path()] + ninja_args

    run([third_party.ninja_path] + ninja_args,
        env=third_party.google_env(),
        quiet=True)
Example #7
def depth_limited_search(source, target, graph, depth):
    """ 
        Depth-limited graph search.
        The minimum depth parameter is 0, corresponding 
        to the root node (source).
    """
    fringe = [(source, 0)]
    parent = {source:None}

    while fringe:
        (v_in, curr_depth) = fringe.pop()

        if v_in == target:
            return build_path(parent, target)

        curr_depth += 1

        if curr_depth > depth:
            continue

        for v_out in graph.adj(v_in):
            if v_out not in parent:
                fringe.append((v_out, curr_depth))
                parent[v_out] = v_in
                
    return None
Example #8
def get_db_path(base_path, language='en_US'):
    """Get gameStats database path."""

    return os.path.join(
        util.build_path(base_path),
        "deploy/assets/data/gameStats",
        'gameStats_%s.sqlite' % language,
    )
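Several examples call util.build_path with a base path and optional extra components (see also Examples #11 and #38 below). The helper itself is not part of the listing; a plausible sketch, assuming it simply joins and normalizes its arguments:

import os

def build_path(base_path, *parts):
    # Hypothetical helper: join the base path with any extra components
    # and normalize the result.
    return os.path.normpath(os.path.join(base_path, *parts))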
Example #9
def __init__(self, relative_csv_path=""):
    csv.field_size_limit(sys.maxsize)
    self.image_paths = []
    self.saved_path = os.getcwd()
    self.can_download = False
    self.allowed_image_types = ["jpg", "png", "jpeg", "gif"]
    self.csv_file_path = util.build_path(relative_csv_path)
    self.temp_dir_name = self.csv_file_path.split(".")[0]
    self.completed = False
Example #10
    def publish(self, context = None):
        """Writes out index file after building appropriate context"""

        if context is None:
            context = self.context

        self.__write_out(util.build_path(self.config['outdir'],
                                         self.context['permalink']))
        return True
Example #11
def get_fontconfig_path(base_path, language='en_US'):
    return os.path.join(
        util.build_path(
            base_path,
            'lol_game_client_' + language,
            'managedfiles',
        ),
        "data/menu/fontconfig_%s.txt" % language,
    )
Example #12
    def process_efetch_result(raw_result):
        """
        Metadata for each article should be stored in its own XML file.
        """
        root = ET.fromstring(raw_result.encode('utf-8'))
        for article in root.findall('PubmedArticle'):
            newTree = ET.ElementTree(element=copy.deepcopy(article))
            pmid = article.find('MedlineCitation/PMID').text

            treePath = build_path(term, year, '%s.xml' % pmid, OPATH, make=True)
            newTree.write(treePath, encoding='utf-8')
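The PubMed snippets call build_path(term, year, filename, base, make=...) to locate a per-term, per-year file, optionally creating its directory first. That helper is not included in the listing either; a hedged sketch of the presumed behavior:

import os

def build_path(term, year, filename, base, make=False):
    # Hypothetical helper: <base>/<term>/<year>/<filename>, creating the
    # directory first when make=True.
    directory = os.path.join(base, term, str(year))
    if make and not os.path.isdir(directory):
        os.makedirs(directory)
    return os.path.join(directory, filename)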
Example #13
    def publish(self, context = None):
        """Writes out index file after building appropriate context"""

        if context is None:
            context = self.context

        tagsdir = os.path.join(self.config['outdir'], self.tagprefix)
        if not os.path.exists(tagsdir):
            os.makedirs(tagsdir)
        self.__write_out(util.build_path(self.config['outdir'],
                                         os.path.join(self.tagprefix, self.context['permalink'])))
        return True
Example #14
    def publish(self, context = None):
        """Sets published and pubdate, updates header and context
        appropriately, and renders the page"""
        
        if context is None:
            context = self.context

        if not context['pubdate']:
            context['pubdate'] = datetime.now().strftime(util.time_isofmt)
        context = self.__update_context(context)
        self.__update_header(context)
        self.__write_out(util.build_path(self.config['outdir'],
                                         self.context['permalink']))
        return True
Example #15
def dfs(source, target, graph):
    """ Depth-first graph search """
    fringe = [source]
    parent = {source:None}

    while fringe:
        x = fringe.pop()

        if x == target:
            return build_path(parent, target)

        for v in graph.adj(x):
            if v not in parent:
                fringe.append(v)
                parent[v] = x
    return None
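All of the search examples assume a graph object exposing adj(v) and distance(u, v). A small usage sketch with a toy adjacency-list graph (the Graph class and the sample data are illustrative, not from the original code; ucs additionally needs a PriorityQueue import in scope):

class Graph(object):
    def __init__(self, edges):
        # edges: dict mapping each vertex to {neighbor: edge_cost}
        self.edges = edges

    def adj(self, v):
        return list(self.edges.get(v, {}))

    def distance(self, u, v):
        return self.edges[u][v]


g = Graph({'a': {'b': 1, 'c': 4}, 'b': {'c': 1}, 'c': {}})
path = dfs('a', 'c', g)          # ['a', 'c']
cost, path = ucs('a', 'c', g)    # (2, ['a', 'b', 'c'])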
Example #16
def main(argv):
    if len(argv) == 2:
        build_dir = sys.argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/benchmark.py [build_dir]"
        sys.exit(1)

    sha1 = run_output(["git", "rev-parse", "HEAD"],
                      exit_on_fail=True).out.strip()
    http_server.spawn()

    deno_exe = os.path.join(build_dir, "deno")

    os.chdir(root_path)

    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
    }

    # TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
    # When this is changed, the historical data in gh-pages branch needs to be
    # changed too.
    new_data["benchmark"] = run_exec_time(deno_exe, build_dir)

    new_data["binary_size"] = get_binary_sizes(build_dir)
    new_data["bundle_size"] = bundle_benchmark(deno_exe)

    # The throughput benchmark cannot run on Windows, which lacks nc and
    # pipe.
    if os.name != 'nt':
        new_data["throughput"] = run_throughput(deno_exe)
        run_http(build_dir, new_data)

    if "linux" in sys.platform:
        run_strace_benchmarks(deno_exe, new_data)
        new_data["max_memory"] = run_max_mem_benchmark(deno_exe)

    print "===== <BENCHMARK RESULTS>"
    print json.dumps(new_data, indent=2)
    print "===== </BENCHMARK RESULTS>"

    write_json(os.path.join(build_dir, "bench.json"), new_data)
Example #17
def main(argv):
    if len(argv) == 2:
        build_dir = sys.argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/test.py [build_dir]"
        sys.exit(1)

    deno_dir = os.path.join(build_dir, ".deno_test")
    if os.path.isdir(deno_dir):
        rmtree(deno_dir)
    os.environ["DENO_DIR"] = deno_dir

    enable_ansi_colors()

    http_server.spawn()

    deno_exe = os.path.join(build_dir, "deno" + executable_suffix)
    check_exists(deno_exe)
    deno_ns_exe = os.path.join(build_dir, "deno_ns" + executable_suffix)
    check_exists(deno_ns_exe)

    # Internal tools testing
    setup_test()
    util_test()
    benchmark_test(build_dir, deno_exe)

    test_cc = os.path.join(build_dir, "test_cc" + executable_suffix)
    check_exists(test_cc)
    run([test_cc])

    test_rs = os.path.join(build_dir, "test_rs" + executable_suffix)
    check_exists(test_rs)
    run([test_rs])

    unit_tests(deno_exe)

    check_output_test(deno_exe)
    check_output_test(deno_ns_exe)

    rmtree(deno_dir)

    deno_dir_test(deno_exe, deno_dir)
Example #18
def parse_test_args(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    args = TestArgParser.parse_args(argv)

    if args.executable and args.release:
        raise argparse.ArgumentError(
            None, "Path to executable is inferred from "
            "--release, cannot provide both.")

    if not args.build_dir:
        args.build_dir = build_path()

    if not args.executable:
        args.executable = os.path.join(args.build_dir,
                                       "deno" + executable_suffix)

    if not os.path.isfile(args.executable):
        raise argparse.ArgumentError(
            None, "deno executable not found at {}".format(args.executable))

    return args
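parse_test_args relies on a module-level TestArgParser that is not shown here. Judging from the attributes it reads (build_dir, executable, release), it is presumably an argparse parser along these lines; this is a hedged sketch, not the original definition:

import argparse

TestArgParser = argparse.ArgumentParser(description="Run the test suite")
TestArgParser.add_argument("--build-dir", dest="build_dir", default="",
                           help="directory containing the build artifacts")
TestArgParser.add_argument("--executable", default="",
                           help="path to the executable under test")
TestArgParser.add_argument("--release", action="store_true",
                           help="test a release build")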
Example #19
def main(argv):
    if len(argv) == 2:
        build_dir = sys.argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/benchmark.py [build_dir]"
        sys.exit(1)

    deno_path = os.path.join(build_dir, "deno")
    benchmark_file = os.path.join(build_dir, "benchmark.json")

    os.chdir(root_path)
    import_data_from_gh_pages()
    # TODO: Use hyperfine in //third_party
    run(["hyperfine", "--export-json", benchmark_file, "--warmup", "3"] +
        [deno_path + " " + " ".join(args) for [_, args] in benchmarks])
    all_data = read_json(data_file)
    benchmark_data = read_json(benchmark_file)
    sha1 = run_output(["git", "rev-parse", "HEAD"]).strip()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
        "binary_size": os.path.getsize(deno_path),
        "benchmark": {}
    }
    for [[name, _], data] in zip(benchmarks, benchmark_data["results"]):
        new_data["benchmark"][name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }
    all_data.append(new_data)
    write_json(data_file, all_data)
Example #20
    with open(MESH_TERMS, 'r') as f:
        terms = [line.strip() for line in f.readlines() if len(line) > 1]

    for term in terms:
        for year in xrange(START_YEAR, END_YEAR):
            sys.stdout.flush()
            ty_dirpath = os.path.join(DATAPATH, term, str(year))
            if not os.path.exists(ty_dirpath):
                print '\r skipping', term, year, ' (no PubMed response)',
                continue

            for fname in os.listdir(ty_dirpath):
                if not fname.endswith('xml'):
                    continue

                opath = build_path(term, year, fname.replace('.xml', '.txt'), OPATH, make=True)
                # if os.path.exists(opath):    # Already done.
                #     print '\r skipping', term, year, ' (already done)',
                #     continue

                r = ET.parse(build_path(term, year, fname, DATAPATH, make=False)).getroot()
                aparts = r.findall('.//AbstractText')

                if len(aparts) == 0:
                    continue

                abstext = u'\n\n'.join([apart.text for apart in aparts if apart.text]).strip()
                if len(abstext) < 2:
                    continue

                with codecs.open(opath, 'w', encoding="utf-8") as f:
                    f.write(abstext)    # Presumed body; the original snippet is truncated here.
Example #21
def main():
    deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
    repl_tests(deno_exe)
Example #22
def main(argv):
    if len(argv) == 2:
        build_dir = sys.argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/benchmark.py [build_dir]"
        sys.exit(1)

    http_server.spawn()

    deno_path = os.path.join(build_dir, "deno")
    benchmark_file = os.path.join(build_dir, "benchmark.json")

    os.chdir(root_path)
    import_data_from_gh_pages()

    hyperfine = prebuilt.load_hyperfine()

    run([
        hyperfine, "--ignore-failure", "--export-json", benchmark_file,
        "--warmup", "3"
    ] + [
        deno_path + " " + " ".join(args) for [_, args] in exec_time_benchmarks
    ])
    all_data = read_json(all_data_file)
    benchmark_data = read_json(benchmark_file)
    sha1 = run_output(["git", "rev-parse", "HEAD"]).strip()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
        "binary_size": {},
        "thread_count": {},
        "syscall_count": {},
        "benchmark": {}
    }
    for [[name, _], data] in zip(exec_time_benchmarks,
                                 benchmark_data["results"]):
        new_data["benchmark"][name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }

    new_data["binary_size"] = get_binary_sizes(build_dir)
    # The throughput benchmark cannot run on Windows, which lacks nc and
    # pipe.
    if os.name != 'nt':
        hyper_hello_path = os.path.join(build_dir, "hyper_hello")
        core_http_bench_exe = os.path.join(build_dir, "deno_core_http_bench")
        new_data["throughput"] = run_throughput(deno_path)
        stats = http_benchmark(deno_path, hyper_hello_path,
                               core_http_bench_exe)
        new_data["req_per_sec"] = {
            k: v["req_per_sec"]
            for k, v in stats.items()
        }
        new_data["max_latency"] = {
            k: v["max_latency"]
            for k, v in stats.items()
        }
    if "linux" in sys.platform:
        # Thread count test, only on linux
        new_data["thread_count"] = run_thread_count_benchmark(deno_path)
        new_data["syscall_count"] = run_syscall_count_benchmark(deno_path)

    all_data.append(new_data)
    write_json(all_data_file, all_data)
    write_json(recent_data_file, all_data[-20:])
Example #23
def main():
    print "Permissions prompt tests"
    deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
    complex_permissions_test(deno_exe)
Example #24
#!/usr/bin/env python
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
from __future__ import print_function
import os
import sys
import third_party
from util import build_path, run

third_party.fix_symlinks()

ninja_args = sys.argv[1:]
if not "-C" in ninja_args:
    if not os.path.isdir(build_path()):
        print("Build directory '%s' does not exist." % build_path(),
              "Run tools/setup.py")
        sys.exit(1)
    ninja_args = ["-C", build_path()] + ninja_args

run([third_party.ninja_path] + ninja_args,
    env=third_party.google_env(),
    quiet=True)
Example #25
    with open(MESH_TERMS, 'r') as f:
        terms = [line.strip() for line in f.readlines() if len(line) > 1]

    # We need this in here, since we want to use names from this namespace.
    def process_efetch_result(raw_result):
        """
        Metadata for each article should be stored in its own XML file.
        """
        root = ET.fromstring(raw_result.encode('utf-8'))
        for article in root.findall('PubmedArticle'):
            newTree = ET.ElementTree(element=copy.deepcopy(article))
            pmid = article.find('MedlineCitation/PMID').text

            treePath = build_path(term, year, '%s.xml' % pmid, OPATH, make=True)
            newTree.write(treePath, encoding='utf-8')

    for term in terms:
        for year in xrange(START_YEAR, END_YEAR):
            # NCBI permits no more than 3 requests per second.
            time.sleep(0.5)
            sys.stdout.flush()

            df = pd.read_csv(build_path(term, year, 'sample.csv', DATAPATH))
            print '\rterm:', term, 'year:', year,
            for i in xrange(0, df.size, 200):
                existing = os.listdir(build_path(term, year, '%s.xml' % pmid, OPATH, make=False))
                pmids = set(df.PMID[i:i+200])
                pmids = list(pmids)
                efetch(id=pmids, db='pubmed', rettype='xml',
                       handler=process_efetch_result)
Example #26
    # Check if ccache is in the path, and if so, use it as the cc_wrapper.
    ccache_path = distutils.spawn.find_executable("ccache")
    if ccache_path:
        out += [r'cc_wrapper="%s"' % ccache_path]

    print "DENO_BUILD_ARGS:", out

    return out


# gn gen.
for mode in ["release", "debug"]:
    os.environ["DENO_BUILD_MODE"] = mode

    gn_args = get_gn_args()

    # Create the build directory so we can write args.gn before running gn gen.
    if not os.path.isdir(build_path()):
        os.makedirs(build_path())

    # Rather than using gn gen --args we manually write the args.gn override file.
    # This is to avoid quoting/escaping complications when passing overrides as
    # command-line arguments.
    args_filename = os.path.join(build_path(), "args.gn")
    if not os.path.exists(args_filename) or gn_args:
        with open(args_filename, "w+") as f:
            f.write("\n".join(gn_args) + "\n")

    run([third_party.gn_path, "gen", build_path()],
        env=third_party.google_env())
Example #27
            if not os.path.exists(ty_dirpath):
                # Some term-year combinations yielded no records.
                print '\r skipping', term, year, ' (no PubMed response)',
                continue

            for fname in os.listdir(ty_dirpath):
                tried += 1.    # For monitoring only.

                if not fname.endswith('xml'):    # Skip hidden/unrelated files.
                    continue

                pmid = fname.split('.')[0]

                # PubMed XML records are already on disk, split into a
                #  separate file for each record.
                rec_path = build_path(term, year, fname, DATAPATH, make=False)
                r = ET.parse(rec_path).getroot()

                # Only around 20% of records actually have grant information.
                #  This improves as we move forward to more recent publications.
                grantlist = r.find('.//GrantList')
                if grantlist is None:
                    continue

                # There can be several grants per publication.
                for grant in grantlist.findall('.//Grant'):
                    # Grant records vary in their level of completeness.
                    grant_id = getattr(grant.find('.//GrantID'), 'text', None)
                    acronym = getattr(grant.find('.//Acronym'), 'text', None)
                    agency = getattr(grant.find('.//Agency'), 'text', None)
                    country = getattr(grant.find('.//Country'), 'text', None)
Example #28
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
import sys
import third_party
from util import run_output, build_path

out_filename = sys.argv[1]

args_list = run_output([
    third_party.gn_path, "args",
    build_path(), "--list", "--short", "--overrides-only"
],
                       quiet=True,
                       env=third_party.google_env(),
                       exit_on_fail=True).out

with open(out_filename, "w") as f:
    f.write(args_list)
Example #29
        assertEqual(code, 0)

    def test_exit_command(self):
        out, err, code = self.input(".exit", "'ignored'", exit=False)
        assertEqual(out, '')
        assertEqual(err, '')
        assertEqual(code, 0)

    def run(self):
        print('repl_test.py')
        test_names = [name for name in dir(self) if name.startswith("test_")]
        for t in test_names:
            self.__getattribute__(t)()
            sys.stdout.write(".")
            sys.stdout.flush()
        print(' {}\n'.format(green_ok()))


def assertEqual(left, right):
    if left != right:
        raise AssertionError("{} != {}".format(repr(left), repr(right)))


def repl_tests(deno_exe):
    Repl(deno_exe).run()


if __name__ == "__main__":
    deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
    repl_tests(deno_exe)
Example #30
#!/usr/bin/env python
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
import os
import sys
from os.path import join
import third_party
from util import root_path, run, run_output, build_path

third_party.fix_symlinks()

print "DENO_BUILD_PATH:", build_path()
if not os.path.isdir(build_path()):
    print "DENO_BUILD_PATH does not exist. Run tools/setup.py"
    sys.exit(1)
os.chdir(build_path())


def maybe_add_default_target(args):
    lines = run_output(
        [third_party.ninja_path, "-t", "targets"],
        env=third_party.google_env(),
        quiet=True).split("\n")
    targets = [l.rsplit(":", 1)[0] for l in lines]
    deno_targets = [target for target in targets if target.startswith(":")]
    deno_targets += [target.lstrip(":") for target in deno_targets]

    target_specified = False
    for a in args:
        if a in deno_targets:
            target_specified = True
            break
Example #31
def main(argv):
    if len(argv) == 2:
        build_dir = sys.argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/test.py [build_dir]"
        sys.exit(1)

    deno_dir = os.path.join(build_dir, ".deno_test")
    if os.path.isdir(deno_dir):
        rmtree(deno_dir)
    os.environ["DENO_DIR"] = deno_dir

    enable_ansi_colors()

    http_server.spawn()

    deno_exe = os.path.join(build_dir, "deno" + executable_suffix)
    check_exists(deno_exe)

    exec_path_test(deno_exe)

    # Internal tools testing
    run([
        "node", "./node_modules/.bin/ts-node", "--project",
        "tools/ts_library_builder/tsconfig.json",
        "tools/ts_library_builder/test.ts"
    ])
    setup_test()
    util_test()
    benchmark_test(build_dir, deno_exe)

    test_cc = os.path.join(build_dir, "test_cc" + executable_suffix)
    check_exists(test_cc)
    run([test_cc])

    test_rs = os.path.join(build_dir, "test_rs" + executable_suffix)
    check_exists(test_rs)
    run([test_rs])

    deno_core_test = os.path.join(build_dir,
                                  "deno_core_test" + executable_suffix)
    check_exists(deno_core_test)
    run([deno_core_test])

    unit_tests(deno_exe)

    prefetch_test(deno_exe)
    fmt_test(deno_exe)

    integration_tests(deno_exe)

    # TODO We currently skip testing the prompt and IsTTY in Windows completely.
    # Windows does not support the pty module used for testing the permission
    # prompt.
    if os.name != 'nt':
        from permission_prompt_test import permission_prompt_test
        from is_tty_test import is_tty_test
        permission_prompt_test(deno_exe)
        is_tty_test(deno_exe)

    repl_tests(deno_exe)

    rmtree(deno_dir)

    deno_dir_test(deno_exe, deno_dir)

    test_no_color(deno_exe)
Example #32
def main():
    deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
    http_server.spawn()
    complex_permissions_test(deno_exe)
Example #33
        'db': 'pubmed',
        'retmax': retmax,    # Number of results.
        'term': term,
        'field': 'Mesh',
        'mindate': year,
        'maxdate': year,    # Ranges are inclusive in NCBI.
        'datetype': 'pdat',    # Publication date.
    }
    return esearch(**params)


if __name__ == '__main__':
    DATAPATH = '/Users/erickpeirson/modelorganisms/ncbi/data/diseases'
    MESH_TERMS = 'mesh_diseases.txt'
    START_YEAR = 1975   # Starting in this year.
    END_YEAR = 2016    # Up to but not including this year.

    with open(MESH_TERMS, 'r') as f:
        terms = [line.strip() for line in f.readlines() if len(line) > 1]

    for term in terms:
        for year in xrange(START_YEAR, END_YEAR):
            # NCBI permits no more than 3 requests per second.
            time.sleep(0.5)
            print '\rterm:', term, 'year:', year,
            sys.stdout.flush()
            pmids = pubmed_for_mesh(term, year)
            outpath = build_path(term, year, 'pmids.txt', DATAPATH, make=True)
            with open(outpath, 'w') as f:
                f.write('\n'.join(pmids))
Example #34
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
import os
import json
from util import build_path


def read_json(filename):
    with open(filename) as json_file:
        return json.load(json_file)


def write_json(filename, data):
    with open(filename, 'w') as outfile:
        json.dump(data, outfile)


current_data_file = os.path.join(build_path(), "bench.json")
all_data_file = "gh-pages/data.json"  # Includes all benchmark data.
recent_data_file = "gh-pages/recent.json"  # Includes the 20 most recent benchmark entries.

assert os.path.exists(current_data_file)
assert os.path.exists(all_data_file)

new_data = read_json(current_data_file)
all_data = read_json(all_data_file)
all_data.append(new_data)

write_json(all_data_file, all_data)
write_json(recent_data_file, all_data[-20:])
Example #35
import pandas as pd
import os

from util import build_path


if __name__ == '__main__':
    DATAPATH = '/Users/erickpeirson/modelorganisms/ncbi/data/diseases'
    MESH_TERMS = 'mesh_diseases.txt'
    START_YEAR = 1975   # Starting in this year.
    END_YEAR = 2016    # Up to but not including this year.

    with open(MESH_TERMS, 'r') as f:
        terms = [line.strip() for line in f.readlines() if len(line) > 1]

    for term in terms:
        for year in xrange(START_YEAR, END_YEAR):
            dpath = build_path(term, year, 'pmids.txt', DATAPATH)
            with open(dpath, 'r') as f:
                pmids = [line.strip() for line in f.readlines() if len(line) > 1]
            df = pd.DataFrame(data=pmids, columns=['PMID'])
            if df.size == 0:
                print 'no results for', term, year
                continue
            elif df.size < 4000:
                print 'small set for', term, year, 'with', df.size
            dfpath = build_path(term, year, 'sample_b.csv', DATAPATH)
            df.sample(min(4000, df.size)).to_csv(dfpath)
Example #36
def main(argv):
    if len(argv) == 2:
        build_dir = sys.argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/benchmark.py [build_dir]"
        sys.exit(1)

    http_server.spawn()

    deno_path = os.path.join(build_dir, "deno")
    benchmark_file = os.path.join(build_dir, "benchmark.json")

    os.chdir(root_path)
    import_data_from_gh_pages()

    hyperfine = prebuilt.load_hyperfine()

    run([
        hyperfine, "--ignore-failure", "--export-json", benchmark_file,
        "--warmup", "3"
    ] + [
        deno_path + " " + " ".join(args) for [_, args] in exec_time_benchmarks
    ])
    all_data = read_json(all_data_file)
    benchmark_data = read_json(benchmark_file)
    sha1 = run_output(["git", "rev-parse", "HEAD"]).strip()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
        "binary_size": {},
        "thread_count": {},
        "syscall_count": {},
        "benchmark": {}
    }
    for [[name, _], data] in zip(exec_time_benchmarks,
                                 benchmark_data["results"]):
        new_data["benchmark"][name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }

    new_data["binary_size"] = get_binary_sizes(build_dir)
    # The throughput benchmark cannot run on Windows, which lacks nc and
    # pipe.
    if os.name != 'nt':
        hyper_hello_path = os.path.join(build_dir, "hyper_hello")
        core_http_bench_exe = os.path.join(build_dir, "deno_core_http_bench")
        new_data["throughput"] = run_throughput(deno_path)
        new_data["req_per_sec"] = http_benchmark(deno_path, hyper_hello_path,
                                                 core_http_bench_exe)
    if "linux" in sys.platform:
        # Thread count test, only on linux
        new_data["thread_count"] = run_thread_count_benchmark(deno_path)
        new_data["syscall_count"] = run_syscall_count_benchmark(deno_path)

    all_data.append(new_data)
    write_json(all_data_file, all_data)
    write_json(recent_data_file, all_data[-20:])
Example #37
def main():
    deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
    repl_tests(deno_exe)
Example #38
def get_raf_path(base_path):
    return util.build_path(base_path, 'lol_game_client', 'filearchives')
Example #39
#!/usr/bin/env python
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
from util import run, root_path, build_path

os.chdir(os.path.join(root_path, "website"))
deno_exe = os.path.join(build_path(), "deno")
run([deno_exe, "bundle", "app.ts", "app.bundle.js"])
Example #40
def main():
    deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
    is_tty_test(deno_exe)
Example #41
def main(argv):
    if len(argv) == 2:
        build_dir = sys.argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/test.py [build_dir]"
        sys.exit(1)

    deno_dir = os.path.join(build_dir, ".deno_test")
    if os.path.isdir(deno_dir):
        rmtree(deno_dir)
    os.environ["DENO_DIR"] = deno_dir

    enable_ansi_colors()

    http_server.spawn()

    deno_exe = os.path.join(build_dir, "deno" + executable_suffix)
    check_exists(deno_exe)

    exec_path_test(deno_exe)

    # Internal tools testing
    run([
        "node", "./node_modules/.bin/ts-node", "--project",
        "tools/ts_library_builder/tsconfig.json",
        "tools/ts_library_builder/test.ts"
    ])
    setup_test()
    util_test()
    benchmark_test(build_dir, deno_exe)

    test_cc = os.path.join(build_dir, "test_cc" + executable_suffix)
    check_exists(test_cc)
    run([test_cc])

    test_rs = os.path.join(build_dir, "test_rs" + executable_suffix)
    check_exists(test_rs)
    run([test_rs])

    unit_tests(deno_exe)

    prefetch_test(deno_exe)
    fmt_test(deno_exe)

    integration_tests(deno_exe)

    # TODO We currently skip testing the prompt and IsTTY in Windows completely.
    # Windows does not support the pty module used for testing the permission
    # prompt.
    if os.name != 'nt':
        from permission_prompt_test import permission_prompt_test
        from is_tty_test import is_tty_test
        permission_prompt_test(deno_exe)
        is_tty_test(deno_exe)

    repl_tests(deno_exe)

    rmtree(deno_dir)

    deno_dir_test(deno_exe, deno_dir)

    test_no_color(deno_exe)
Example #42
def main():
    deno_exe = os.path.join(build_path(), "deno" + executable_suffix)
    permission_prompt_test(deno_exe)