Example #1
    def _open(self):
        """ DO NOT USE THIS UNLESS YOU close() FIRST"""
        if self.settings.host.startswith("mysql://"):
            # DECODE THE URI: mysql://username:password@host:optional_port/database_name
            up = strings.between(self.settings.host, "mysql://", "@")
            if ":" in up:
                self.settings.username, self.settings.password = unquote(
                    up).split(":")
            else:
                self.settings.username = up

            url = strings.between(self.settings.host, "@", None)
            hp, self.settings.schema = url.split("/", 1)
            if ":" in hp:
                self.settings.host, self.settings.port = hp.split(":")
                self.settings.port = int(self.settings.port)
            else:
                self.settings.host = hp

        # SSL PEM
        if self.settings.host in ("localhost", "mysql", '127.0.0.1'):
            ssl_context = None
        else:
            if self.settings.ssl and not self.settings.ssl.pem:
                Log.error("Expecting 'pem' property in ssl")
            # ssl_context = ssl.create_default_context(**get_ssl_pem_file(self.settings.ssl.pem))
            filename = File(".pem") / URL(self.settings.ssl.pem).host
            filename.write_bytes(http.get(self.settings.ssl.pem).content)
            ssl_context = {"ca": filename.abspath}

        try:
            self.db = connect(
                host=self.settings.host,
                port=self.settings.port,
                user=coalesce(self.settings.username, self.settings.user),
                passwd=coalesce(self.settings.password, self.settings.passwd),
                db=coalesce(self.settings.schema, self.settings.db),
                read_timeout=coalesce(
                    self.settings.read_timeout,
                    (EXECUTE_TIMEOUT / 1000) - 10 if EXECUTE_TIMEOUT else None,
                    5 * 60,
                ),
                charset=u"utf8",
                use_unicode=True,
                ssl=ssl_context,
                cursorclass=cursors.SSCursor)
        except Exception as e:
            if self.settings.host.find("://") == -1:
                Log.error(u"Failure to connect to {{host}}:{{port}}",
                          host=self.settings.host,
                          port=self.settings.port,
                          cause=e)
            else:
                Log.error(
                    u"Failure to connect.  PROTOCOL PREFIX IS PROBABLY BAD", e)
        self.cursor = None
        self.partial_rollback = False
        self.transaction_level = 0
        self.backlog = []  # accumulate the write commands so they are sent at once
        if self.readonly:
            self.begin()
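For reference, the same URI decomposition can be done with the standard library's urlparse, which also handles percent-encoding; a minimal standalone sketch (the URI itself is hypothetical):

    from urllib.parse import unquote, urlparse

    uri = "mysql://user:p%40ss@db.example.com:3306/mydb"  # HYPOTHETICAL URI
    parts = urlparse(uri)
    username = unquote(parts.username)   # "user"
    password = unquote(parts.password)   # "p@ss"
    host = parts.hostname                # "db.example.com"
    port = parts.port                    # 3306
    schema = parts.path.lstrip("/")      # "mydb"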
Example #2
    def _load_functions(self):
        global _load_extension_warning_sent
        library_loc = File.new_instance(sys.modules[__name__].__file__,
                                        "../..")
        full_path = File.new_instance(
            library_loc, "vendor/sqlite/libsqlitefunctions.so").abspath
        try:
            trace = get_stacktrace(0)[0]
            if self.upgrade:
                if os.name == "nt":
                    file = File.new_instance(
                        trace["file"],
                        "../../vendor/sqlite/libsqlitefunctions.so")
                else:
                    file = File.new_instance(
                        trace["file"],
                        "../../vendor/sqlite/libsqlitefunctions")

                full_path = file.abspath
                self.db.enable_load_extension(True)
                self.db.execute(
                    text(SQL_SELECT + "load_extension" +
                         sql_iso(quote_value(full_path))))
        except Exception as e:
            if not _load_extension_warning_sent:
                _load_extension_warning_sent = True
                Log.warning(
                    "Could not load {{file}}, doing without. (no SQRT for you!)",
                    file=full_path,
                    cause=e,
                )
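For context, loading a SQLite extension through the standard sqlite3 module follows this pattern (a minimal sketch; the extension path is hypothetical, and the interpreter's sqlite3 build must allow extension loading):

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.enable_load_extension(True)   # OFF BY DEFAULT
    db.execute("SELECT load_extension(?)", ("/path/to/libsqlitefunctions",))
    db.enable_load_extension(False)  # RE-DISABLE ONCE LOADED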
Example #3
    def __init__(self, file):
        """
        USE with Profiler("myfile.tab"): TO ENABLE PER-PARSER PROFILING
        :param file:
        """
        self.file = File(file).set_extension("tab")
        self.previous_parse = None
Example #4
class Profiler(object):
    def __init__(self, file):
        """
        USE with Profiler("myfile.tab"): TO ENABLE PER-PARSER PROFILING
        :param file:
        """
        self.file = File(file).set_extension("tab")
        self.previous_parse = None

    def __enter__(self):
        timing.clear()
        self.previous_parse = ParserElement._parse
        ParserElement._parse = _profile_parse

    def __exit__(self, exc_type, exc_val, exc_tb):
        ParserElement._parse = self.previous_parse
        profile = jx.sort(
            [{
                "parser": text(parser),
                "cache_hits": cache,
                "matches": match,
                "failures": fail,
                "call_count": match + fail + cache,
                "total_parse": parse,
                "total_overhead": all - parse,
                "per_parse": parse / (match + fail),
                "per_overhead": (all - parse) / (match + fail + cache),
            } for parser, (cache, match, fail, parse, all) in timing.items()],
            {"total_parse": "desc"},
        )
        self.file.add_suffix(Date.now().format("%Y%m%d_%H%M%S")).write(
            convert.list2tab(profile))
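Typical use, per the docstring (a sketch; `expr` stands in for any pyparsing ParserElement defined elsewhere):

    with Profiler("parse_timings.tab"):
        expr.parseString("1 + 2 * 3")
    # ON EXIT, PER-PARSER TIMINGS LAND IN parse_timings.<timestamp>.tab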
Example #5
def setup_ssl():
    config.flask.ssl_context = None  # FORCIBLY DISABLED, SO THE CHECK BELOW ALWAYS RETURNS

    if not config.flask.ssl_context:
        return

    ssl_flask = config.flask.copy()
    ssl_flask.debug = False
    ssl_flask.port = 443

    if isinstance(config.flask.ssl_context, Mapping):
        # EXPECTED PEM ENCODED FILE NAMES
        # `load_cert_chain` REQUIRES CONCATENATED LIST OF CERTS
        tempfile = NamedTemporaryFile(delete=False, suffix=".pem")
        try:
            tempfile.write(
                File(ssl_flask.ssl_context.certificate_file).read_bytes())
            if ssl_flask.ssl_context.certificate_chain_file:
                tempfile.write(
                    File(ssl_flask.ssl_context.certificate_chain_file).
                    read_bytes())
            tempfile.flush()
            tempfile.close()

            context = SSLContext(PROTOCOL_SSLv23)
            context.load_cert_chain(
                tempfile.name,
                keyfile=File(ssl_flask.ssl_context.privatekey_file).abspath)

            ssl_flask.ssl_context = context
        except Exception as e:
            Log.error("Could not handle ssl context construction", cause=e)
        finally:
            # THE ORIGINAL SNIPPET IS TRUNCATED HERE; DELETING THE TEMP FILE IS THE LIKELY INTENT
            File(tempfile.name).delete()
Example #6
    def svn_update(self):
        source = File.new_instance(self.directory,
                                   self.directory.name.replace("-",
                                                               "_")).abspath
        tests = File.new_instance(self.directory, "tests").abspath

        result = self.local("git", [self.git, "checkout", "dev"])
        if File.new_instance(source, ".svn").exists:
            result = self.local("svn",
                                [self.svn, "update", "--accept", "p", source])
            result = self.local("svn",
                                [self.svn, "commit", source, "-m", "auto"])
            result = self.local("svn",
                                [self.svn, "update", "--accept", "p", tests])
            result = self.local("svn",
                                [self.svn, "commit", tests, "-m", "auto"])
        result = self.local("git", [self.git, "add", "-A"])
        process, stdout, stderr = self.local(
            "git", [self.git, "commit", "-m", "updates from other projects"],
            raise_on_error=False)
        if "nothing to commit, working directory clean" in stdout or process.returncode == 0:
            pass
        else:
            Log.error("not expected {{result}}", result=result)
        result = self.local("git", [self.git, "push", "origin", "dev"])
Example #7
def main():
    try:
        settings = startup.read_settings(
            defs=[{
                "name": ["--all", "-a"],
                "action": 'store_true',
                "help": 'process all mo-* subdirectories',
                "dest": "all",
                "required": False
            }, {
                "name": ["--dir", "--directory", "-d"],
                "help": 'directory to deploy',
                "type": str,
                "dest": "directory",
                "required": True,
                "default": "."
            }])
        constants.set(settings.constants)
        Log.start(settings.debug)

        if settings.args.all:
            deploy_all(File(settings.args.directory), settings.prefix,
                       settings)
        else:
            Deploy(File(settings.args.directory), kwargs=settings).deploy()
    except Exception as e:
        Log.warning("Problem with etl", cause=e)
Example #8
def write_profiles(main_thread_profile):
    if cprofiler_stats is None:
        return

    from pyLibrary import convert
    from mo_files import File

    cprofiler_stats.add(pstats.Stats(main_thread_profile.cprofiler))
    stats = cprofiler_stats.pop_all()

    Log.note("aggregating {{num}} profile stats", num=len(stats))
    acc = stats[0]
    for s in stats[1:]:
        acc.add(s)

    stats = [
        {
            "num_calls": d[1],
            "self_time": d[2],
            "total_time": d[3],
            "self_time_per_call": d[2] / d[1],
            "total_time_per_call": d[3] / d[1],
            "file": (f[0] if f[0] != "~" else "").replace("\\", "/"),
            "line": f[1],
            "method": f[2].lstrip("<").rstrip(">")
        }
        for f, d in iteritems(acc.stats)
    ]
    stats_file = File(FILENAME, suffix=convert.datetime2string(datetime.now(), "_%Y%m%d_%H%M%S"))
    stats_file.write(convert.list2tab(stats))
    Log.note("profile written to {{filename}}", filename=stats_file.abspath)
Example #9
def write_profiles(main_thread_profile):
    if cprofiler_stats is None:
        return

    from pyLibrary import convert
    from mo_files import File

    cprofiler_stats.add(pstats.Stats(main_thread_profile.cprofiler))
    stats = cprofiler_stats.pop_all()

    Log.note("aggregating {{num}} profile stats", num=len(stats))
    acc = stats[0]
    for s in stats[1:]:
        acc.add(s)

    stats = [{
        "num_calls": d[1],
        "self_time": d[2],
        "total_time": d[3],
        "self_time_per_call": d[2] / d[1],
        "total_time_per_call": d[3] / d[1],
        "file": (f[0] if f[0] != "~" else "").replace("\\", "/"),
        "line": f[1],
        "method": f[2].lstrip("<").rstrip(">")
    } for f, d in iteritems(acc.stats)]
    stats_file = File(FILENAME,
                      suffix=convert.datetime2string(datetime.now(),
                                                     "_%Y%m%d_%H%M%S"))
    stats_file.write(convert.list2tab(stats))
    Log.note("profile written to {{filename}}", filename=stats_file.abspath)
Example #10
    def __init__(
        self,
        host,
        user=None,
        port=None,
        config=None,
        gateway=None,
        forward_agent=None,
        connect_timeout=None,
        connect_kwargs=None,
        inline_ssh_env=None,
        key_filename=None,  # part of connect_kwargs
        kwargs=None,
    ):
        connect_kwargs = set_default({}, connect_kwargs, {
            "key_filename": [File(f).abspath for f in listwrap(key_filename)]
        })

        key_filenames = connect_kwargs.key_filename

        self.stdout = LogStream(host, "stdout")
        self.stderr = LogStream(host, "stderr")
        config = Config(**unwrap(
            set_default(
                {},
                config,
                {
                    "overrides": {
                        "run": {
                            # "hide": True,
                            "out_stream": self.stdout,
                            "err_stream": self.stderr,
                        }
                    }
                },
            )))

        self.warn = False
        cause = Except("expecting some private key to connect")
        for key_file in key_filenames:
            try:
                connect_kwargs.key_filename = File(key_file).abspath
                self.conn = _Connection(
                    host,
                    user,
                    port,
                    config,
                    gateway,
                    forward_agent,
                    connect_timeout,
                    connect_kwargs,
                    inline_ssh_env,
                )
                self.conn.run("echo")  # verify we can connect
                return
            except Exception as e:
                cause = e

        Log.error("could not connect", cause=cause)
Example #11
    def test_small_changeset_to_json(self):
        small_patch_file = File("tests/resources/small.patch")

        j1 = diff_to_json(small_patch_file.read_bytes().decode(
            "utf8", "replace"))
        expected = File("tests/resources/small.json").read_json(flexible=False,
                                                                leaves=False)
        self.assertEqual(j1, expected)
Example #12
    def __init__(self, filename, host="fake", index="fake", kwargs=None):
        self.settings = kwargs
        self.file = File(filename)
        self.cluster = Null
        try:
            self.data = mo_json.json2value(self.file.read())
        except Exception as e:
            self.data = Data()
Example #13
    def put(self, local, remote, use_sudo=False):
        if use_sudo:
            filename = "/tmp/" + Random.string(20, string.digits + 'ABCDEF')
            self.conn.put(File(local).abspath, filename)
            self.sudo("cp " + filename + " " + remote)
            self.sudo("rm " + filename)
        else:
            self.conn.put(File(local).abspath, remote)
Example #14
    def test_extension(self):
        test1 = File("test.json")
        test2 = test1.add_extension("gz")
        test3 = test1.set_extension("gz")

        self.assertEqual(test1.filename, "test.json")
        self.assertEqual(test2.filename, "test.json.gz")
        self.assertEqual(test3.filename, "test.gz")
Example #15
    def test_platform(self):
        with open_binary_stream(File("tests/resources/Platform.gcno")) as source:
            result = gcno.read(source)
        Log.note("gcno: {{result|json}}", result=result)

        with open_binary_stream(File("tests/resources/Platform.gcda")) as source:
            result = gcda.read(source)
        Log.note("gcda: {{result|json}}", result=result)
Example #16
def setup_flask_ssl(flask_app, flask_config):
    """
    SPAWN A NEW THREAD TO RUN AN SSL ENDPOINT
    REMOVES ssl_context FROM flask_config BEFORE RETURNING

    :param flask_app:
    :param flask_config:
    :return:
    """
    if not flask_config.ssl_context:
        return

    ssl_flask = flask_config.copy()
    ssl_flask.debug = False
    ssl_flask.port = 443

    if is_data(flask_config.ssl_context):
        # EXPECTED PEM ENCODED FILE NAMES
        # `load_cert_chain` REQUIRES CONCATENATED LIST OF CERTS
        with TempFile() as tempfile:
            try:
                tempfile.write(
                    File(ssl_flask.ssl_context.certificate_file).read_bytes()
                )
                if ssl_flask.ssl_context.certificate_chain_file:
                    tempfile.write(
                        File(ssl_flask.ssl_context.certificate_chain_file).read_bytes()
                    )
                tempfile.flush()
                tempfile.close()

                context = SSLContext(PROTOCOL_SSLv23)
                context.load_cert_chain(
                    tempfile.name,
                    keyfile=File(ssl_flask.ssl_context.privatekey_file).abspath,
                )

                ssl_flask.ssl_context = context
            except Exception as e:
                Log.error("Could not handle ssl context construction", cause=e)

    def runner(please_stop):
        Log.warning(
            "ActiveData listening on encrypted port {{port}}", port=ssl_flask.port
        )
        flask_app.run(**ssl_flask)

    Thread.run("SSL Server", runner)

    if flask_config.ssl_context and flask_config.port != 80:
        Log.warning(
            "ActiveData has SSL context, but is still listening on non-encrypted http port {{port}}",
            port=flask_config.port,
        )

    flask_config.ssl_context = None
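The concatenation step exists because SSLContext.load_cert_chain accepts a single certfile that may contain the leaf certificate followed by its chain; a stdlib-only sketch (file names hypothetical; PROTOCOL_TLS_SERVER is the modern spelling of the deprecated PROTOCOL_SSLv23 used above):

    from ssl import PROTOCOL_TLS_SERVER, SSLContext

    context = SSLContext(PROTOCOL_TLS_SERVER)
    # fullchain.pem = LEAF CERTIFICATE + INTERMEDIATES, CONCATENATED IN ORDER
    context.load_cert_chain("fullchain.pem", keyfile="private_key.pem")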
Example #17
def __deploy__():
    # ONLY MEANT TO BE RUN FOR DEPLOYMENT
    from mo_files import File
    source_file = File("moz_sql_parser/sql_parser.py")
    lines = source_file.read().split("\n")
    lines = [
        "sys.setrecursionlimit(1500)"
        if line.startswith("sys.setrecursionlimit") else line for line in lines
    ]
    source_file.write("\n".join(lines))
Example #18
    def test_suffix(self):
        test1 = File("tools/test.json")
        test2 = test1.add_suffix(".backup")
        test3 = test1.add_suffix("-backup")
        test4 = test1.set_name("other")

        self.assertEqual(test1.filename, "tools/test.json")
        self.assertEqual(test2.filename, "tools/test.backup.json")
        self.assertEqual(test3.filename, "tools/test.-backup.json")
        self.assertEqual(test4.filename, "tools/other.json")
Example #19
    def test_big_changeset_to_json(self):
        big_patch_file = File("tests/resources/big.patch")
        # big_patch_file.write_bytes(http.get("https://hg.mozilla.org/mozilla-central/raw-rev/e5693cea1ec944ca077c7a46c5f127c828a90f1b").content)
        self.assertEqual(b'\r'.decode('utf8', 'replace'), u'\r')

        j1 = diff_to_json(big_patch_file.read_bytes().decode(
            "utf8", "replace"))
        expected = File("tests/resources/big.json").read_json(flexible=False,
                                                              leaves=False)
        self.assertEqual(j1, expected)
Example #20
    def test_diff_to_json(self):
        j1 = diff_to_json(File("tests/resources/diff1.patch").read())
        j2 = diff_to_json(File("tests/resources/diff2.patch").read())

        e1 = File("tests/resources/diff1.json").read_json(flexible=False,
                                                          leaves=False)
        e2 = File("tests/resources/diff2.json").read_json(flexible=False,
                                                          leaves=False)
        self.assertEqual(j1, e1)
        self.assertEqual(j2, e2)
Example #21
    def __init__(self, file):
        assert file
        from mo_files import File

        self.file = File(file)
        if self.file.exists:
            self.file.backup()
            self.file.delete()

        self.file_lock = allocate_lock()
Example #22
def format_file_in_place(src, mode):
    """Format file under `src` path. Return True if changed.

    If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
    code to the file.
    `mode` and `fast` options are passed to :func:`format_file_contents`.
    """
    file = File(src)
    file.write(format_str(file.read(), mode=mode))
    return True
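Assuming format_str and the mode object come from Black (which this wrapper appears to use), a call would look like this (file name hypothetical):

    import black

    format_file_in_place("my_module.py", mode=black.FileMode())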
Example #23
class FakeES():
    @override
    def __init__(self, filename, host="fake", index="fake", kwargs=None):
        self.settings = kwargs
        self.file = File(filename)
        self.cluster = Null
        try:
            self.data = mo_json.json2value(self.file.read())
        except Exception as e:
            self.data = Data()

    def search(self, query):
        query = wrap(query)
        f = jx.get(query.query.filtered.filter)
        filtered = wrap([{"_id": i, "_source": d} for i, d in self.data.items() if f(d)])
        if query.fields:
            return wrap({"hits": {"total": len(filtered), "hits": [{"_id": d._id, "fields": unwrap(jx.select([unwrap(d._source)], query.fields)[0])} for d in filtered]}})
        else:
            return wrap({"hits": {"total": len(filtered), "hits": filtered}})

    def extend(self, records):
        """
        JUST SO WE MODEL A Queue
        """
        records = {
            v["id"]: v["value"] if "value" in v else mo_json.json2value(v['json'])
            for v in records
        }
        for r in records.values():
            try:
                del r['etl']
            except Exception:
                pass

        unwrap(self.data).update(records)
        self.refresh()
        Log.note("{{num}} documents added", num=len(records))

    def add(self, record):
        if is_list(record):
            Log.error("no longer accepting lists, use extend()")
        return self.extend([record])

    def delete_record(self, filter):
        f = esfilter2where(filter)
        self.data = wrap({k: v for k, v in self.data.items() if not f(v)})

    def refresh(self, *args, **kwargs):
        data_as_json = mo_json.value2json(self.data, pretty=True)
        self.file.write(data_as_json)


    def set_refresh_interval(self, seconds):
        pass
Example #24
    def put(self, local, remote, use_sudo=False):
        if self.conn.command_cwds and not remote.startswith(("/", "~")):
            remote = self.conn.command_cwds[-1].rstrip("/'") + "/" + remote

        if use_sudo:
            filename = "/tmp/" + Random.hex(20)
            self.conn.put(File(local).abspath, filename)
            self.sudo("cp " + filename + " " + remote)
            self.sudo("rm " + filename)
        else:
            self.conn.put(File(local).abspath, remote)
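The pattern stages the file under /tmp with a random name so the unprivileged SFTP channel can write it, then copies it into place with root rights; given an instance of this wrapper (called `connection` here, with hypothetical paths):

    connection.put("./config/app.conf", "/etc/app/app.conf", use_sudo=True)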
Example #25
    def get(self, remote, local, use_sudo=False):
        if self.conn.command_cwds and not remote.startswith(("/", "~")):
            remote = self.conn.command_cwds[-1].rstrip("/'") + "/" + remote

        if use_sudo:
            filename = "/tmp/" + Random.filename()
            self.sudo("cp " + remote + " " + filename)
            self.sudo("chmod a+r " + filename)
            self.conn.get(filename, File(local).abspath)
            self.sudo("rm " + filename)
        else:
            self.conn.get(remote, File(local).abspath)
Example #26
    def test_home_path(self):
        home_path = os.path.expanduser("~").replace(os.sep, "/")
        test1 = File("~")
        test2 = File("~/")
        test3 = File("~/test.json")
        test4 = File("~test.json")
        test5 = File("~") / "test.json"

        self.assertEqual(test1.filename, home_path + "/")
        self.assertEqual(test2.filename, home_path + "/")
        self.assertEqual(test3.filename, home_path + "/test.json")
        self.assertEqual(test4.filename, home_path + "/test.json")
        self.assertEqual(test5.filename, home_path + "/test.json")
Example #27
def _upgrade():
    global _upgraded
    _upgraded = True
    try:
        import sys

        sqlite_dll = File.new_instance(sys.exec_prefix, "dlls/sqlite3.dll")
        python_dll = File("pyLibrary/vendor/sqlite/sqlite3.dll")
        if python_dll.read_bytes() != sqlite_dll.read_bytes():
            backup = sqlite_dll.backup()
            File.copy(python_dll, sqlite_dll)
    except Exception as e:
        Log.warning("could not upgrade python's sqlite", cause=e)
Example #28
def _upgrade():
    global _upgraded
    global _sqlite3

    try:
        import sys
        import platform

        if "windows" in platform.system().lower():
            original_dll = File.new_instance(sys.exec_prefix,
                                             "dlls/sqlite3.dll")
            if platform.architecture()[0] == "32bit":
                source_dll = File(
                    "vendor/pyLibrary/vendor/sqlite/sqlite3_32.dll")
            else:
                source_dll = File(
                    "vendor/pyLibrary/vendor/sqlite/sqlite3_64.dll")

            if not all(a == b for a, b in zip_longest(
                    source_dll.read_bytes(), original_dll.read_bytes())):
                original_dll.backup()
                File.copy(source_dll, original_dll)
        else:
            pass
    except Exception as e:
        Log.warning("could not upgrade python's sqlite", cause=e)

    import sqlite3 as _sqlite3

    _ = _sqlite3
    _upgraded = True
Example #29
class FakeES():
    @override
    def __init__(self, filename, host="fake", index="fake", kwargs=None):
        self.settings = kwargs
        self.file = File(filename)
        self.cluster = Null
        try:
            self.data = mo_json.json2value(self.file.read())
        except Exception as e:
            self.data = Data()

    def search(self, query):
        query = wrap(query)
        f = jx.get(query.query.filtered.filter)
        filtered = wrap([{"_id": i, "_source": d} for i, d in self.data.items() if f(d)])
        if query.fields:
            return wrap({"hits": {"total": len(filtered), "hits": [{"_id": d._id, "fields": unwrap(jx.select([unwrap(d._source)], query.fields)[0])} for d in filtered]}})
        else:
            return wrap({"hits": {"total": len(filtered), "hits": filtered}})

    def extend(self, records):
        """
        JUST SO WE MODEL A Queue
        """
        records = {
            v["id"]: v["value"] if "value" in v else mo_json.json2value(v['json'])
            for v in records
        }

        unwrap(self.data).update(records)
        self.refresh()
        Log.note("{{num}} documents added", num=len(records))

    def add(self, record):
        if isinstance(record, list):
            Log.error("no longer accepting lists, use extend()")
        return self.extend([record])

    def delete_record(self, filter):
        f = esfilter2where(filter)
        self.data = wrap({k: v for k, v in self.data.items() if not f(v)})

    def refresh(self, *args, **kwargs):
        data_as_json = mo_json.value2json(self.data, pretty=True)
        self.file.write(data_as_json)


    def set_refresh_interval(self, seconds):
        pass
Example #30
    def _get_test_data(self):
        file1 = File("tests/resources/example_file_v1.py").read().split('\n')
        file2 = File("tests/resources/example_file_v2.py").read().split('\n')
        file3 = File("tests/resources/example_file_v3.py").read().split('\n')

        c1 = parse_diff_to_matrix(
            diff=File("tests/resources/diff1.patch").read(),
            new_source_code=file2)["/tests/resources/example_file.py"]

        c2 = parse_diff_to_matrix(
            diff=File("tests/resources/diff2.patch").read(),
            new_source_code=file3)["/tests/resources/example_file.py"]

        # file1 -> c1 -> file2 -> c2 -> file3
        return file1, c1, file2, c2, file3
Example #31
def accumulate_counts(gcda_file, lookup):
    """
    :param gcda_file: zipfile of gcda directory
    :param lookup: gcno lookup file
    :return:  map from file to line to count
    """
    output = Data()
    with ZipFile(File(gcda_file).abspath) as zipped:
        for num, zip_file in enumerate(zipped.filelist):
            if zip_file.file_size == 0:
                continue
            if not zip_file.filename.endswith(".gcda"):
                continue
            Log.note("process gcda {{file}}", file=zip_file.filename)
            try:
                with zipped.open(zip_file.filename) as source:
                    for c in gcda.stream_counts(source):
                        uid, counters = c['uid'], c['counters']
                        blocks = lookup.get(uid, Null)
                        for b, c in zip(blocks, counters):
                            if c:
                                for l in b:
                                    output[l.file][l.line] += c
            except Exception as e:
                Log.warning("{{filename}} could not be processed",
                            filename=zip_file.filename,
                            cause=e)
    return output
Example #32
def make_log_from_settings(settings):
    assert settings["class"]

    settings = settings.copy()

    # IMPORT MODULE FOR HANDLER
    path = settings["class"].split(".")
    class_name = path[-1]
    path = ".".join(path[:-1])
    constructor = None
    try:
        temp = __import__(path, globals(), locals(), [class_name], 0)
        constructor = object.__getattribute__(temp, class_name)
    except Exception as e:
        if settings.stream and not constructor:
            # PROVIDE A DEFAULT STREAM HANDLER
            constructor = StructuredLogger_usingThreadedStream
        else:
            Log.error("Can not find class {{class}}", {"class": path}, cause=e)

    # IF WE NEED A FILE, MAKE SURE DIRECTORY EXISTS
    if settings.filename != None:
        from mo_files import File

        f = File(settings.filename)
        if not f.parent.exists:
            f.parent.create()

    settings['class'] = None
    params = unwrap(settings)
    log_instance = constructor(**params)
    return log_instance
Example #33
def read_settings(filename=None, defs=None):
    """
    :param filename: Force load a file
    :param defs: arguments you want to accept
    :param default_filename: A config file from an environment variable (a fallback config file, if no other provided)
    :return:
    """
    # READ SETTINGS
    defs = listwrap(defs)
    defs.append({
        "name": ["--config", "--settings", "--settings-file", "--settings_file"],
        "help": "path to JSON file with settings",
        "type": str,
        "dest": "filename",
        "default": None,
        "required": False
    })
    args = argparse(defs)

    args.filename = coalesce(filename, args.filename, "./config.json")
    settings_file = File(args.filename)
    if not settings_file.exists:
        Log.error("Can not read configuration file {{filename}}",
                  {"filename": settings_file.abspath})
    settings = mo_json_config.get("file:///" + settings_file.abspath)
    settings.args = args
    return settings
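A sketch of calling it with an extra argument definition, in the style of Example #7 (the --verbose flag is hypothetical):

    settings = read_settings(defs=[{
        "name": ["--verbose", "-v"],
        "action": "store_true",
        "help": "emit debug logging",
        "dest": "verbose",
        "required": False
    }])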
Example #34
    def __init__(self, filename, host="fake", index="fake", kwargs=None):
        self.settings = kwargs
        self.file = File(filename)
        self.cluster = Null
        try:
            self.data = mo_json.json2value(self.file.read())
        except Exception as e:
            self.data = Data()
Example #35
    def _setup_etl_supervisor(self, cpu_count):
        # INSTALL supervisor
        sudo("apt-get install -y supervisor")
        with fabric_settings(warn_only=True):
            sudo("service supervisor start")

        # READ LOCAL CONFIG FILE, ALTER IT FOR THIS MACHINE RESOURCES, AND PUSH TO REMOTE
        conf_file = File("./examples/config/etl_supervisor.conf")
        content = conf_file.read_bytes()
        find = between(content, "numprocs=", "\n")
        content = content.replace("numprocs=" + find + "\n", "numprocs=" + str(cpu_count) + "\n")
        File("./temp/etl_supervisor.conf.alt").write_bytes(content)
        sudo("rm -f /etc/supervisor/conf.d/etl_supervisor.conf")
        put("./temp/etl_supervisor.conf.alt", '/etc/supervisor/conf.d/etl_supervisor.conf', use_sudo=True)
        run("mkdir -p /home/ubuntu/ActiveData-ETL/results/logs")

        # POKE supervisor TO NOTICE THE CHANGE
        sudo("supervisorctl reread")
        sudo("supervisorctl update")
Example #36
    def __init__(self, file):
        assert file
        from mo_files import File

        self.file = File(file)
        if self.file.exists:
            self.file.backup()
            self.file.delete()

        self.file_lock = allocate_lock()
Example #37
def write_profile(profile_settings, stats):
    from pyLibrary import convert
    from mo_files import File

    acc = stats[0]
    for s in stats[1:]:
        acc.add(s)

    stats = [{
        "num_calls": d[1],
        "self_time": d[2],
        "total_time": d[3],
        "self_time_per_call": d[2] / d[1],
        "total_time_per_call": d[3] / d[1],
        "file": (f[0] if f[0] != "~" else "").replace("\\", "/"),
        "line": f[1],
        "method": f[2].lstrip("<").rstrip(">")
    } for f, d in acc.stats.iteritems()]
    stats_file = File(profile_settings.filename, suffix=convert.datetime2string(datetime.now(), "_%Y%m%d_%H%M%S"))
    stats_file.write(convert.list2tab(stats))
Example #38
def execute_file(
    filename,
    host,
    username,
    password,
    schema=None,
    param=None,
    ignore_errors=False,
    kwargs=None
):
    # MySQLdb provides no way to execute an entire SQL file in bulk, so we
    # have to shell out to the commandline client.
    file = File(filename)
    if file.extension == 'zip':
        sql = file.read_zipfile()
    else:
        sql = file.read()

    if ignore_errors:
        with suppress_exception:
            execute_sql(sql=sql, kwargs=kwargs)
    else:
        execute_sql(sql=sql, kwargs=kwargs)
Example #39
    def _set_mtu(self, mtu=1500):
        # SET RIGHT NOW
        sudo("ifconfig eth0 mtu "+unicode(mtu))

        # DESPITE THE FILE CHANGE, THE MTU VALUE DOES NOT STICK
        local_file = File("./results/temp/ifcfg-eth0")
        local_file.delete()
        get("/etc/sysconfig/network-scripts/ifcfg-eth0", "./results/temp/ifcfg-eth0", use_sudo=True)
        lines = local_file.read()
        if lines.find("MTU=1500") == -1:
            lines += "\nMTU=1500"
        local_file.write(lines)
        put("./results/temp/ifcfg-eth0", "/etc/sysconfig/network-scripts/ifcfg-eth0", use_sudo=True)
Example #40
    def __init__(self, _file):
        """
        file - USES FILE FOR PERSISTENCE
        """
        self.file = File.new_instance(_file)
        self.lock = Lock("lock for persistent queue using file " + self.file.name)
        self.please_stop = Signal()
        self.db = Data()
        self.pending = []

        if self.file.exists:
            for line in self.file:
                with suppress_exception:
                    delta = mo_json.json2value(line)
                    apply_delta(self.db, delta)
            if self.db.status.start == None:  # HAPPENS WHEN ONLY ADDED TO QUEUE, THEN CRASH
                self.db.status.start = 0
            self.start = self.db.status.start

            # SCRUB LOST VALUES
            lost = 0
            for k in self.db.keys():
                with suppress_exception:
                    # int(k) FAILS FOR "status", AND MAYBE OTHER PROPERTIES TOO
                    if k != "status" and int(k) < self.start:
                        self.db[k] = None
                        lost += 1
            if lost:
                Log.warning("queue file had {{num}} items lost", num=lost)

            DEBUG and Log.note("Persistent queue {{name}} found with {{num}} items", name=self.file.abspath, num=len(self))
        else:
            self.db.status = Data(
                start=0,
                end=0
            )
            self.start = self.db.status.start
            DEBUG and Log.note("New persistent queue {{name}}", name=self.file.abspath)
Example #41
class StructuredLogger_usingFile(StructuredLogger):
    def __init__(self, file):
        assert file
        from mo_files import File

        self.file = File(file)
        if self.file.exists:
            self.file.backup()
            self.file.delete()

        self.file_lock = allocate_lock()

    def write(self, template, params):
        try:
            with self.file_lock:
                self.file.append(expand_template(template, params))
        except Exception as e:
            Log.warning("Problem writing to file {{file}}, waiting...", file=file.name, cause=e)
            time.sleep(5)
Example #42
def unquote(value):
    # BEST-EFFORT NUMERIC COERCION; def LINE RECONSTRUCTED (THE FRAGMENT BEGAN MID-FUNCTION)
    if not value:
        return Null

    try:
        return int(value)
    except Exception:
        pass

    try:
        return float(value)
    except Exception:
        pass

    return value

tab_data = File("resources/EC2.csv").read()
lines = map(strings.trim, tab_data.split("\n"))
header = lines[0].split(",")
rows = [r.split(",") for r in lines[1:] if r]
data = wrap([{h: unquote(r[c]) for c, h in enumerate(header)} for r in rows])


for d in data:
    d.utility = Math.min(d.memory, d.storage/50, 60)
    d.drives["$ref"] = "#" + unicode(d.num_drives) + "_ephemeral_drives"
    d.discount = 0

Log.note("{{data|json(False)}}", data=[d for d in data if d.utility])

Log.note("{{data|json}}", data={d.instance_type: {"num": d.num_drives, "size": d.storage} for d in jx.sort(data, "instance_type")})
Example #43
    def _install_es(self, gigabytes, es_version="6.2.3"):
        volumes = self.instance.markup.drives

        if not fabric_files.exists("/usr/local/elasticsearch/config/elasticsearch.yml"):
            with cd("/home/ec2-user/"):
                run("mkdir -p temp")

            if not File(LOCAL_JRE).exists:
                Log.error("Expecting {{file}} on manager to spread to ES instances", file=LOCAL_JRE)
            response = run("java -version", warn_only=True)
            if "Java(TM) SE Runtime Environment" not in response:
                with cd("/home/ec2-user/temp"):
                    run('rm -f '+JRE)
                    put(LOCAL_JRE, JRE)
                    sudo("rpm -i "+JRE)
                    sudo("alternatives --install /usr/bin/java java /usr/java/default/bin/java 20000")
                    run("export JAVA_HOME=/usr/java/default")

            with cd("/home/ec2-user/"):
                run('wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-'+es_version+'.tar.gz')
                run('tar zxfv elasticsearch-'+es_version+'.tar.gz')
                sudo("rm -fr /usr/local/elasticsearch", warn_only=True)
                sudo('mkdir /usr/local/elasticsearch')
                sudo('cp -R elasticsearch-'+es_version+'/* /usr/local/elasticsearch/')

            with cd('/usr/local/elasticsearch/'):
                # BE SURE TO MATCH THE PLUGIN WITH ES VERSION
                # https://github.com/elasticsearch/elasticsearch-cloud-aws
                sudo('sudo bin/elasticsearch-plugin install -b discovery-ec2')

            # REMOVE THESE FILES, WE WILL REPLACE THEM WITH THE CORRECT VERSIONS AT THE END
            sudo("rm -f /usr/local/elasticsearch/config/elasticsearch.yml")
            sudo("rm -f /usr/local/elasticsearch/config/jvm.options")
            sudo("rm -f /usr/local/elasticsearch/config/log4j2.properties")

        self.conn = self.instance.connection

        # MOUNT AND FORMAT THE VOLUMES (list with `lsblk`)
        for i, k in enumerate(volumes):
            if not fabric_files.exists(k.path):
                with fabric_settings(warn_only=True):
                    sudo('sudo umount '+k.device)

                sudo('yes | sudo mkfs -t ext4 '+k.device)

                # ES AND JOURNALLING DO NOT MIX
                sudo('tune2fs -o journal_data_writeback '+k.device)
                sudo('tune2fs -O ^has_journal '+k.device)
                sudo('mkdir '+k.path)
                sudo('sudo mount '+k.device+' '+k.path)
                sudo('chown -R ec2-user:ec2-user '+k.path)

                # ADD TO /etc/fstab SO IT IS STILL MOUNTED AFTER REBOOT
                sudo("sed -i '$ a\\"+k.device+"   "+k.path+"       ext4    defaults,nofail  0   2' /etc/fstab")

        # TEST IT IS WORKING
        sudo('mount -a')

        # INCREASE THE FILE HANDLE LIMITS
        with cd("/home/ec2-user/"):
            File("./results/temp/sysctl.conf").delete()
            get("/etc/sysctl.conf", "./results/temp/sysctl.conf", use_sudo=True)
            lines = File("./results/temp/sysctl.conf").read()
            if lines.find("fs.file-max = 100000") == -1:
                lines += "\nfs.file-max = 100000"
            lines = lines.replace("net.bridge.bridge-nf-call-ip6tables = 0", "")
            lines = lines.replace("net.bridge.bridge-nf-call-iptables = 0", "")
            lines = lines.replace("net.bridge.bridge-nf-call-arptables = 0", "")
            File("./results/temp/sysctl.conf").write(lines)
            put("./results/temp/sysctl.conf", "/etc/sysctl.conf", use_sudo=True)

        sudo("sudo sed -i '$ a\\vm.max_map_count = 262144' /etc/sysctl.conf")

        sudo("sysctl -p")

        # INCREASE FILE HANDLE PERMISSIONS
        sudo("sed -i '$ a\\root soft nofile 100000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\root hard nofile 100000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\root soft memlock unlimited' /etc/security/limits.conf")
        sudo("sed -i '$ a\\root hard memlock unlimited' /etc/security/limits.conf")

        sudo("sed -i '$ a\\ec2-user soft nofile 100000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\ec2-user hard nofile 100000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\ec2-user soft memlock unlimited' /etc/security/limits.conf")
        sudo("sed -i '$ a\\ec2-user hard memlock unlimited' /etc/security/limits.conf")


        if not fabric_files.exists("/data1/logs"):
            run('mkdir /data1/logs')
            run('mkdir /data1/heapdump')

        # COPY CONFIG FILES TO ES DIR
        if not fabric_files.exists("/usr/local/elasticsearch/config/elasticsearch.yml"):
            put("./examples/config/es6_log4j2.properties", '/usr/local/elasticsearch/config/log4j2.properties', use_sudo=True)

            jvm = File("./examples/config/es6_jvm.options").read().replace('\r', '')
            jvm = expand_template(jvm, {"memory": int(gigabytes/2)})
            File("./results/temp/jvm.options").write(jvm)
            put("./results/temp/jvm.options", '/usr/local/elasticsearch/config/jvm.options', use_sudo=True)

            yml = File("./examples/config/es6_config.yml").read().replace("\r", "")
            yml = expand_template(yml, {
                "id": self.instance.ip_address,
                "data_paths": ",".join("/data" + text_type(i + 1) for i, _ in enumerate(volumes))
            })
            File("./results/temp/elasticsearch.yml").write(yml)
            put("./results/temp/elasticsearch.yml", '/usr/local/elasticsearch/config/elasticsearch.yml', use_sudo=True)

        sudo("chown -R ec2-user:ec2-user /usr/local/elasticsearch")
Example #44
    def _install_es(self, gigabytes):
        volumes = self.instance.markup.drives

        if not fabric_files.exists("/usr/local/elasticsearch"):
            with cd("/home/ec2-user/"):
                run("mkdir -p temp")

            if not File(LOCAL_JRE).exists:
                Log.error("Expecting {{file}} on manager to spread to ES instances", file=LOCAL_JRE)
            with cd("/home/ec2-user/temp"):
                run('rm -f '+JRE)
                put("resources/"+JRE, JRE)
                sudo("rpm -i "+JRE)
                sudo("alternatives --install /usr/bin/java java /usr/java/default/bin/java 20000")
                run("export JAVA_HOME=/usr/java/default")

            with cd("/home/ec2-user/"):
                run('wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.7.1.tar.gz')
                run('tar zxfv elasticsearch-1.7.1.tar.gz')
                sudo('mkdir /usr/local/elasticsearch')
                sudo('cp -R elasticsearch-1.7.1/* /usr/local/elasticsearch/')

            with cd('/usr/local/elasticsearch/'):
                # BE SURE TO MATCH THE PLUGIN WITH ES VERSION
                # https://github.com/elasticsearch/elasticsearch-cloud-aws
                sudo('bin/plugin -install elasticsearch/elasticsearch-cloud-aws/2.7.1')

            # REMOVE THESE FILES, WE WILL REPLACE THEM WITH THE CORRECT VERSIONS AT THE END
            sudo("rm -f /usr/local/elasticsearch/config/elasticsearch.yml")
            sudo("rm -f /usr/local/elasticsearch/bin/elasticsearch.in.sh")

        self.conn = self.instance.connection

        # MOUNT AND FORMAT THE EBS VOLUMES (list with `lsblk`)
        for i, k in enumerate(volumes):
            if not fabric_files.exists(k.path):
                with fabric_settings(warn_only=True):
                    sudo('sudo umount '+k.device)

                sudo('yes | sudo mkfs -t ext4 '+k.device)
                sudo('mkdir '+k.path)
                sudo('sudo mount '+k.device+' '+k.path)

                # ADD TO /etc/fstab SO IT IS STILL MOUNTED AFTER REBOOT
                sudo("sed -i '$ a\\"+k.device+"   "+k.path+"       ext4    defaults,nofail  0   2' /etc/fstab")

        # TEST IT IS WORKING
        sudo('mount -a')

        # INCREASE THE FILE HANDLE LIMITS
        with cd("/home/ec2-user/"):
            File("./results/temp/sysctl.conf").delete()
            get("/etc/sysctl.conf", "./results/temp/sysctl.conf", use_sudo=True)
            lines = File("./results/temp/sysctl.conf").read()
            if lines.find("fs.file-max = 100000") == -1:
                lines += "\nfs.file-max = 100000"
            lines = lines.replace("net.bridge.bridge-nf-call-ip6tables = 0", "")
            lines = lines.replace("net.bridge.bridge-nf-call-iptables = 0", "")
            lines = lines.replace("net.bridge.bridge-nf-call-arptables = 0", "")
            File("./results/temp/sysctl.conf").write(lines)
            put("./results/temp/sysctl.conf", "/etc/sysctl.conf", use_sudo=True)

        sudo("sysctl -p")

        # INCREASE FILE HANDLE PERMISSIONS
        sudo("sed -i '$ a\\root soft nofile 50000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\root hard nofile 100000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\root memlock unlimited' /etc/security/limits.conf")

        sudo("sed -i '$ a\\ec2-user soft nofile 50000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\ec2-user hard nofile 100000' /etc/security/limits.conf")
        sudo("sed -i '$ a\\ec2-user memlock unlimited' /etc/security/limits.conf")

        # EFFECTIVE LOGIN TO LOAD CHANGES TO FILE HANDLES
        # sudo("sudo -i -u ec2-user")

        if not fabric_files.exists("/data1/logs"):
            sudo('mkdir /data1/logs')
            sudo('mkdir /data1/heapdump')

            # INCREASE NUMBER OF FILE HANDLES
            # sudo("sysctl -w fs.file-max=64000")
        # COPY CONFIG FILE TO ES DIR
        if not fabric_files.exists("/usr/local/elasticsearch/config/elasticsearch.yml"):
            yml = File("./examples/config/es_config.yml").read().replace("\r", "")
            yml = expand_template(yml, {
                "id": Random.hex(length=8),
                "data_paths": ",".join("/data"+unicode(i+1) for i, _ in enumerate(volumes))
            })
            File("./results/temp/elasticsearch.yml").write(yml)
            put("./results/temp/elasticsearch.yml", '/usr/local/elasticsearch/config/elasticsearch.yml', use_sudo=True)

        # FOR SOME REASON THE export COMMAND DOES NOT SEEM TO WORK
        # THIS SCRIPT SETS THE ES_MIN_MEM/ES_MAX_MEM EXPLICITLY
        if not fabric_files.exists("/usr/local/elasticsearch/bin/elasticsearch.in.sh"):
            sh = File("./examples/config/es_run.sh").read().replace("\r", "")
            sh = expand_template(sh, {"memory": unicode(int(gigabytes/2))})
            File("./results/temp/elasticsearch.in.sh").write(sh)
            with cd("/home/ec2-user"):
                put("./results/temp/elasticsearch.in.sh", './temp/elasticsearch.in.sh', use_sudo=True)
                sudo("cp -f ./temp/elasticsearch.in.sh /usr/local/elasticsearch/bin/elasticsearch.in.sh")
Example #45
    def _worker(self, please_stop):
        global _load_extension_warning_sent

        if DEBUG:
            Log.note("Sqlite version {{version}}", version=sqlite3.sqlite_version)
        if Sqlite.canonical:
            self.db = Sqlite.canonical
        else:
            self.db = sqlite3.connect(coalesce(self.filename, ':memory:'))

            library_loc = File.new_instance(sys.modules[__name__].__file__, "../..")
            full_path = File.new_instance(library_loc, "vendor/sqlite/libsqlitefunctions.so").abspath
            try:
                trace = extract_stack(0)[0]
                if self.upgrade:
                    if os.name == 'nt':
                        file = File.new_instance(trace["file"], "../../vendor/sqlite/libsqlitefunctions.so")
                    else:
                        file = File.new_instance(trace["file"], "../../vendor/sqlite/libsqlitefunctions")

                    full_path = file.abspath
                    self.db.enable_load_extension(True)
                    self.db.execute("SELECT load_extension(" + self.quote_value(full_path) + ")")
            except Exception as e:
                if not _load_extension_warning_sent:
                    _load_extension_warning_sent = True
                    Log.warning("Could not load {{file}}}, doing without. (no SQRT for you!)", file=full_path, cause=e)

        try:
            while not please_stop:
                command, result, signal, trace = self.queue.pop(till=please_stop)

                if DEBUG_INSERT and command.strip().lower().startswith("insert"):
                    Log.note("Running command\n{{command|indent}}", command=command)
                if DEBUG and not command.strip().lower().startswith("insert"):
                    Log.note("Running command\n{{command|indent}}", command=command)
                with Timer("Run command", debug=DEBUG):
                    if signal is not None:
                        try:
                            curr = self.db.execute(command)
                            self.db.commit()
                            result.meta.format = "table"
                            result.header = [d[0] for d in curr.description] if curr.description else None
                            result.data = curr.fetchall()
                            if DEBUG and result.data:
                                text = convert.table2csv(list(result.data))
                                Log.note("Result:\n{{data}}", data=text)
                        except Exception as e:
                            e = Except.wrap(e)
                            result.exception = Except(ERROR, "Problem with\n{{command|indent}}", command=command, cause=e)
                        finally:
                            signal.go()
                    else:
                        try:
                            self.db.execute(command)
                            self.db.commit()
                        except Exception as e:
                            e = Except.wrap(e)
                            e.cause = Except(
                                type=ERROR,
                                template="Bad call to Sqlite",
                                trace=trace
                            )
                            Log.warning("Failure to execute", cause=e)

        except Exception as e:
            if not please_stop:
                Log.error("Problem with sql thread", e)
        finally:
            if DEBUG:
                Log.note("Database is closed")
            self.db.commit()
            self.db.close()