Exemple #1
0
def prepend_to_path(opt_project_dir=True, opt_package_dir=False, opt_current_dir=False):
    """Prepend selected directories to ``sys.path``.

    Args:
        opt_project_dir: when true, insert ``PROJECT_DIR``.
        opt_package_dir: when true, insert ``PROJECT_DIR/PACKAGE_DIR``.
        opt_current_dir: when true, insert the resolved current directory.

    The project dir is inserted last so it ends up first on ``sys.path``.
    """
    # NOTE(review): this tests the *flag* against sys.path, not the
    # directory itself -- presumably intended to avoid duplicates; confirm.
    if opt_current_dir and opt_current_dir not in sys.path:
        # Fix: os.realpath does not exist; the function is os.path.realpath.
        sys.path.insert(0, os.path.realpath(os.curdir))
    if opt_package_dir:
        sys.path.insert(0, os.path.realpath(os.path.join(PROJECT_DIR, PACKAGE_DIR)))
    if opt_project_dir:
        sys.path.insert(0, PROJECT_DIR)
Exemple #2
0
def clone_file(repository, new_workdir, link, operation):
    """Replicate ``repository/link`` into ``new_workdir`` via *operation*.

    Args:
        repository: source tree root.
        new_workdir: destination tree root.
        link: path of the file, relative to both roots.
        operation: callable ``operation(src, dst)`` (e.g. ``shutil.copy``
            or ``os.symlink``) that performs the actual transfer.

    Silently does nothing when the source file does not exist.
    """
    if not os.path.exists(os.path.join(repository, link)):
        return
    # Make sure the destination's parent directory exists.
    link_dir = os.path.dirname(os.path.join(new_workdir, link))
    if not os.path.exists(link_dir):
        os.makedirs(link_dir)
    src = os.path.join(repository, link)
    if os.path.islink(src):
        # Fix: os.realpath does not exist; resolve symlinks with
        # os.path.realpath so the operation acts on the real file.
        src = os.path.realpath(src)
    operation(src, os.path.join(new_workdir, link))
Exemple #3
0
 def checkWorkingPath(self, path):
     """Validate *path* and return its resolved real location.

     Raises:
         Unauthorized: when the path lies outside both the Zope instance
             home and the system temp directory.
     """
     # First remove any '..' to prevent escaping.
     # Note that 'normpath' ignore symlinks so it would not do it correctly.
     parts = path.split(os.sep)
     try:
         # Index just past the *last* '..' component; resolve everything
         # up to it so symlinks cannot be used to escape.
         i = len(parts) - parts[::-1].index(os.pardir)
         # Fix: lists have no .join() and os.realpath does not exist --
         # join with os.sep.join(...) and resolve via os.path.realpath.
         parts[:i] = os.path.realpath(os.sep.join(parts[:i])),
         path = os.sep.join(parts)
     except ValueError:
         # No '..' component at all.
         pass
     # Allow symlinks inside instance home.
     path = os.path.normpath(path)
     real_path = os.path.realpath(path)
     for allowed in getConfiguration().instancehome, gettempdir():
         if issubdir(allowed, path) or issubdir(allowed, real_path):
             return real_path
     raise Unauthorized("Unauthorized access to path %r."
                        " It is NOT in your Zope home instance." % path)
Exemple #4
0
 def checkWorkingPath(self, path):
   """Validate *path* and return its resolved real location.

   Raises:
       Unauthorized: when the path lies outside both the Zope instance
           home and the system temp directory.
   """
   # First remove any '..' to prevent escaping.
   # Note that 'normpath' ignore symlinks so it would not do it correctly.
   parts = path.split(os.sep)
   try:
     # Index just past the *last* '..'; resolve everything up to it so
     # symlinks cannot be used to escape.
     i = len(parts) - parts[::-1].index(os.pardir)
     # Fix: lists have no .join() and os.realpath does not exist -- join
     # with os.sep.join(...) and resolve via os.path.realpath.
     parts[:i] = os.path.realpath(os.sep.join(parts[:i])),
     path = os.sep.join(parts)
   except ValueError:
     # No '..' component at all.
     pass
   # Allow symlinks inside instance home.
   path = os.path.normpath(path)
   real_path = os.path.realpath(path)
   for allowed in getConfiguration().instancehome, gettempdir():
     if issubdir(allowed, path) or issubdir(allowed, real_path):
       return real_path
   raise Unauthorized("Unauthorized access to path %r."
                      " It is NOT in your Zope home instance." % path)
Exemple #5
0
def mergeFiles(merges, destination, part=None, test=False, override=None):
  """Merge the JSON files in *merges* into *destination*.

  The merged document (seeded from *part* and any existing destination
  content, then updated with *override*) is written to *destination*, and
  every source file is replaced by a relative symlink to it.

  Args:
      merges: iterable of JSON file paths to fold in.
      destination: path of the merged output file.
      part: optional initial document.
      test: dry-run -- print what would happen instead of writing.
      override: optional dict applied on top of the merged result.
  """
  if os.path.islink(destination):
    # Fix: os.realpath does not exist; resolve the link target with
    # os.path.realpath so we rewrite the real file.
    destination = os.path.realpath(destination)
  if os.path.isfile(destination):
    with open(destination, "r") as f:
      destination_part = json.load(f)
      maybe_update_to_2(destination_part)
      if part:
        part.update(destination_part)
      else:
        part = destination_part
  for fn in merges:
    # Already-converted symlinks are skipped.
    if os.path.islink(fn):
      continue
    with open(fn, "r") as f:
      this_part = json.load(f)
      maybe_update_to_2(this_part)
      if part:
        part = merge(part, this_part)
      else:
        part = this_part

  if override:
    part.update(override)

  for fn in merges:
    target = os.path.relpath(destination, os.path.dirname(fn))
    if not test and fn != target:
      os.remove(fn)
      os.symlink(target, fn)
    else:
      print("symlink " + fn + " -> " + target)
  if not test:
    with open(destination, "w") as f:
      f.write(json.dumps(part, indent=1, sort_keys=True, separators=(',', ': ')))
  else:
    print(destination)
    print(part)
    # Fix: bare 'print' was a Python 2 leftover (a no-op expression in
    # Python 3); emit the intended blank line.
    print()
Exemple #6
0
 def checkWorkingPath(self, path, restricted):
   """Validate *path*, returning its real location if it is a directory.

   When *restricted*, users without the 'Developer' role may only access
   paths under the Zope instance home or the system temp directory.

   Raises:
       Unauthorized: for forbidden paths in restricted mode.
       NotAWorkingCopyError: when the resolved path is not a directory.
   """
   # First remove any '..' to prevent escaping.
   # Note that 'normpath' ignore symlinks so it would not do it correctly.
   parts = path.split(os.sep)
   try:
     # Index just past the *last* '..'; resolve everything up to it so
     # symlinks cannot be used to escape.
     i = len(parts) - parts[::-1].index(os.pardir)
     # Fix: lists have no .join() and os.realpath does not exist -- join
     # with os.sep.join(...) and resolve via os.path.realpath.
     parts[:i] = os.path.realpath(os.sep.join(parts[:i])),
     path = os.sep.join(parts)
   except ValueError:
     # No '..' component at all.
     pass
   # Allow symlinks inside instance home.
   path = os.path.normpath(os.path.expanduser(path))
   real_path = os.path.realpath(path)
   if restricted and not any(
       issubdir(allowed, path) or issubdir(allowed, real_path)
       for allowed in (getConfiguration().instancehome, gettempdir())):
     if 'Developer' not in getSecurityManager().getUser().getRoles():
       raise Unauthorized("Unauthorized access to path %r."
                        " It is NOT in your Zope home instance." % path)
   if os.path.isdir(real_path):
     return real_path
   raise NotAWorkingCopyError(real_path)
Exemple #7
0
 def checkWorkingPath(self, path, restricted):
   """Validate *path*, returning its real location if it is a directory.

   When *restricted*, users without the 'Developer' role may only access
   paths under the Zope instance home or the system temp directory.

   Raises:
       Unauthorized: for forbidden paths in restricted mode.
       NotAWorkingCopyError: when the resolved path is not a directory.
   """
   # First remove any '..' to prevent escaping.
   # Note that 'normpath' ignore symlinks so it would not do it correctly.
   parts = path.split(os.sep)
   try:
     # Index just past the *last* '..'; resolve everything up to it so
     # symlinks cannot be used to escape.
     i = len(parts) - parts[::-1].index(os.pardir)
     # Fix: lists have no .join() and os.realpath does not exist -- join
     # with os.sep.join(...) and resolve via os.path.realpath.
     parts[:i] = os.path.realpath(os.sep.join(parts[:i])),
     path = os.sep.join(parts)
   except ValueError:
     # No '..' component at all.
     pass
   # Allow symlinks inside instance home.
   path = os.path.normpath(os.path.expanduser(path))
   real_path = os.path.realpath(path)
   if restricted and not any(
       issubdir(allowed, path) or issubdir(allowed, real_path)
       for allowed in (getConfiguration().instancehome, gettempdir())):
     if 'Developer' not in getSecurityManager().getUser().getRoles():
       raise Unauthorized("Unauthorized access to path %r."
                        " It is NOT in your Zope home instance." % path)
   if os.path.isdir(real_path):
     return real_path
   raise NotAWorkingCopyError(real_path)
Exemple #8
0
            def _verify_be():
                """Validate and repair the build-environment record ``be``.

                Returns True when a setting was changed (caller should
                re-run the verification), otherwise falls through to the
                fixture import and returns whether anything changed.
                """
                is_changed = False

                def _update_sourcedir():
                    # Reset the layers checkout to the toaster directory.
                    be.sourcedir = os.environ.get('TOASTER_DIR')
                    return True

                if len(be.sourcedir) == 0:
                    print("\n -- Validation: The layers checkout directory must be set.")
                    is_changed = _update_sourcedir()

                if not be.sourcedir.startswith("/"):
                    print("\n -- Validation: The layers checkout directory must be set to an absolute path.")
                    is_changed = _update_sourcedir()

                if is_changed:
                    if be.betype == BuildEnvironment.TYPE_LOCAL:
                        be.needs_import = True
                    return True

                def _update_builddir():
                    # Build directory defaults to <toaster dir>/build.
                    be.builddir = os.environ.get('TOASTER_DIR')+"/build"
                    return True

                if len(be.builddir) == 0:
                    print("\n -- Validation: The build directory must be set.")
                    is_changed = _update_builddir()

                if not be.builddir.startswith("/"):
                    print("\n -- Validation: The build directory must to be set to an absolute path.")
                    is_changed = _update_builddir()

                if is_changed:
                    print("\nBuild configuration saved")
                    be.save()
                    return True

                if be.needs_import:
                    try:
                        print("Loading default settings")
                        call_command("loaddata", "settings")
                        template_conf = os.environ.get("TEMPLATECONF", "")

                        if "poky" in template_conf:
                            print("Loading poky configuration")
                            call_command("loaddata", "poky")
                        else:
                            print("Loading OE-Core configuration")
                            call_command("loaddata", "oe-core")
                            if template_conf:
                                # Fix: os.realpath does not exist; the
                                # function is os.path.realpath.
                                oe_core_path = os.path.realpath(template_conf +
                                                                "/../")
                            else:
                                print("TEMPLATECONF not found. You may have to"
                                      " manually configure layer paths")
                                oe_core_path = input("Please enter the path of"
                                                     " your openembedded-core "
                                                     "layer: ")
                            # Update the layer instances of openemebedded-core
                            for layer in Layer.objects.filter(
                                    name="openembedded-core"):
                                layer.local_source_dir = oe_core_path
                                layer.save()

                        # Import the custom fixture if it's present
                        with warnings.catch_warnings():
                            warnings.filterwarnings(
                                action="ignore",
                                message="^.*No fixture named.*$")
                            print("Importing custom settings if present")
                            call_command("loaddata", "custom")

                        # we run lsupdates after config update
                        print("\nFetching information from the layer index, "
                              "please wait.\nYou can re-update any time later "
                              "by running bitbake/lib/toaster/manage.py "
                              "lsupdates\n")
                        call_command("lsupdates")

                        # we don't look for any other config files
                        return is_changed
                    except Exception as e:
                        print("Failure while trying to setup toaster: %s"
                              % e)
                        traceback.print_exc()

                return is_changed
 def find(self, path):
     """Return the service path registered for *path*, or '' if unknown.

     *path* is resolved through symlinks before the lookup.
     """
     # Fixes: os.realpath does not exist (use os.path.realpath),
     # dict.has_key() was removed in Python 3 (use 'in'), and '.' is
     # PHP-style concatenation -- Python uses '+'.
     path = os.path.realpath(path)
     if path in self.services:
         return '/Services/' + self.services[path].name
     else:
         return ''
Exemple #10
0
#!/usr/bin/env python
"""
PEER Lifelines program task 1A02, Problem SC2.1
SCEC Community Velocity Model, version 2.2 with double-couple point source.
"""
import os
import json
import cst.sord

# Successive assignments are a crude resolution switch: only the last one
# (500 m grid, 2 processes) takes effect.
dx, nproc3 = 2000.0, [1, 1, 1]
dx, nproc3 = 200.0, [1, 2, 30]
dx, nproc3 = 100.0, [1, 4, 60]
dx, nproc3 = 500.0, [1, 1, 2]

# Fix: os.realpath does not exist -- it is os.path.realpath.  Join the
# metadata filename with os.path.join because realpath strips the
# trailing slash, which would otherwise corrupt the path.
mesh = os.path.realpath('repo/SC21-Mesh-%.0f/' % dx)
meta = json.load(open(os.path.join(mesh, 'meta.json')))
dx, dy, dz = meta['delta']
nx, ny, nz = meta['shape']

# Time step (second assignment wins) and source grid indices.
dt = dx / 16000.0
dt = dx / 20000.0
nt = int(50.0 / dt + 1.00001)
j = int(56000.0 / dx + 0.5)
k = int(40000.0 / dx + 0.5)
l = int(14000.0 / dx + 0.5)

prm = {
    'delta': [dx, dy, dz, dt],
    'shape': [nx, ny, nz, nt],
    'bc1': ['pml', 'pml', 'free'],
    'bc2': ['pml', 'pml', 'pml'],
def _ensure_dir(dirPath):  #TODO: remove?
    fullDirPath = os.realpath(dirPath)
    if not os.exists(fullDirPath):
        os.mkdir(fullDirPath)
    if not os.access(fullDirPath, os.W_OK):
        raise Exception("ERROR: restricted directory '{}'".format(dirPath))
Exemple #12
0
from os import dirname, realpath

# Redis connection settings for the report cache.
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
# Polling interval in seconds.
INTERVAL = 60
# Where puppetserver drops its report files.
REPORT_DIR = '/opt/puppetlabs/server/data/puppetserver/reports'
# Directory containing this settings module.
APP_DIR = dirname(realpath(__file__))
# NOTE(review): split("/")[-1] yields the LAST path component (the app
# directory's basename), not a filesystem root -- confirm intent.
ROOT_DIR = APP_DIR.split("/")[-1]
# Redis key prefixes for current and historical reports.
CUR_PREFIX = 'curreport'
HIST_PREFIX = 'histreport'
# How many historical reports to retain.
HIST_REPORTS = 3
"""
The number of seconds old a report can be before we alert
"""
REPORT_WARN = 3600
REPORT_ERROR = 7200

# Debug
FLASK_HOST = '0.0.0.0'
FLASK_PORT = 8000
DEBUG = False
THREADED = True
Exemple #13
0
#!/usr/bin/env python
"""
PEER Lifelines program task 1A02, Problem SC2.1
SCEC Community Velocity Model, version 2.2 with double-couple point source.
"""
import os
import json
import cst.sord

# Successive assignments are a crude resolution switch: only the last one
# (500 m grid, 2 processes) takes effect.
dx, nproc3 = 2000.0, [1, 1, 1]
dx, nproc3 = 200.0, [1, 2, 30]
dx, nproc3 = 100.0, [1, 4, 60]
dx, nproc3 = 500.0, [1, 1, 2]

# Fix: os.realpath does not exist -- it is os.path.realpath.  Join the
# metadata filename with os.path.join because realpath strips the
# trailing slash, which would otherwise corrupt the path.
mesh = os.path.realpath('repo/SC21-Mesh-%.0f/' % dx)
meta = json.load(open(os.path.join(mesh, 'meta.json')))
dx, dy, dz = meta['delta']
nx, ny, nz = meta['shape']

# Time step (second assignment wins) and source grid indices.
dt = dx / 16000.0
dt = dx / 20000.0
nt = int(50.0 / dt + 1.00001)
j = int(56000.0 / dx + 0.5)
k = int(40000.0 / dx + 0.5)
l = int(14000.0 / dx + 0.5)

prm = {
    'delta': [dx, dy, dz, dt],
    'shape': [nx, ny, nz, nt],
    'bc1': ['pml', 'pml', 'free'],
    'bc2': ['pml', 'pml', 'pml'],
Exemple #14
0
            def _verify_be():
                """Validate and repair the build-environment record ``be``.

                Returns True when a setting was changed (caller should
                re-run the verification), otherwise falls through to the
                fixture import and returns whether anything changed.
                """
                is_changed = False

                def _update_sourcedir():
                    # Reset the layers checkout to the toaster directory.
                    be.sourcedir = os.environ.get('TOASTER_DIR')
                    return True

                if len(be.sourcedir) == 0:
                    print(
                        "\n -- Validation: The layers checkout directory must be set."
                    )
                    is_changed = _update_sourcedir()

                if not be.sourcedir.startswith("/"):
                    print(
                        "\n -- Validation: The layers checkout directory must be set to an absolute path."
                    )
                    is_changed = _update_sourcedir()

                if is_changed:
                    if be.betype == BuildEnvironment.TYPE_LOCAL:
                        be.needs_import = True
                    return True

                def _update_builddir():
                    # Build directory defaults to <toaster dir>/build.
                    be.builddir = os.environ.get('TOASTER_DIR') + "/build"
                    return True

                if len(be.builddir) == 0:
                    print("\n -- Validation: The build directory must be set.")
                    is_changed = _update_builddir()

                if not be.builddir.startswith("/"):
                    print(
                        "\n -- Validation: The build directory must to be set to an absolute path."
                    )
                    is_changed = _update_builddir()

                if is_changed:
                    print("\nBuild configuration saved")
                    be.save()
                    return True

                if be.needs_import:
                    try:
                        print("Loading default settings")
                        call_command("loaddata", "settings")
                        template_conf = os.environ.get("TEMPLATECONF", "")

                        if "poky" in template_conf:
                            print("Loading poky configuration")
                            call_command("loaddata", "poky")
                        else:
                            print("Loading OE-Core configuration")
                            call_command("loaddata", "oe-core")
                            if template_conf:
                                # Fix: os.realpath does not exist; the
                                # function is os.path.realpath.
                                oe_core_path = os.path.realpath(template_conf +
                                                                "/../")
                            else:
                                print("TEMPLATECONF not found. You may have to"
                                      " manually configure layer paths")
                                oe_core_path = input("Please enter the path of"
                                                     " your openembedded-core "
                                                     "layer: ")
                            # Update the layer instances of openemebedded-core
                            for layer in Layer.objects.filter(
                                    name="openembedded-core"):
                                layer.local_source_dir = oe_core_path
                                layer.save()

                        # Import the custom fixture if it's present
                        with warnings.catch_warnings():
                            warnings.filterwarnings(
                                action="ignore",
                                message="^.*No fixture named.*$")
                            print("Importing custom settings if present")
                            call_command("loaddata", "custom")

                        # we run lsupdates after config update
                        print("\nFetching information from the layer index, "
                              "please wait.\nYou can re-update any time later "
                              "by running bitbake/lib/toaster/manage.py "
                              "lsupdates\n")
                        call_command("lsupdates")

                        # we don't look for any other config files
                        return is_changed
                    except Exception as e:
                        print("Failure while trying to setup toaster: %s" % e)
                        traceback.print_exc()

                return is_changed
Exemple #15
0
    def _getDataHome(self):
        """Collect the real path of each user home under ``<root>/home``.

        NOTE(review): the visible snippet never returns ``dirs`` and
        called ``os.realpath()`` with no argument -- it looks truncated.
        Resolving ``usrhome`` is the only plausible argument in scope;
        confirm against the full source.
        """
        dirs = {'data': []}
        for homedir in os.listdir(os.path.join(self._rootDir, 'home')):
            usrhome = os.path.join(self._rootDir, 'home', homedir)

            # Fix: os.realpath does not exist (it is os.path.realpath)
            # and was called without an argument.
            dirs['data'].append(os.path.realpath(usrhome))
Exemple #16
0
import glob
import os
import shutil
import subprocess
import sys

from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext

from distutils import log

if not hasattr(sys, 'version_info') or sys.version_info < (3, 6):
    raise SystemExit("GodoPy requires Python version 3.6 or above.")


# Run the rest of setup.py from the directory that contains it.
# Fixes: 'import os' was missing from this import group, and os.realpath
# does not exist -- the function is os.path.realpath.
if os.path.realpath(os.path.dirname(__file__)) != os.path.realpath(os.getcwd()):
    os.chdir(os.path.realpath(os.path.dirname(__file__)))


def get_godot_exe():
    """Locate a Godot executable inside the ``GODOT_BUILD`` directory.

    Returns the last matching candidate; exits with an error message when
    the environment variable is unset or nothing matches.
    """
    build_dir = os.environ.get('GODOT_BUILD')
    if not build_dir:
        raise SystemExit("'GODOT_BUILD' environment variable is required.")

    candidates = crossplat_exe_glob(build_dir, 'godot.*.64')
    if not candidates:
        raise SystemExit("Can't find Godot executable.")

    return candidates.pop()

Exemple #17
0
def attach(target,
           gdbscript='',
           exe=None,
           gdb_args=None,
           ssh=None,
           sysroot=None,
           r2cmd=None):
    r"""
    Start GDB in a new terminal and attach to `target`.

    Arguments:
        target: The target to attach to.
        gdbscript(:obj:`str` or :obj:`file`): GDB script to run after attaching.
        exe(str): The path of the target binary.
        arch(str): Architechture of the target binary.  If `exe` known GDB will
          detect the architechture automatically (if it is supported).
        gdb_args(list): List of additional arguments to pass to GDB.
        sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries
            and Android targets.

    Returns:
        PID of the GDB process (or the window which it is running in).

    Notes:

        The ``target`` argument is very robust, and can be any of the following:

        :obj:`int`
            PID of a process
        :obj:`str`
            Process name.  The youngest process is selected.
        :obj:`tuple`
            Host, port pair of a listening ``gdbserver``
        :class:`.process`
            Process to connect to
        :class:`.sock`
            Connected socket. The executable on the other end of the connection is attached to.
            Can be any socket type, including :class:`.listen` or :class:`.remote`.
        :class:`.ssh_channel`
            Remote process spawned via :meth:`.ssh.process`.
            This will use the GDB installed on the remote machine.
            If a password is required to connect, the ``sshpass`` program must be installed.

    Examples:

        Attach to a process by PID

        >>> pid = gdb.attach(1234) # doctest: +SKIP

        Attach to the youngest process by name

        >>> pid = gdb.attach('bash') # doctest: +SKIP

        Attach a debugger to a :class:`.process` tube and automate interaction

        >>> io = process('bash')
        >>> pid = gdb.attach(io, gdbscript='''
        ... call puts("Hello from process debugger!")
        ... detach
        ... quit
        ... ''')
        >>> io.recvline()
        b'Hello from process debugger!\n'
        >>> io.sendline('echo Hello from bash && exit')
        >>> io.recvall()
        b'Hello from bash\n'

        Attach to the remote process from a :class:`.remote` or :class:`.listen` tube,
        as long as it is running on the same machine.

        >>> server = process(['socat', 'tcp-listen:12345,reuseaddr,fork', 'exec:/bin/bash,nofork'])
        >>> sleep(1) # Wait for socat to start
        >>> io = remote('127.0.0.1', 12345)
        >>> sleep(1) # Wait for process to fork
        >>> pid = gdb.attach(io, gdbscript='''
        ... call puts("Hello from remote debugger!")
        ... detach
        ... quit
        ... ''')
        >>> io.recvline()
        b'Hello from remote debugger!\n'
        >>> io.sendline('echo Hello from bash && exit')
        >>> io.recvall()
        b'Hello from bash\n'

        Attach to processes running on a remote machine via an SSH :class:`.ssh` process

        >>> shell = ssh('travis', 'example.pwnme', password='******')
        >>> io = shell.process(['cat'])
        >>> pid = gdb.attach(io, gdbscript='''
        ... call sleep(5)
        ... call puts("Hello from ssh debugger!")
        ... detach
        ... quit
        ... ''')
        >>> io.recvline(timeout=5)  # doctest: +SKIP
        b'Hello from ssh debugger!\n'
        >>> io.sendline('This will be echoed back')
        >>> io.recvline()
        b'This will be echoed back\n'
        >>> io.close()
    """
    if context.noptrace:
        log.warn_once("Skipping debug attach since context.noptrace==True")
        return

    # if gdbscript is a file object, then read it; we probably need to run some
    # more gdb script anyway
    if hasattr(gdbscript, 'read'):
        with gdbscript:
            gdbscript = gdbscript.read()

    # enable gdb.attach(p, 'continue')
    if gdbscript and not gdbscript.endswith('\n'):
        gdbscript += '\n'

    # Use a sane default sysroot for Android
    if not sysroot and context.os == 'android':
        sysroot = 'remote:/'

    # gdb script to run before `gdbscript`
    pre = ''
    if not context.native:
        pre += 'set endian %s\n' % context.endian
        pre += 'set architecture %s\n' % get_gdb_arch()
        if sysroot:
            pre += 'set sysroot %s\n' % sysroot

        if context.os == 'android':
            pre += 'set gnutarget ' + _bfdname() + '\n'

        if exe:
            pre += 'file %s\n' % exe

    # let's see if we can find a pid to attach to
    pid = None
    if isinstance(target, six.integer_types):
        # target is a pid, easy peasy
        pid = target
    elif isinstance(target, str):
        # pidof picks the youngest process
        pidof = proc.pidof

        if context.os == 'android':
            pidof = adb.pidof

        pids = list(pidof(target))
        if not pids:
            log.error('No such process: %s' % target)
        pid = pids[0]
        log.info('Attaching to youngest process "%s" (PID = %d)' %
                 (target, pid))
    elif isinstance(target, tubes.ssh.ssh_channel):
        if not target.pid:
            log.error("PID unknown for channel")

        shell = target.parent

        tmpfile = shell.mktemp()
        if six.PY3:
            tmpfile = tmpfile.decode()
        gdbscript = 'shell rm %s\n%s' % (tmpfile, gdbscript)
        shell.upload_data(gdbscript or '', tmpfile)

        cmd = [
            'ssh', '-C', '-t', '-p',
            str(shell.port), '-l', shell.user, shell.host
        ]
        if shell.password:
            if not misc.which('sshpass'):
                log.error("sshpass must be installed to debug ssh processes")
            cmd = ['sshpass', '-p', shell.password] + cmd
        if shell.keyfile:
            cmd += ['-i', shell.keyfile]
        exefile = target.executable
        cmd += ['gdb -q %s %s -x "%s"' % (exefile, target.pid, tmpfile)]

        misc.run_in_new_terminal(' '.join(cmd))
        return

    elif isinstance(target, tubes.sock.sock):
        pids = proc.pidof(target)
        if not pids:
            log.error('Could not find remote process (%s:%d) on this machine' %
                      target.sock.getpeername())
        pid = pids[0]

        # Specifically check for socat, since it has an intermediary process
        # if you do not specify "nofork" to the EXEC: argument
        # python(2640)───socat(2642)───socat(2643)───bash(2644)
        if proc.exe(pid).endswith('/socat') and time.sleep(
                0.1) and proc.children(pid):
            pid = proc.children(pid)[0]

        # We may attach to the remote process after the fork but before it performs an exec.
        # If an exe is provided, wait until the process is actually running the expected exe
        # before we attach the debugger.
        t = Timeout()
        with t.countdown(2):
            # Fix: os.realpath does not exist -- use os.path.realpath to
            # compare the resolved executable paths.
            while exe and os.path.realpath(
                    proc.exe(pid)) != os.path.realpath(exe) and t.timeout:
                time.sleep(0.1)

    elif isinstance(target, tubes.process.process):
        pid = proc.pidof(target)[0]
        exe = exe or target.executable
    elif isinstance(target, tuple) and len(target) == 2:
        host, port = target

        if context.os != 'android':
            pre += 'target remote %s:%d\n' % (host, port)
        else:
            # Android debugging is done over gdbserver, which can't follow
            # new inferiors (tldr; follow-fork-mode child) unless it is run
            # in extended-remote mode.
            pre += 'target extended-remote %s:%d\n' % (host, port)
            pre += 'set detach-on-fork off\n'

        def findexe():
            # Infer the debuggee from the qemu/gdbserver command line.
            for spid in proc.pidof(target):
                sexe = proc.exe(spid)
                name = os.path.basename(sexe)
                # XXX: parse cmdline
                if name.startswith('qemu-') or name.startswith('gdbserver'):
                    exe = proc.cmdline(spid)[-1]
                    return os.path.join(proc.cwd(spid), exe)

        exe = exe or findexe()
    elif isinstance(target, elf.corefile.Corefile):
        pre += 'target core %s\n' % target.path
    else:
        log.error("don't know how to attach to target: %r" % target)

    # if we have a pid but no exe, just look it up in /proc/
    if pid and not exe:
        exe_fn = proc.exe
        if context.os == 'android':
            exe_fn = adb.proc_exe
        exe = exe_fn(pid)

    if not pid and not exe and not ssh:
        log.error('could not find target process')

    # NOTE(review): binary() is called only for its side effects (it
    # errors out when no debugger is installed); its result is discarded
    # because the command is rebuilt for radare2 below.
    cmd = binary()
    cmd = ""

    if gdb_args:
        cmd += ' '
        cmd += ' '.join(gdb_args)

    if context.gdbinit:
        cmd += ' -nh '  # ignore ~/.gdbinit
        cmd += ' -x %s ' % context.gdbinit  # load custom gdbinit

    cmd += ' -q '

    if exe and context.native:
        if not ssh and not os.path.isfile(exe):
            log.error('No such file: %s' % exe)
        cmd += ' "%s"' % exe

    if pid and not context.os == 'android':
        cmd += ' %d' % pid

    if context.os == 'android' and pid:
        runner = _get_runner()
        which = _get_which()
        # NOTE(review): 'env' is not defined anywhere in this function --
        # this branch would raise NameError; confirm intended source.
        gdb_cmd = _gdbserver_args(pid=pid, which=which, env=env)
        gdbserver = runner(gdb_cmd)
        port = _gdbserver_port(gdbserver, None)
        host = context.adb_host
        pre += 'target extended-remote %s:%i\n' % (context.adb_host, port)

        # gdbserver on Android sets 'detach-on-fork on' which breaks things
        # when you're trying to debug anything that forks.
        pre += 'set detach-on-fork off\n'

    gdbscript = pre + (gdbscript or '')

    if gdbscript:
        tmp = tempfile.NamedTemporaryFile(prefix='pwn',
                                          suffix='.gdb',
                                          delete=False,
                                          mode='w+')
        log.debug('Wrote gdb script to %r\n%s' % (tmp.name, gdbscript))
        gdbscript = 'shell rm %s\n%s' % (tmp.name, gdbscript)

        tmp.write(gdbscript)
        tmp.close()
        cmd += ' -x %s' % (tmp.name)

    # The gdb command line assembled above is replaced by a radare2
    # invocation (this fork drives r2 instead of gdb).
    if r2cmd is not None:
        cmd = "r2 -c '{}' -d {}".format(r2cmd, pid)
    else:
        cmd = "r2 -d {}".format(pid)
    print("Command: {}".format(cmd))

    log.info('running in new terminal: %s' % cmd)

    gdb_pid = misc.run_in_new_terminal(cmd)

    if pid and context.native:
        proc.wait_for_debugger(pid, gdb_pid)

    return gdb_pid
Exemple #18
0
def _start(
    hg_name="default",
    data_dir=None,
    port=8989,
    temp_dir=op.join(TEMP_DIR, "higlass-docker"),
    version="latest",
    site_url=None,
    media_dir=None,
    public_data=True,
    default_track_options=None,
    workers=None,
    use_redis=False,
    redis_dir=None,
    hg_repository="higlass/higlass-docker",
    redis_repository="redis",
    redis_tag="5.0.3-alpine",
    redis_port=6379,
    yes=False,
):
    """
    Start a HiGlass instance in a Docker container.

    Stops and removes any previously running container for this instance
    name, optionally starts a companion Redis container on a shared bridge
    network, launches the HiGlass container with the data/temp/media
    directories bind-mounted, and polls the viewconf API until the server
    answers.  When ``public_data`` is False or ``default_track_options``
    is given, the bundled JS bundle is patched in place inside the
    container (via ``exec_run`` sed commands).

    Parameters
    ----------
    hg_name : str
        Instance name; container and network names are derived from it.
    data_dir : str or None
        Host directory mounted at /data.  Defaults to
        ``~/.higlass/containers/<hg_name>``.
    port : int
        Host port mapped to the container's port 80.
    temp_dir : str
        Host directory mounted at /tmp inside the container.
    version : str
        Image tag to pull, or "local" to use the locally built
        "image-default" image.
    site_url : str or None
        Value for the SITE_URL environment variable, if given.
    media_dir : str or None
        Host directory mounted at /media (sets HIGLASS_MEDIA_ROOT).
    public_data : bool
        When False, restrict track sources to the local API.
    default_track_options : str or None
        Path to a JSON file of default track options patched into the app.
    workers : int or None
        Value for the WORKERS environment variable, if given.
    use_redis : bool
        Start a Redis container and point HiGlass at it.
    redis_dir : str or None
        Host directory for Redis data; defaults to ``<data_dir>/redis``.
    hg_repository, redis_repository, redis_tag : str
        Docker repositories/tags to pull.
    redis_port : int
        Port passed to HiGlass via REDIS_PORT.
    yes : bool
        Assume "yes" to interactive prompts (create a missing data
        directory without asking).
    """
    click.echo('Starting HiGlass instance "{}" on port {}...\n'.format(
        hg_name, port))

    hg_container_name = "{}-{}".format(CONTAINER_PREFIX, hg_name)

    if data_dir is None:
        container_root = op.join(HOME_DIR, ".higlass", "containers")
        data_dir = op.join(container_root, hg_name)

    data_dir = op.realpath(op.expanduser(data_dir))

    if not op.exists(data_dir):
        # BUG FIX: previously, `yes=True` skipped both the prompt AND the
        # makedirs, so the directory was never created.  `yes` means
        # "don't ask, just do it"; prompt (aborting on decline) otherwise.
        if yes or click.confirm(
                'Data directory "{}" does not exist. Create?'.format(data_dir),
                abort=True):
            os.makedirs(data_dir)
            click.echo('Created data directory {}'.format(data_dir))
    else:
        click.echo('Using data directory {}'.format(data_dir))

    temp_dir = op.realpath(op.expanduser(temp_dir))
    if not op.exists(temp_dir):
        os.makedirs(temp_dir)

    client = docker.from_env()

    # Replace any container left over from a previous run of this instance.
    try:
        hg_container = client.containers.get(hg_container_name)
        sys.stderr.write("Stopping previously running container\n")
        hg_container.stop()
        hg_container.remove()
    except docker.errors.NotFound:
        # container isn't running so no need to stop it
        pass
    except requests.exceptions.ConnectionError:
        sys.stderr.write(
            "Error connecting to the Docker daemon, make sure it is started "
            "and you are logged in.\n")
        return

    if use_redis:
        if redis_dir is None:
            redis_dir = op.join(data_dir, 'redis')
        # BUG FIX: was os.expanduser — `expanduser` lives in os.path,
        # so this raised AttributeError whenever use_redis was set.
        redis_dir = op.realpath(op.expanduser(redis_dir))
        click.echo('Starting redis [redis_dir: {}]'.format(redis_dir))

        network_name = "{}-{}".format(NETWORK_PREFIX, hg_name)
        redis_name = "{}-{}".format(REDIS_PREFIX, hg_name)

        # set up a bridge network for Redis and higlass containers to share
        try:
            network_list = client.networks.list(names=[network_name])
            if network_list:
                network = client.networks.get(network_name)
                sys.stderr.write(
                    "Attempting to remove existing Docker network instance\n")
                network.remove()
        except docker.errors.APIError:
            sys.stderr.write(
                "Error: Could not access Docker network list to remove "
                "existing network.\n")
            sys.exit(-1)

        try:
            # https://docker-py.readthedocs.io/en/stable/networks.html
            network = client.networks.create(network_name, driver="bridge")
        except docker.errors.APIError as err:
            sys.stderr.write(
                "Error: Could not access Docker network ({}).\n".format(err))
            sys.exit(-1)

        # clear up any running Redis container
        try:
            redis_container = client.containers.get(redis_name)
            sys.stderr.write("Stopping previously running Redis container\n")
            redis_container.stop()
            redis_container.remove()
        except docker.errors.NotFound:
            pass
        except requests.exceptions.ConnectionError:
            sys.stderr.write(
                "Error: Error connecting to the Docker daemon, make sure it "
                "is started and you are logged in.\n")
            sys.exit(-1)

        # pull Redis image
        sys.stderr.write("Pulling {}:{}\n".format(redis_repository, redis_tag))
        sys.stderr.flush()
        redis_image = client.images.pull(redis_repository, tag=redis_tag)
        sys.stderr.write("done\n")
        sys.stderr.flush()

        # set up Redis container settings and environment
        if not op.exists(redis_dir):
            os.makedirs(redis_dir)

        # The redis.conf shipped alongside this module is mounted into
        # the container at REDIS_CONF.
        redis_conf = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  "redis", "redis.conf")
        if not os.path.exists(redis_conf):
            sys.stderr.write(
                "Error: Could not locate Redis configuration file "
                "[{}]\n".format(redis_conf))
            sys.exit(-1)

        redis_volumes = {
            redis_dir: {
                "bind": "/data",
                "mode": "rw"
            },
            redis_conf: {
                "bind": REDIS_CONF,
                "mode": "rw"
            },
        }

        redis_command = "redis-server {}".format(REDIS_CONF)

        try:
            # run Redis container
            redis_container = client.containers.run(
                redis_image,
                redis_command,
                name=redis_name,
                network=network_name,
                volumes=redis_volumes,
                detach=True,
            )
        except docker.errors.ContainerError as err:
            sys.stderr.write(
                "Error: Redis container could not be started\n{}\n".format(
                    err))
            sys.exit(-1)
        except docker.errors.ImageNotFound as err:
            sys.stderr.write("Error: Redis container image could not be found"
                             "\n{}\n".format(err))
            sys.exit(-1)
        except docker.errors.APIError as err:
            sys.stderr.write(
                "Error: Redis container server ran into a fatal error"
                "\n{}\n".format(err))
            sys.exit(-1)

    if version == "local":
        hg_image = client.images.get("image-default")
    else:
        sys.stderr.write("Pulling latest image... \n")
        sys.stderr.flush()
        hg_image = client.images.pull(hg_repository, version)
        sys.stderr.write("done\n")
        sys.stderr.flush()

    hg_environment = {}

    if site_url is not None:
        hg_environment["SITE_URL"] = site_url

    if workers is not None:
        hg_environment["WORKERS"] = workers

    sys.stderr.write("Data directory: {}\n".format(data_dir))
    sys.stderr.write("Temp directory: {}\n".format(temp_dir))
    hg_volumes = {
        temp_dir: {
            "bind": "/tmp",
            "mode": "rw"
        },
        data_dir: {
            "bind": "/data",
            "mode": "rw"
        },
    }

    if media_dir is not None:
        # BUG FIX: was os.realpath(os.expanduser(...)) — both functions
        # live in os.path, so this raised AttributeError whenever a
        # media directory was supplied.
        media_dir = op.realpath(op.expanduser(media_dir))
        hg_volumes[media_dir] = {"bind": "/media", "mode": "rw"}
        hg_environment["HIGLASS_MEDIA_ROOT"] = "/media"

    if not use_redis:
        hg_container = client.containers.run(
            hg_image,
            ports={80: port},
            volumes=hg_volumes,
            name=hg_container_name,
            environment=hg_environment,
            detach=True,
        )
    else:
        # add some environment variables to the higlass container
        hg_environment["REDIS_HOST"] = redis_name
        hg_environment["REDIS_PORT"] = redis_port
        # run the higlass container on the shared network with the Redis container
        hg_container = client.containers.run(
            hg_image,
            network=network_name,
            ports={80: port},
            volumes=hg_volumes,
            name=hg_container_name,
            environment=hg_environment,
            publish_all_ports=True,
            detach=True,
        )

    sys.stderr.write("Docker started: {}\n".format(hg_container_name))

    # Poll the viewconf endpoint until the server inside the container is
    # ready to answer; `req` is reused below when public_data is False.
    started = False
    counter = 1
    while not started:
        try:
            sys.stderr.write("sending request {}\n".format(counter))
            counter += 1
            req = requests.get(
                "http://localhost:{}/api/v1/viewconfs/?d=default".format(port),
                timeout=5,
            )

            if req.status_code != 200:
                sys.stderr.write(
                    "Non 200 status code returned ({}), waiting...\n".format(
                        req.status_code))
                time.sleep(0.5)
            else:
                started = True
        except requests.exceptions.ConnectionError:
            sys.stderr.write("Waiting to start (tilesets)...\n")
            time.sleep(0.5)
        # BUG FIX: was requests.exceptions.ReadTimout (typo) — the except
        # clause itself raised AttributeError on any read timeout.
        except requests.exceptions.ReadTimeout:
            sys.stderr.write("Request timed out\n")
            time.sleep(0.5)

    sys.stderr.write("public_data: {}\n".format(public_data))

    if not public_data or default_track_options is not None:
        # we're going to be changing the higlass js file so first we copy it
        # to a location with a new hash (so browser caches are invalidated)
        new_hash = slugid.nice()

        sed_command = """bash -c 'cp higlass-app/static/js/main.*.chunk.js higlass-app/static/js/main.{}.chunk.js'""".format(
            new_hash)

        ret = hg_container.exec_run(sed_command)

    if not public_data:
        # Point the track source at the local API only, then store that
        # viewconf under a new uid and patch the JS bundle to use it.
        config = json.loads(req.content.decode("utf-8"))
        config["trackSourceServers"] = ["/api/v1"]
        started = True
        config = {"uid": "default_local", "viewconf": config}

        ret = hg_container.exec_run(
            """python higlass-server/manage.py shell --command="import tilesets.models as tm; o = tm.ViewConf.objects.get(uuid='default_local'); o.delete();" """
        )
        ret = requests.post(
            "http://localhost:{}/api/v1/viewconfs/".format(port), json=config)
        sys.stderr.write("ret: {}\n".format(ret.content))
        # NOTE: removed a no-op .format(new_hash) — the sed string below
        # contains no placeholders.
        ret = hg_container.exec_run(
            """bash -c 'sed -i '"'"'s/"default"/"default_local"/g'"'"' higlass-app/static/js/main.*.chunk.js'"""
        )
        sys.stderr.write("ret: {}\n".format(ret))

    if default_track_options is not None:
        # Inject defaultOptions into the bundled app's option assignment.
        with open(default_track_options, "r") as f:
            default_options_json = json.load(f)

            sed_command = """bash -c 'sed -i '"'"'s/assign({{}},this.props.options/assign({{defaultOptions: {} }},this.props.options/g'"'"' """.format(
                json.dumps(default_options_json))
            sed_command += " higlass-app/static/js/main.*.chunk.js'"

            ret = hg_container.exec_run(sed_command)

    # Invalidate the precache manifest so the service worker does not serve
    # the stale (pre-patch) JS bundle or index page.
    sed_command = (
        """bash -c 'sed -i '"'"'s/main.*.chunk.js/main.invalid.chunk.js/g'"'"' """
    )
    sed_command += " higlass-app/precache-manifest.*.js'"

    ret = hg_container.exec_run(sed_command)

    sed_command = """bash -c 'sed -i '"'"'s/index.html/index_invalidated_by_higlass_manage/g'"'"' """
    sed_command += " higlass-app/precache-manifest.*.js'"

    ret = hg_container.exec_run(sed_command)
    sys.stderr.write("Replaced js file\n")

    sys.stderr.write("Started\n")