Example #1
def start_stop(num: int,
               interval: int,
               pause: float = 0.1,
               vms=None):
  """ Like freezing, but do not make any. """
  import shlex
  from subprocess import Popen
  from time import sleep
  for vm in vms:
    if vm.bname == 'sdag':
      break


  PERF = "/home/sources/perf_lite"
  CMD = "{perf} kvm stat -e instructions,cycles -o {out} -x, -I {subinterval} -p {pid}"
  out = "results/limit/start_stop_f_subint1_%s.csv" % vm.bname
  cmd = CMD.format(perf=PERF, pid=vm.pid, out=out, subinterval=1)
  p = Popen(shlex.split(cmd))

  for i in range(1, num+1):
    #print("%s out of %s" % (i, num))
    if pause:
      sleep(pause)
    vm.exclusive()
    sleep(interval/1000)
    vm.shared()
  print("done")
  p.send_signal(2)  # SIGINT
  return None
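
start_stop drives a perf sampling session around repeated exclusive/shared phases of a VM. Below is a minimal stand-in for the vms argument, assuming only the attributes the function actually touches (bname, pid, exclusive(), shared()); all names here are hypothetical:

class StubVM:
    # Hypothetical stand-in for the harness VM objects used by start_stop().
    def __init__(self, bname, pid):
        self.bname = bname  # benchmark name, matched against 'sdag'
        self.pid = pid      # pid passed to perf via -p
    def exclusive(self):
        pass  # the real harness would freeze sibling VMs here
    def shared(self):
        pass  # the real harness would resume shared execution here

# Requires the perf_lite binary referenced above:
# start_stop(num=100, interval=10, vms=[StubVM('sdag', 4242)])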
class Process:
	def __init__(self, args, name=None, keep_alive=False):
		self.args = args
		self.proc = None
		self.name = name
		self.keep_alive = keep_alive
		if self.name is None:
			self.name = "%s" % self.args

	def start(self):
		if not self.running():
			rospy.loginfo("Starting process %s" % self.name)
			self.proc = Popen(self.args)
		else:
			rospy.loginfo("Process %s already started" % self.name)

	def stop(self):
		if self.proc is not None:
			if self.running():
				rospy.loginfo("Stopping process %s" % self.name)
				self.proc.send_signal(signal.SIGINT)
				self.proc.wait()
			else:
				rospy.loginfo("Process %s already stopped" % self.name)

	def restart(self):
		rospy.loginfo("Restarting process %s" % self.name)
		self.stop()
		time.sleep(1) #TODO
		self.start()

	def running(self):
		started = self.proc is not None
		running = started and self.proc.poll() is None
		return running
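
A short usage sketch for the wrapper above, assuming a ROS environment with rospy initialized and the module-level imports the class relies on:

import signal
import time
from subprocess import Popen

import rospy

rospy.init_node('process_demo')

proc = Process(['rostopic', 'echo', '/tf'], name='tf_echo')
proc.start()    # logs "Starting process tf_echo" and spawns the child
time.sleep(5)
proc.restart()  # SIGINT + wait, a one-second pause, then start again
proc.stop()     # SIGINT + wait, or just a log line if already stopped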
def vpn_manager(ovpn):
    """ Check VPN season
        If vpn tunnel break or fail to create, terminate vpn season
        So openvpn not keep sending requests to proxy server and
         save you from being blocked.
    """
    global dns, verbose, dropped_time

    command = ['openvpn', '--config', ovpn]
    p = Popen(command, stdout=PIPE, stdin=PIPE)
    try:
        while p.poll() is None:
            line = p.stdout.readline()
            if verbose == 'yes':
                print line,
            if 'Initialization Sequence Completed' in line:
                dropped_time = 0
                dns_manager('change', dns)
                print ctext('VPN tunnel established successfully'.center(40), 'B')
                print 'Ctrl+C to quit VPN'.center(40)
            elif 'Restart pause, ' in line and dropped_time <= max_retry:
                dropped_time += 1
                print ctext('Vpn has restarted %s time' % dropped_time, 'rB')
            elif dropped_time == max_retry or 'Connection timed out' in line or 'Cannot resolve' in line:
                dropped_time = 0
                print line
                print ctext('Terminate vpn', 'B')
                p.send_signal(signal.SIGINT)
    except KeyboardInterrupt:
        p.send_signal(signal.SIGINT)
        p.wait()
        print ctext('VPN tunnel is terminated'.center(40), 'B')
    finally:
        dns_manager('restore')
Example #4
def run_tests(tmpdir):
    socket_file = os.path.join(tmpdir, "logductd.sock")
    logs_dir = os.path.join(tmpdir, "logs")
    daemon = Popen([sys.executable, "-m", "logduct.daemon", "-s", socket_file, "-d", logs_dir, "--trust-blindly"])

    unit = "dummyunit"
    stdio_log = os.path.join(logs_dir, unit, "stdio.log")
    third_log = os.path.join(logs_dir, unit, "third.log")
    try:
        wait_until_exists(socket_file)

        # stdio
        check_call([sys.executable, "-m", "logduct.run", "-s", socket_file, "-u", unit, "echo", "hello"])
        wait_until_exists(stdio_log)

        data = slurp(stdio_log)
        match = re.match(r"\d\d:\d\d:\d\d.\d\d\d (unknown|echo)\[\d+\]: hello\n", data)
        assert match

        # pipe fd
        check_call([sys.executable, "-m", "logduct.run", "-s", socket_file, "-u", unit, "--fd", "3:third",
                    "--no-stdio", "bash", "-c", "echo there >&3"])
        wait_until_exists(third_log)

        data = slurp(third_log)
        match = re.match(r"\d\d:\d\d:\d\d.\d\d\d: there\n", data)
        assert match
    finally:
        daemon.send_signal(signal.SIGTERM)
        time.sleep(0.2)
        daemon.kill()
Example #5
    def run(self):
        try:
            if osflag:
                proc=Popen(self.cmd,shell=False,stdin=None,stdout=PIPE,\
                    stderr=STDOUT,bufsize=0)
            else:
                from subprocess import STARTUPINFO
                si=STARTUPINFO()
                si.dwFlags|=1
                si.wShowWindow=0
                proc=Popen(self.cmd,shell=False,stdin=None,stdout=PIPE,\
                    stderr=STDOUT,bufsize=0,startupinfo=si)
            while 1:
                if self.stop_flag:
                    if osflag: proc.send_signal(signal.SIGKILL)
                    else: proc.kill()
                    break
                if osflag:
                    if proc.stdout in select.select([proc.stdout],[],[],1)[0]:
                        line=proc.stdout.readline()
                    else: line=' \n'
                else: line=proc.stdout.readline()
                if not len(line): break
                else:
                    if count(line,'ttl') or count(line,'TTL'): self.retries=0
                    else: self.retries=self.retries+1
                    line=' '
                sleep(0.5)
            proc.poll()
        except: pass
Example #6
    def execute_sequence(self, sequence):
        def purify_args(args):
            for rm_arg in ['-h, --hardware', ]:
                try:
                    args.remove(rm_arg)
                except ValueError:
                    pass
            for add_arg in ['--no-gui', '--server']:
                args.append(add_arg)
            return args

        while True:
            for command in sequence['sequence']:
                args = split(command['command'])
                cwd = join(realpath(dirname(__file__)), '..', command['dir'])
                args[0] = join(cwd, args[0])
                process = Popen(purify_args(args), cwd=cwd)
                print "Starting "+str(args)
                reason = self.wait(command['timeout'], command['interruptible'], process) # TODO interruptible raw_input in new_thread for 2.7, exec with timeout= for 3
                print "End:", reason
                if reason!='terminated':
                    process.terminate()  # SIGTERM
                    process.send_signal(SIGINT)
                    process.wait() # should poll() and kill() if it does not close?
            if not sequence['infinite']:
                break
def test_shell_background_support_setsid(both_debug_modes, setsid_enabled):
    """In setsid mode, dumb-init should suspend itself and its children when it
    receives SIGTSTP, SIGTTOU, or SIGTTIN.
    """
    proc = Popen(
        ('dumb-init', sys.executable, '-m', 'tests.lib.print_signals'),
        stdout=PIPE,
    )
    match = re.match(b'^ready \(pid: ([0-9]+)\)\n$', proc.stdout.readline())
    pid = match.group(1).decode('ascii')

    for signum in SUSPEND_SIGNALS:
        # both dumb-init and print_signals should be running or sleeping
        assert process_state(pid) in ['running', 'sleeping']
        assert process_state(proc.pid) in ['running', 'sleeping']

        # both should now suspend
        proc.send_signal(signum)

        os.waitpid(proc.pid, os.WUNTRACED)
        assert process_state(proc.pid) == 'stopped'
        assert process_state(pid) == 'stopped'

        # and then both wake up again
        proc.send_signal(SIGCONT)
        assert (
            proc.stdout.readline() == '{0}\n'.format(SIGCONT).encode('ascii')
        )
        assert process_state(pid) in ['running', 'sleeping']
        assert process_state(proc.pid) in ['running', 'sleeping']

    for pid in pid_tree(proc.pid):
        os.kill(pid, SIGKILL)
Example #8
def sh(cd, f):
        simLog(f, 'CMD: ' + ' '.join(cd))
        simLog(f, 'DIR: ' + HTML + 'upload/')
        sub = Popen(cd, cwd = HTML + 'upload/', stdout = PIPE, stderr = PIPE, stdin = PIPE, close_fds=True, preexec_fn = os.setsid)

        try:
                print 'Sub Id: ', sub.pid
#               while sub.poll() is None:
                while True:
                        sub.stdout.flush()
                        tstr = sub.stdout.readline()
                        if sub.poll() is not None and not tstr:
                                break
                        simLog(f, tstr.strip('\n'))
                        f.flush()
                        time.sleep(0.1)
                retcode = sub.poll()
                simLog(f, 'Poll: ' + str(retcode))
                retcode = sub.wait()
                simLog(f, 'Wait: ' + str(retcode))
                f.flush()
        except KeyboardInterrupt:
                sub.send_signal(signal.SIGTERM)
                sub.terminate()
                sub.kill()
                os.killpg(sub.pid,signal.SIGTERM)
                sub.wait()
                print 'Kill Sub'
        except Exception, ex:
                print ex
def test_shell_background_support_without_setsid(both_debug_modes, setsid_disabled):
    """In non-setsid mode, dumb-init should forward the signals SIGTSTP,
    SIGTTOU, and SIGTTIN, and then suspend itself.
    """
    proc = Popen(
        ('dumb-init', sys.executable, '-m', 'tests.lib.print_signals'),
        stdout=PIPE,
    )

    assert re.match(b'^ready \(pid: (?:[0-9]+)\)\n$', proc.stdout.readline())

    for signum in SUSPEND_SIGNALS:
        assert process_state(proc.pid) in ['running', 'sleeping']
        proc.send_signal(signum)
        assert proc.stdout.readline() == '{0}\n'.format(signum).encode('ascii')
        os.waitpid(proc.pid, os.WUNTRACED)
        assert process_state(proc.pid) == 'stopped'

        proc.send_signal(SIGCONT)
        assert (
            proc.stdout.readline() == '{0}\n'.format(SIGCONT).encode('ascii')
        )
        assert process_state(proc.pid) in ['running', 'sleeping']

    for pid in pid_tree(proc.pid):
        os.kill(pid, SIGKILL)
Example #10
class pd(object):
    @staticmethod
    def _getPdBin(pdbin):
        if pdbin is None:
            if "PD_BIN" in os.environ:
                return os.environ["PD_BIN"]
            else:
                if sys.platform == "win32":
                    return os.path.join("pd", "bin", "pd.exe")
                elif sys.platform == "linux2":
                    return "pd"
                elif sys.platform == "darwin":
                    return os.path.join("", "Applications", "Pd.app",
                                        "Contents", "Resources", "bin", "pd")
                else:
                    raise PdException("Unknown Pd executable location on your"
                                      " platform ({}).".format(sys.platform))
        else:
            return pdbin

    def __init__(self, stderr=True, nogui=True, initPatch=None, bin=None):
        self.pdbin = pd._getPdBin(bin)
        args = [self.pdbin]

        self.pdsend = os.path.join(os.path.dirname(self.pdbin), "pdsend")
        self.port = DEFAULT_PORT

        if stderr:
            args.append("-stderr")

        if nogui:
            args.append("-nogui")

        if initPatch:
            args.append("-open")
            args.append(initPatch)

        try:
            print(args)
            self.proc = Popen(args, stdin=None, stderr=PIPE, stdout=PIPE,
                              close_fds=(sys.platform != "win32"))
        except OSError:
            raise PdException(
                "Problem running `{}` from '{}'".format(self.pdbin,
                                                        os.getcwd()))

    def send(self, msg):
        args = [self.pdsend, str(DEFAULT_PORT)]
        print(args, msg)
        msg = "; " + msg + ";"
        sendProc = Popen(args, stdin=PIPE, close_fds=(sys.platform != "win32"),
                         universal_newlines=True)
        out, err = sendProc.communicate(input=msg)

    def kill(self):
        if self.proc:  # guard before signalling, not after
            self.proc.send_signal(signal.SIGINT)
            self.proc.wait()
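
A hedged usage sketch for the pd wrapper above; it assumes a working Pd installation (or PD_BIN) and the module-level DEFAULT_PORT used by send():

import time

engine = pd(nogui=True)   # spawns pd with -stderr -nogui
time.sleep(1)             # give Pd a moment to open its listening socket
engine.send("test bang")  # delivered through pdsend as "; test bang;"
engine.kill()             # SIGINT, then wait for the process to exit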
Example #11
class GHCI:
    def __init__(self):
        self.process = Popen(["ghci", "-v0", "+RTS -M1k"], stdout=PIPE, stdin=PIPE, stderr=PIPE)
        self.stdin = self.process.stdin
        self.stdout = self.process.stdout
        self.stderr = self.process.stderr
        self.devnull = open(devnull, "w")

        fcntl(self.stdout.fileno(), F_SETFL, O_NONBLOCK)
        fcntl(self.stderr.fileno(), F_SETFL, O_NONBLOCK)

    def __del__(self):
        self.process.kill()

    def execute(self, code):
        self.stdin.flush()
        self.stdout.flush()
        self.stderr.flush()

        self.stdout.read()
        self.stderr.read()

        self.stdin.write(bytes(code + "\n", "utf-8"))
        self.stdin.flush()
        self.stdout.flush()
        self.stderr.flush()

    def evaluate(self, code):
        self.stdin.flush()
        self.stdout.flush()
        self.stderr.flush()

        self.stdout.read()
        self.stderr.read()

        self.stdin.write(bytes(code + "\n", "utf-8"))
        self.stdin.flush()
        self.stdout.flush()
        self.stderr.flush()

        # read input
        out = ""
        fds = select([self.stdout, self.stderr], [], [], 5)[0]
        if len(fds) > 0:
            for fd in fds:
                fd.flush()
                line = fd.readline().decode("utf-8")
                out += line
            print(out.strip(), end="", flush=True)
            return True
        else:
            self.process.send_signal(SIGINT)
            return False

    def close(self):
        self.stdin.close()
        self.stdout.close()
        self.stderr.close()
Example #12
def _close_app_process(process: subprocess.Popen):
    # SIGINT instead of terminate,
    # so that subprocess coverage works without special signal handling
    process.send_signal(signal.SIGINT)
    try:
        process.wait(3)
    except subprocess.TimeoutExpired:
        process.kill()
        pytest.fail("The process didn't close on time.")
Example #13
    def test_bridgedb_commands(self):
        print('')
        here       = os.getcwd()
        runDir     = pjoin(here, 'rundir')
        topDir     = here.rstrip('_trial_temp')
        scriptsDir = pjoin(topDir, 'scripts')

        # Create the lowest directory we need, and all its parents:
        os.makedirs(os.path.join(runDir, 'gnupghome'))

        conf      = pjoin(topDir, 'bridgedb.conf')
        confMoved = pjoin(runDir, 'bridgedb.conf')
        gpgFile   = pjoin(topDir, 'gnupghome', 'TESTING.subkeys.sec')
        gpgMoved  = pjoin(runDir, 'gnupghome', 'TESTING.subkeys.sec')
        certFile  = pjoin(topDir, 'cert')
        certMoved = pjoin(runDir, 'cert')
        keyFile   = pjoin(topDir, 'privkey.pem')
        keyMoved  = pjoin(runDir, 'privkey.pem')

        makeSSLCertScript = os.path.join(scriptsDir, 'make-ssl-cert')
        bridgedbScript    = which('bridgedb') # this returns a list

        self.doCopyFile(conf, confMoved, 'config')
        self.doCopyFile(gpgFile, gpgMoved, 'GPG test key')
        print("Running subcommands from directory:\n  %r" % runDir)
        print("Running %r..." % makeSSLCertScript)
        makeSSLCertProcess = Popen(makeSSLCertScript)
        makeSSLCertProcess.wait()
        self.doMoveFile(certFile, certMoved, 'certificate')
        self.doMoveFile(keyFile, keyMoved, 'SSL private key')

        self.assertTrue(os.path.isfile(bridgedbScript[0]),
                        "Couldn't find bridgedb script %r" % bridgedbScript[0])
        bridgedbScript = bridgedbScript[0]
        print("Running bridgedb script %r..." % bridgedbScript)

        os.chdir(runDir)  # we have to do this to get files to end up there
        print("Running `bridgedb mock' to generate mock bridge descriptors...")
        mockProc = Popen([bridgedbScript, 'mock', '-n', '50'])
        mockProcCode = mockProc.wait()
        print("`bridgedb mock' exited with status code %d" % int(mockProcCode))
        os.chdir(here)

        # See ticket #11216, cached-extrainfo* files should not be parsed
        # cumulatively.
        eidesc  = pjoin(runDir, 'cached-extrainfo')
        eindesc = pjoin(runDir, 'cached-extrainfo.new')
        self.doCopyFile(eindesc, eidesc, 'duplicated cached-extrainfo(.new)')

        print("Running `bridgedb' to test server startups...")
        bridgedbProc = Popen([bridgedbScript, '-r', runDir])
        time.sleep(30)
        bridgedbProc.send_signal(signal.SIGINT)
        bridgedbProcCode = bridgedbProc.wait()
        print("`bridgedb' exited with status code %d" % int(bridgedbProcCode))
        self.assertEqual(bridgedbProcCode, 0)
Example #14
class SplashServer(object):

    def __init__(self, logfile=None, proxy_profiles_path=None,
                 js_profiles_path=None, filters_path=None, portnum=None,
                 proxy_portnum=None, extra_args=None, verbosity=3):
        self.logfile = logfile
        self.proxy_profiles_path = proxy_profiles_path
        self.js_profiles_path = js_profiles_path
        self.filters_path = filters_path
        self.verbosity = verbosity
        self.portnum = portnum if portnum is not None else get_ephemeral_port()
        self.proxy_portnum = proxy_portnum if proxy_portnum is not None else get_ephemeral_port()
        self.tempdir = tempfile.mkdtemp()
        self.extra_args = extra_args or []

    def __enter__(self):
        args = [sys.executable, '-u', '-m', 'splash.server']
        args += ['--cache-path', self.tempdir]
        args += ['--port', str(self.portnum)]
        args += ['--verbosity', str(self.verbosity)]
        if self.logfile:
            args += ['-f', self.logfile]
        if self.proxy_profiles_path:
            args += ['--proxy-profiles-path', self.proxy_profiles_path]
        if self.js_profiles_path:
            args += ['--js-profiles-path', self.js_profiles_path]
        if self.filters_path:
            args += ['--filters-path', self.filters_path]
        if self.proxy_portnum:
            args += ['--proxy-portnum', str(self.proxy_portnum)]

        args.extend(self.extra_args)

        self.proc = Popen(args, env=get_testenv())
        self.proc.poll()
        if self.proc.returncode is not None:
            msg = ("unable to start splash server. return code: %d" %
                   self.proc.returncode)
            raise RuntimeError(msg)
        _wait_for_port(self.portnum)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.proc is not None:
            self.proc.send_signal(signal.SIGINT)
            self.proc.wait()
            self.proc = None
            shutil.rmtree(self.tempdir)

    def url(self, path):
        return "http://localhost:%s/%s" % (self.portnum, path.lstrip('/'))

    def proxy_url(self):
        return "http://localhost:%s" % self.proxy_portnum
Example #15
    def handle(self, *args, **options):
        shutdown_message = options.get('shutdown_message', '')
        application = options.get('application')
        version = options.get('version')

        from djangae.boot import setup_paths, find_project_root
        setup_paths()

        project_root = find_project_root()

        expected_path = os.path.join(project_root, "app.yaml")
        if not os.path.exists(expected_path):
            sys.stderr.write("Unable to find app.yaml at '%s'\n" % expected_path)
            sys.exit(1)

        # Will have been set by setup_paths
        sdk_path = os.environ['APP_ENGINE_SDK']

        appcfg = os.path.join(sdk_path, "appcfg.py")

        # very simple for now, only runs appcfg.py update . and some
        # extra parameters like app id or version

        command = [
            appcfg,
            "update",
            project_root
        ]

        if application:
            command += ["-A", application]
        if version:
            command += ["-V", version]

        process = Popen(
            command,
            stdout=sys.__stdout__,
            stderr=sys.__stderr__,
            cwd=project_root
        )

        try:
            process.wait()
        except KeyboardInterrupt:
            #Tell the dev appserver to shutdown and forcibly kill
            #if it takes too long
            process.send_signal(signal.SIGTERM)
            time.sleep(2)
            process.kill()

            if shutdown_message:
                sys.stdout.write(shutdown_message)

        sys.exit(process.returncode)
def spawn_and_kill_pipeline():
    proc = Popen(("dumb-init", "sh", "-c", "yes 'oh, hi' | tail & yes error | tail >&2"))
    time.sleep(0.1)

    pids = pid_tree(os.getpid())
    assert len(living_pids(pids)) == 6

    proc.send_signal(signal.SIGTERM)
    proc.wait()

    time.sleep(0.1)
    return pids
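
The helper returns a snapshot of the pid tree so the caller can assert cleanup, roughly like this (living_pids comes from the same test suite):

def test_sigterm_kills_whole_pipeline():
    pids = spawn_and_kill_pipeline()
    # After dumb-init forwards SIGTERM and exits, nothing should survive.
    assert len(living_pids(pids)) == 0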
Example #17
    def doit(self, doit_args, cell):
        with NamedTemporaryFile(delete=False, suffix='.py') as tmp_file:
            tmp_name = tmp_file.name
            tmp_file.write(cell)

        cur_dir = os.getcwd()
        doit_args = doit_args.split()
        if doit_args:
            doit_command = [doit_args.pop(0)]
        else:
            doit_command = []

        cmd = ['doit']
        cmd += doit_command
        cmd += [ '-d', cur_dir, '-f', tmp_name]
        cmd += doit_args

        p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)  # stdin must be a pipe for communicate(cell)

        try:
            out, err = p.communicate(cell)
        except KeyboardInterrupt:
            try:
                p.send_signal(signal.SIGINT)
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is interrupted.")
                    return
                p.terminate()
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is terminated.")
                    return
                p.kill()
                print("Process is killed.")
            except OSError:
                pass
            except Exception as e:
                print("Error while terminating subprocess (pid=%i): %s" \
                    % (p.pid, e))
            return

        out = py3compat.bytes_to_str(out)
        err = py3compat.bytes_to_str(err)

        sys.stdout.write(out)
        sys.stdout.flush()
        sys.stderr.write(err)
        sys.stderr.flush()

        os.remove(tmp_name)
def main():
    
    (options, args) = parseArgs()
    flags = ['-P',options.ping_pattern,'--switch',options.switch]
    if options.verbose:
        flags.append('-v')

    # GET PATHS
    controller_src_path = os.path.expanduser('~/pyretic/examples/load_balancer.py')
    unit_test_path = os.path.expanduser('~/pyretic/tests/load_balancer_unit_test.py')
    pox_path = os.path.expanduser('~/pox/pox.py')

    # MAKE SURE WE CAN SEE ALL OUTPUT IF VERBOSE
    env = os.environ.copy()
    if options.verbose:
        env['PYTHONUNBUFFERED'] = 'True'

    dists = [(2,2),(8,3)]
    print "=========== LOAD BALANCER TESTER =============="
    print "-TOPO------------------PKTS----------TIME------"
    count = 0
    for (clients,servers) in dists:

        # STARTUP CONTROLLER
        controller = Popen([sys.executable, pox_path,'--no-cli', controller_src_path, 
                        '--clients='+str(clients), '--servers='+str(servers)], 
                           env=env,
                           stdout=PIPE, 
                           stderr=STDOUT)
        if options.verbose:
            controller_out = subprocess_output(controller)
            controller_out.start()
            sleep(1)

        cs_params = str(clients)+','+str(servers)
        topos = ['bump_clique,1,'+cs_params, 'bump_clique,4,'+cs_params]

        for topo in topos:
            test = ['sudo', unit_test_path, '--topo', topo, '-c', str(clients),'-s', str(servers)] + flags
            testproc = call(test)
            if testproc == 0:
                count += 1

        # KILL CONTROLLER
        controller.send_signal( SIGINT )
    
    print "-----------------------------------------------"
    if count == len(topos) * len(dists):
        print "+ load_balancer_tester PASSED [%d/%d]" % (count,len(topos)*len(dists))
    else:
        print "- load_balancer_tester FAILED [%d/%d]" % (count,len(topos)*len(dists))
Example #19
def play(filename):

    p = None
    try:
        p = Popen(["play", filename, 'trim', '60'],
                  stdout=PIPE)

    except KeyboardInterrupt:
        raise
    except:
        print "exiting"
        if p is not None:  # Popen itself may have raised, leaving p unset
            p.send_signal(signal.SIGINT)
            p.wait()
        return
    return
Example #20
def timed_process(args, timeout=None, env=None, shell=False):
    """
    Execute a command using the :py:mod:`subprocess` module;
    if timeout is specified, the process is killed if it does
    not terminate in the maximum allowed time.

    Parameters:

    args
        the command to run, in a list format

    timeout
        the maximum time to wait for the process to terminate
        before killing it

    env
        a dictionary representing the environment
    """
    extra = dict()
    if env is not None:
        extra['env'] = env
    try:
        proc = Popen(args, stdout=PIPE, stderr=PIPE, shell=shell,
                     **extra)
    except OSError:
        error = sys.exc_info()[1]
        raise ProcessError("OSError %s" % error)
    except ValueError:
        error = sys.exc_info()[1]
        raise ProcessError("ValueError %s" % error)
    if timeout is None:
        out, err = proc.communicate()
        return proc.poll(), out, err
    maxt = time.time() + timeout
    while proc.poll() is None and time.time() < maxt:
        time.sleep(CHECK_TIME)
    if proc.poll() is None:
        try:
            getattr(proc, "send_signal")
            proc.send_signal(signal.SIGKILL)
        except AttributeError:
            try:
                os.kill(proc.pid, signal.SIGKILL)
            except OSError:  # process already gone
                pass
        raise ProcessTimedout("Process %s timed out after %s seconds." %
                              (" ".join(args), timeout))
    else:
        out, err = proc.communicate()
        return proc.poll(), out, err
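
A brief usage sketch for timed_process; the sleep command just makes the timeout path observable:

# Fast command: returns (exit_code, stdout, stderr).
code, out, err = timed_process(['echo', 'hello'])
assert code == 0

# Slow command with a one-second budget: the process is SIGKILLed and
# ProcessTimedout is raised.
try:
    timed_process(['sleep', '10'], timeout=1)
except ProcessTimedout as error:
    print(error)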
Example #21
class PingerInterface(Collector):
    def __init__(self, *argz, **kwz):
        super(PingerInterface, self).__init__(*argz, **kwz)
        self.hosts = dict(
            it.chain(
                (("v4:{}".format(spec), name) for name, spec in (self.conf.hosts.ipv4 or dict()).viewitems()),
                (("v6:{}".format(spec), name) for name, spec in (self.conf.hosts.ipv6 or dict()).viewitems()),
            )
        )
        if not self.hosts:
            log.info("No valid hosts to ping specified, disabling collector")
            self.conf.enabled = False
        else:
            self.spawn_pinger()

    def spawn_pinger(self):
        cmd = (
            ["python", os.path.join(os.path.dirname(__file__), "_ping.py")]
            + map(
                bytes,
                [
                    self.conf.interval,
                    self.conf.resolve.no_reply or 0,
                    self.conf.resolve.time or 0,
                    self.conf.ewma_factor,
                    os.getpid(),
                    self.conf.resolve.max_retries,
                ],
            )
            + self.hosts.keys()
        )
        log.debug("Starting pinger subprocess: {}".format(" ".join(cmd)))
        self.proc = Popen(cmd, stdout=PIPE, close_fds=True)
        self.proc.stdout.readline()  # wait until it's initialized

    def read(self):
        err = self.proc.poll()
        if err is not None:
            log.warn("Pinger subprocess has failed" " (exit code: {}), restarting it".format(err))
            self.spawn_pinger()
        else:
            self.proc.send_signal(signal.SIGQUIT)
            for line in iter(self.proc.stdout.readline, ""):
                line = line.strip()
                if not line:
                    break
                host, ts_offset, rtt, lost = line.split()
                host = self.hosts[host]
                yield Datapoint("network.ping.{}.ping".format(host), "gauge", float(rtt), None)
                yield Datapoint("network.ping.{}.droprate".format(host), "counter", int(lost), None)
def test_proxies_signals(both_debug_modes, both_setsid_modes):
    """Ensure dumb-init proxies regular signals to its child."""
    proc = Popen(
        ('dumb-init', sys.executable, '-m', 'tests.lib.print_signals'),
        stdout=PIPE,
    )

    assert re.match(b'^ready \(pid: (?:[0-9]+)\)\n$', proc.stdout.readline())

    for signum in NORMAL_SIGNALS:
        proc.send_signal(signum)
        assert proc.stdout.readline() == '{0}\n'.format(signum).encode('ascii')

    for pid in pid_tree(proc.pid):
        os.kill(pid, signal.SIGKILL)
Example #23
        def test_hang(self):

            if WIN:
                from subprocess import CREATE_NEW_PROCESS_GROUP
                kwargs = {'creationflags': CREATE_NEW_PROCESS_GROUP}
            else:
                kwargs = {}
            p = Popen([sys.executable, __file__, 'subprocess'], stdout=PIPE, **kwargs)
            line = p.stdout.readline()
            if not isinstance(line, str):
                line = line.decode('ascii')
            # Windows needs the \n in the string to write (because of buffering), but
            # because of newline handling it doesn't make it through the read; whereas
            # it does on other platforms. Universal newlines is broken on Py3, so the best
            # thing to do is to strip it
            line = line.strip()
            self.assertEqual(line, 'ready')
            # On Windows, we have to send the CTRL_BREAK_EVENT (which seems to terminate the process); SIGINT triggers
            # "ValueError: Unsupported signal: 2". The CTRL_C_EVENT is ignored on Python 3 (but not Python 2).
            # So this test doesn't test much on Windows.
            signal_to_send = signal.SIGINT if not WIN else getattr(signal, 'CTRL_BREAK_EVENT')
            p.send_signal(signal_to_send)
            # Wait a few seconds for child process to die. Sometimes signal delivery is delayed
            # or even swallowed by Python, so send the signal a few more times if necessary
            wait_seconds = 15.0
            now = time.time()
            midtime = now + (wait_seconds / 2.0)
            endtime = time.time() + wait_seconds
            while time.time() < endtime:
                if p.poll() is not None:
                    break
                if time.time() > midtime:
                    p.send_signal(signal_to_send)
                    midtime = endtime + 1 # only once
                time.sleep(0.1)
            else:
                # Kill unresponsive child and exit with error 1
                p.terminate()
                p.wait()
                raise AssertionError("Failed to wait for child")

            # If we get here, it's because we caused the process to exit; it
            # didn't hang. Under Windows, however, we have to use CTRL_BREAK_EVENT,
            # which has an arbitrary returncode depending on versions (so does CTRL_C_EVENT
            # on Python 2). We still
            # count this as success.
            self.assertEqual(p.returncode if not WIN else 0, 0)
            p.stdout.close()
Example #24
def run(args, cwd=None, postprocessor=None):
    io_queue = Queue()

    # Set environment, disabling Python buffering of the subprocess
    env = os.environ.copy()
    env['PYTHONUNBUFFERED'] = 'True'

    # Set platform-specific arguments
    kwargs = {}
    if sys.platform == 'win32':
        from subprocess import CREATE_NEW_PROCESS_GROUP
        # Use a new process group
        kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP
        # Prevent "Terminate batch job (Y/N)?"
        kwargs['stdin'] = open(os.devnull, 'r')

    # Run process
    process = Popen(
        args, env=env, shell=True, stdout=PIPE, stderr=PIPE, **kwargs)

    # Create and start all listening threads
    stdout_thread = Thread(target=_reader, args=[process.stdout, io_queue])
    stderr_thread = Thread(target=_reader, args=[process.stderr, io_queue])
    checker_thread = Thread(target=_checker, args=[process])
    printer_thread = Thread(target=_printer,
                            args=[io_queue, checker_thread, postprocessor])
    stdout_thread.start()
    stderr_thread.start()
    checker_thread.start()
    printer_thread.start()

    # Wait for keyboard interrupt
    while checker_thread.is_alive():
        try:
            sleep(0.5)
        except KeyboardInterrupt:
            break

    # Gracefully terminate and show output from cleanup
    term_signal = (
        signal.CTRL_BREAK_EVENT if sys.platform == 'win32' else signal.SIGTERM)
    while True:
        try:
            process.send_signal(term_signal)
            printer_thread.join()
            break
        except KeyboardInterrupt:
            continue
class Processes:
	def __init__(self):
		self.process = Popen(['rostopic', 'hz', 'tf'], stdin=PIPE)
		rospy.sleep(1)
		self.process.send_signal(signal.SIGINT)
		process_thread = threading.Thread(target=self.start_process, name='proc')
		kill_thread = threading.Thread(target=self.kill_process, name='kill')
		#process_thread.start()
		#kill_thread.start()

	def start_process(self):
		self.process = Popen(['rostopic', 'hz', 'tf'], stdin=PIPE)

	def kill_process(self):
		rospy.sleep(1)
		self.process.send_signal(signal.SIGINT)
def spawn_and_kill_pipeline():
    proc = Popen((
        'dumb-init',
        'sh', '-c',
        "yes 'oh, hi' | tail & yes error | tail >&2"
    ))

    def assert_living_pids():
        assert len(living_pids(pid_tree(os.getpid()))) == 6

    sleep_until(assert_living_pids)

    pids = pid_tree(os.getpid())
    proc.send_signal(signal.SIGTERM)
    proc.wait()
    return pids
def main():
    
    (options, args) = parseArgs()
    flags = ['-P',options.ping_pattern,'--switch',options.switch]
    if options.verbose:
        flags.append('-v')

    # GET PATHS
    unit_test_path = os.path.expanduser('~/pyretic/tests/mac_learner_unit_test.py')
    pox_path = os.path.expanduser('~/pox/pox.py')

    # MAKE SURE WE CAN SEE ALL OUTPUT IF VERBOSE
    env = os.environ.copy()
    if options.verbose:
        env['PYTHONUNBUFFERED'] = 'True'

    # STARTUP CONTROLLER
    controller = Popen([sys.executable, '-u', '-O', pox_path,'--no-cli', 'pyretic/examples/virtualize.py', '--program=pyretic/examples/mac_learner.py', '--virttopo=pyretic/virttopos/bfs.py'], 
                       env=env,
                       stdout=PIPE, 
                       stderr=STDOUT)
    controller_out = subprocess_output(controller, options.verbose)  # VERY ODD, IF WE DON'T RUN SUBPROCESS OUTPUT, TEST FAILS...
    controller_out.start()
    sleep(1)

    # TEST EACH TOPO
    topos = ['single,2','single,16','linear,2','linear,8','tree,2,2','tree,3,2']
#    topos = ['cycle,8,8','clique,8,8']  # THESE TWO AREN'T RELIABLE CURRENTLY


    print "======= LEARNING SWITCH ON BFS TESTER ========="
    print "-TOPO---------CONNS----PKTS----------TIME------"
    count = 0
    for topo in topos:
        test = ['sudo', unit_test_path, '--topo', topo] + flags
        testproc = call(test)
        if testproc == 0:
            count += 1
    print "----------------------------------------------------"

    if count == len(topos):
        print "+ mac_learner_unit PASSED [%d/%d]" % (count,len(topos))
    else:
        print "- mac_learner_unit FAILED [%d/%d]" % (count,len(topos))
        
    # KILL CONTROLLER
    controller.send_signal( SIGINT )
    def run(self):
        '''
        endless running function with state machine for handling the flash
        process
        '''
        while self.exiting == False:
            #wait for card to be inserted
            if (self.flash_state == self.FLASH_STATE_WAIT_FOR_INSERT):
                print("wait for insert")
                self.state.emit("wait for card insert")
                self.dataReady.emit(0)
                time.sleep(1)
            elif (self.flash_state == self.FLASH_STATE_FLASHING):
                self.state.emit("flashing...")
                english_env = dict(os.environ)
                english_env['LANG'] = "en_US.UTF-8"
                dd_process = Popen(['dd', 'of=' + self.deviceName, \
                                    'bs=1M', 'oflag=direct', \
                                    'if=' + self.inputfile], \
                                    stderr=PIPE, env=english_env)
                while dd_process.poll() is None:
                    print self.deviceName + ": wait for dd end"
                    time.sleep(1)
                    print self.deviceName + ": wait for dd end"
                    dd_process.send_signal(signal.SIGUSR1)
                    print self.deviceName + ": sent signal SIGUSR1 to dd"
                    while 1:
                        time.sleep(.1)
                        print self.deviceName + \
                              ": in endless loop for reading stderr"
                        dd_line = dd_process.stderr.readline()
                        print self.deviceName + dd_line
                        if 'bytes' in dd_line:
                            bytes_copied = dd_line.split(' ')[0]
                            print self.deviceName + ": " + str(bytes_copied) +\
                                  " of " + str(self.filesize) + \
                                  " bytes copied so far"
                            #the following calculation will reach 99% as maximum
                            self.dataReady.emit(99*int(bytes_copied) \
                                                / self.filesize)
                            break
                self.dataReady.emit(100)
                self.flash_state = self.FLASH_STATE_WAIT_FOR_REMOVAL
            elif (self.flash_state == self.FLASH_STATE_WAIT_FOR_REMOVAL):
                self.state.emit("wait for card removal")
                time.sleep(1)
Example #29
class Process(object):
    def __init__(self, cmd, run_env):
        self.cmd = cmd
        self.run_env = run_env
        self.process = None
        self.pid = None

    def dump_backtraces(self):
        # Obtain backtraces for all threads using pmp every few seconds and dump in
        # a file, might help later to debug
        dbg(0, 'Collecting backtraces for pid ' + str(self.pid))
        filename = FI_TMP_DIR + '/fi_pid_' + str(self.pid) + '_hung_btt.log'
        with open(filename, 'w') as f:
            for i in range(3):
                pmp_process = Popen([PMP_PATH, str(self.pid)], stdout=PIPE, stderr=PIPE)
                out = pmp_process.communicate()
                f.write(out[0] + '\n')
                dbg(0, 'Backtrace:\n' + out[0])
                time.sleep(1)

    def run(self, timeout):
        def exec_proc():
            dbg(2, 'Executing command .. ' + self.cmd)
            if verbose > 1:
                self.process = Popen(shlex.split(self.cmd), env=self.run_env)
            else:
                self.process = Popen(shlex.split(self.cmd),
                     stdout=PIPE, stderr=PIPE, env=self.run_env)
            self.process.communicate()

        thread = threading.Thread(target=exec_proc)
        thread.start()

        thread.join(timeout)
        self.pid = self.process.pid
        if thread.is_alive():
            dbg(1, 'Process timed-out, assuming process to be hung, collect backtraces and terminate ..')
            # Let's save a few iterations of backtraces for all threads, might help debug
            self.dump_backtraces()

            # Kill the process, this should get us core as well
            self.process.send_signal(signal.SIGQUIT)
            thread.join()

        return self.process.returncode
class ProgramWrapper(MultiThreadClosing):

    def __init__(self, cmd):
        super(ProgramWrapper, self).__init__()
        self.cmd = cmd
        self.on_posix = 'posix' in sys.builtin_module_names
        self.child_p = Popen(self.cmd, stdout=PIPE, stderr=PIPE, bufsize=1, close_fds=self.on_posix, shell=True)

    def enqueue_output(self, out, queue, _type):
        for line in iter(out.readline, ''):
            if line != "\n":
                queue.put((_type, line))
        out.close()

    def start(self):
        q = Queue()
        t1 = Thread(target=self.enqueue_output, args=(self.child_p.stdout, q, "info"))
        self.threads.append(t1)
        t2 = Thread(target=self.enqueue_output, args=(self.child_p.stderr, q, "error"))
        self.threads.append(t2)
        for thread in self.threads:
            thread.start()
        while True:
            try:
                _type, line = q.get_nowait()
            except Empty:
                time.sleep(1)
                alive = False
                for thread in self.threads:
                    alive = alive or thread.isAlive()
                if not alive and q.empty():
                    break
            else:
                getattr(self.logger, _type)(line.strip())

    def stop(self, *args):
        super(ProgramWrapper, self).stop(*args)
        if self.child_p:
            self.child_p.send_signal(signal.SIGTERM)

    @classmethod
    def parseArgs(cls):
        parser = ArgumentParser(description="record stdout and stderr")
        parser.add_argument('-c', '--cmd', dest="cmd", help='command to execute')
        return cls(**vars(parser.parse_args()))
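
Driven from the command line it would look roughly like this; stop() is what forwards SIGTERM to the child:

# python wrapper.py -c "ping -c 3 localhost"
wrapper = ProgramWrapper.parseArgs()
try:
    wrapper.start()  # pumps child stdout/stderr into the logger
finally:
    wrapper.stop()   # sends SIGTERM to the child process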
Example #31
    def send_siginit():
        popen.send_signal(proc1, signal.SIGINT)
        self.assertTrue("Quitting" in sys.stdout.getvalue())
Example #32
class LocalProcessLauncher(BaseLauncher):
    """Start and stop an external process in an asynchronous manner.

    This will launch the external process with a working directory of
    ``self.work_dir``.
    """

    # This is used to construct self.args, which is passed to
    # spawnProcess.
    cmd_and_args = List([])
    poll_frequency = Integer(100) # in ms

    def __init__(self, work_dir=u'.', config=None, **kwargs):
        super(LocalProcessLauncher, self).__init__(
            work_dir=work_dir, config=config, **kwargs
        )
        self.process = None
        self.poller = None

    def find_args(self):
        return self.cmd_and_args

    def start(self):
        self.log.debug("Starting %s: %r", self.__class__.__name__, self.args)
        if self.state == 'before':
            self.process = Popen(self.args,
                stdout=PIPE,stderr=PIPE,stdin=PIPE,
                env=os.environ,
                cwd=self.work_dir
            )
            if WINDOWS:
                self.stdout = forward_read_events(self.process.stdout)
                self.stderr = forward_read_events(self.process.stderr)
            else:
                self.stdout = self.process.stdout.fileno()
                self.stderr = self.process.stderr.fileno()
            self.loop.add_handler(self.stdout, self.handle_stdout, self.loop.READ)
            self.loop.add_handler(self.stderr, self.handle_stderr, self.loop.READ)
            self.poller = ioloop.PeriodicCallback(self.poll, self.poll_frequency, self.loop)
            self.poller.start()
            self.notify_start(self.process.pid)
        else:
            s = 'The process was already started and has state: %r' % self.state
            raise ProcessStateError(s)

    def stop(self):
        return self.interrupt_then_kill()

    def signal(self, sig):
        if self.state == 'running':
            if WINDOWS and sig != SIGINT:
                # use Windows tree-kill for better child cleanup
                check_output(['taskkill', '-pid', str(self.process.pid), '-t', '-f'])
            else:
                self.process.send_signal(sig)

    def interrupt_then_kill(self, delay=2.0):
        """Send INT, wait a delay and then send KILL."""
        try:
            self.signal(SIGINT)
        except Exception:
            self.log.debug("interrupt failed")
            pass
        self.killer = ioloop.DelayedCallback(lambda : self.signal(SIGKILL), delay*1000, self.loop)
        self.killer.start()

    # callbacks, etc:

    def handle_stdout(self, fd, events):
        if WINDOWS:
            line = self.stdout.recv()
        else:
            line = self.process.stdout.readline()
        # a stopped process will be readable but return empty strings
        if line:
            self.log.debug(line[:-1])
        else:
            self.poll()

    def handle_stderr(self, fd, events):
        if WINDOWS:
            line = self.stderr.recv()
        else:
            line = self.process.stderr.readline()
        # a stopped process will be readable but return empty strings
        if line:
            self.log.debug(line[:-1])
        else:
            self.poll()

    def poll(self):
        status = self.process.poll()
        if status is not None:
            self.poller.stop()
            self.loop.remove_handler(self.stdout)
            self.loop.remove_handler(self.stderr)
            self.notify_stop(dict(exit_code=status, pid=self.process.pid))
        return status
class Taskd(object):
    """Manage a taskd instance

    A temporary folder is used as data store of taskd.
    This class can be instantiated multiple times if multiple taskd servers
    are needed.

    This class implements mechanisms to automatically select an available port
    and prevent assigning the same port to different instances.

    A server can be stopped and started multiple times, but should not be
    started or stopped after being destroyed.
    """
    DEFAULT_TASKD = taskd_binary_location()
    TASKD_RUNNING = 0
    TASKD_NEVER_STARTED = 1
    TASKD_EXITED = 2
    TASKD_NOT_LISTENING = 3

    def __init__(self,
                 taskd=DEFAULT_TASKD,
                 certpath=None,
                 address="localhost"):
        """Initialize a Task server that runs in the background and stores data
        in a temporary folder

        :arg taskd: Taskd binary to launch the server (default: taskd in PATH)
        :arg certpath: Folder where to find all certificates needed for taskd
        :arg address: Address to bind to
        """
        self.taskd = taskd
        self.usercount = 0

        # Will hold the taskd subprocess if it's running
        self.proc = None
        self.datadir = tempfile.mkdtemp(prefix="taskd_")
        self.tasklog = os.path.join(self.datadir, "taskd.log")
        self.taskpid = os.path.join(self.datadir, "taskd.pid")

        # Ensure any instance is properly destroyed at session end
        atexit.register(lambda: self.destroy())

        self.reset_env()

        if certpath is None:
            certpath = DEFAULT_CERT_PATH
        self.certpath = certpath

        self.address = address
        self.port = find_unused_port(self.address)

        # Keep all certificate paths public for access by TaskClients
        self.client_cert = os.path.join(self.certpath, "client.cert.pem")
        self.client_key = os.path.join(self.certpath, "client.key.pem")
        self.server_cert = os.path.join(self.certpath, "server.cert.pem")
        self.server_key = os.path.join(self.certpath, "server.key.pem")
        self.server_crl = os.path.join(self.certpath, "server.crl.pem")
        self.ca_cert = os.path.join(self.certpath, "ca.cert.pem")

        # Initialize taskd
        cmd = (self.taskd, "init", "--data", self.datadir)
        run_cmd_wait(cmd, env=self.env)

        self.config("server", "{0}:{1}".format(self.address, self.port))
        self.config("family", "IPv4")
        self.config("log", self.tasklog)
        self.config("pid.file", self.taskpid)
        self.config("root", self.datadir)
        self.config("client.allow", "^task [2-9]")

        # Setup all necessary certificates
        self.config("client.cert", self.client_cert)
        self.config("client.key", self.client_key)
        self.config("server.cert", self.server_cert)
        self.config("server.key", self.server_key)
        self.config("server.crl", self.server_crl)
        self.config("ca.cert", self.ca_cert)

        self.default_user = self.create_user()

    def __repr__(self):
        txt = super(Taskd, self).__repr__()
        return "{0} running from {1}>".format(txt[:-1], self.datadir)

    def reset_env(self):
        """Set a new environment derived from the one used to launch the test
        """
        # Copy all env variables to avoid clashing subprocess environments
        self.env = os.environ.copy()

        # Make sure TASKDDATA points to the temporary folder
        self.env["TASKDATA"] = self.datadir

    def create_user(self, user=None, group=None, org=None):
        """Create a user/group in the server and return the user
        credentials to use in a taskw client.
        """
        if user is None:
            # Create a unique user ID
            uid = self.usercount
            user = "******".format(uid)

            # Increment the user_id
            self.usercount += 1

        if group is None:
            group = "default_group"

        if org is None:
            org = "default_org"

        self._add_entity("org", org, ignore_exists=True)
        self._add_entity("group", org, group, ignore_exists=True)
        userkey = self._add_entity("user", org, user)

        return user, group, org, userkey

    def _add_entity(self, keyword, org, value=None, ignore_exists=False):
        """Add an organization, group or user to the current server

        If a user creation is requested, the user unique ID is returned
        """
        cmd = (self.taskd, "add", "--data", self.datadir, keyword, org)

        if value is not None:
            cmd += (value, )

        try:
            code, out, err = run_cmd_wait(cmd, env=self.env)
        except CommandError as e:
            match = False
            for line in e.out.splitlines():
                if line.endswith("already exists.") and ignore_exists:
                    match = True
                    break

            # If the error was not "Already exists" report it
            if not match:
                raise

        if keyword == "user":
            expected = "New user key: "
            for line in out.splitlines():
                if line.startswith(expected):
                    return line.replace(expected, '')

    def config(self, var, value):
        """Run setup `var` as `value` in taskd config
        """
        cmd = (self.taskd, "config", "--force", "--data", self.datadir, var,
               value)
        run_cmd_wait(cmd, env=self.env)

        # If server is running send a SIGHUP to force config reload
        if self.proc is not None:
            try:
                self.proc.send_signal(signal.SIGHUP)
            except:
                pass

    def status(self):
        """Check the status of the server by checking if it's still running and
        listening for connections
        :returns: Taskd.TASKD_[NEVER_STARTED/EXITED/NOT_LISTENING/RUNNING]
        """
        if self.proc is None:
            return self.TASKD_NEVER_STARTED

        if self.returncode() is not None:
            return self.TASKD_EXITED

        if not port_used(addr=self.address, port=self.port):
            return self.TASKD_NOT_LISTENING

        return self.TASKD_RUNNING

    def returncode(self):
        """If taskd finished, return its exit code, otherwise return None.
        :returns: taskd's exit code or None
        """
        return self.proc.poll()

    def start(self, minutes=5, tries_per_minute=2):
        """Start the taskd server if it's not running.
        If it's already running OSError will be raised
        """
        if self.proc is None:
            cmd = (self.taskd, "server", "--data", self.datadir)
            self.proc = Popen(cmd,
                              stdout=PIPE,
                              stderr=PIPE,
                              stdin=DEVNULL,
                              env=self.env)
        else:
            self.show_log_contents()

            raise OSError("Taskd server is still running or crashed")

        # Wait for server to listen by checking connectivity in the port
        # Default is to wait up to 5 minutes checking once every 500ms
        for i in range(minutes * 60 * tries_per_minute):
            status = self.status()

            if status == self.TASKD_RUNNING:
                return

            elif status == self.TASKD_NEVER_STARTED:
                self.show_log_contents()

                raise OSError("Task server was never started. "
                              "This shouldn't happen!!")

            elif status == self.TASKD_EXITED:
                # Collect output logs
                out, err = self.proc.communicate()

                self.show_log_contents()

                raise OSError(
                    "Task server launched with '{0}' crashed or exited "
                    "prematurely. Exit code: {1}. "
                    "Listening on port: {2}. "
                    "Stdout: {3!r}, "
                    "Stderr: {4!r}.".format(
                        self.taskd,
                        self.returncode(),
                        self.port,
                        out,
                        err,
                    ))

            elif status == self.TASKD_NOT_LISTENING:
                sleep(1 / tries_per_minute)

            else:
                self.show_log_contents()

                raise OSError(
                    "Unknown running status for taskd '{0}'".format(status))

        # Force stop so we can collect output
        proc = self.stop()

        # Collect output logs
        out, err = proc.communicate()

        self.show_log_contents()

        raise OSError("Task server didn't start and listen on port {0} after "
                      "{1} minutes. Stdout: {2!r}. Stderr: {3!r}.".format(
                          self.port, minutes, out, err))

    def stop(self):
        """Stop the server by sending a SIGTERM and SIGKILL if fails to
        terminate.
        If it's already stopped OSError will be raised

        Returns: a reference to the old process object
        """
        if self.proc is None:
            raise OSError("Taskd server is not running")

        if self._check_pid():
            self.proc.send_signal(signal.SIGTERM)

        if self._check_pid():
            self.proc.kill()

        # Wait for process to end to avoid zombies
        self.proc.wait()

        # Keep a reference to the old process
        proc = self.proc

        # Unset the process to inform that no process is running
        self.proc = None

        return proc

    def _check_pid(self):
        "Check if self.proc is still running and a PID still exists"
        # Wait ~1 sec for taskd to finish
        signal = True
        for i in range(10):
            sleep(0.1)
            if self.proc.poll() is not None:
                signal = False
                break

        return signal

    def destroy(self):
        """Cleanup the data folder and release server port for other instances
        """
        # Ensure server is stopped first
        if self.proc is not None:
            self.stop()

        try:
            shutil.rmtree(self.datadir)
        except OSError as e:
            if e.errno == 2:
                # Directory no longer exists
                pass
            else:
                raise

        release_port(self.port)

        # Prevent future reuse of this instance
        self.start = self.__destroyed
        self.config = self.__destroyed
        self.stop = self.__destroyed

        # self.destroy will get called when the python session closes.
        # If self.destroy was already called, turn the action into a noop
        self.destroy = lambda: None

    def __destroyed(self, *args, **kwargs):
        raise AttributeError("Taskd instance has been destroyed. "
                             "Create a new instance if you need a new server.")

    @classmethod
    def not_available(cls):
        """Check if the taskd binary is available in the path"""
        if which(cls.DEFAULT_TASKD):
            return False
        else:
            return True

    def client_data(self, client):
        """Return a python list with the content of tx.data matching the given
        task client. tx.data will be parsed to string and JSON.
        """
        file = os.path.join(self.datadir, "orgs", client.credentials["org"],
                            "users", client.credentials["userkey"], "tx.data")

        return parse_datafile(file)

    def show_log_contents(self):
        """Print to to STDOUT the contents of taskd.log
        """
        if os.path.isfile(self.tasklog):
            with open(self.tasklog) as fh:
                print("#### Start taskd.log ####")
                for line in fh:
                    print(line, end='')
                print("#### End taskd.log ####")
Example #34
    def shebang(self, line, cell):
        """Run a cell via a shell command
        
        The `%%script` line is like the #! line of script,
        specifying a program (bash, perl, ruby, etc.) with which to run.
        
        The rest of the cell is run by that program.
        
        Examples
        --------
        ::
        
            In [1]: %%script bash
               ...: for i in 1 2 3; do
               ...:   echo $i
               ...: done
            1
            2
            3
        """
        argv = arg_split(line, posix=not sys.platform.startswith('win'))
        args, cmd = self.shebang.parser.parse_known_args(argv)

        try:
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        except OSError as e:
            if e.errno == errno.ENOENT:
                print("Couldn't find program: %r" % cmd[0])
                return
            else:
                raise

        if not cell.endswith('\n'):
            cell += '\n'
        cell = cell.encode('utf8', 'replace')
        if args.bg:
            self.bg_processes.append(p)
            self._gc_bg_processes()
            if args.out:
                self.shell.user_ns[args.out] = p.stdout
            if args.err:
                self.shell.user_ns[args.err] = p.stderr
            self.job_manager.new(self._run_script, p, cell, daemon=True)
            if args.proc:
                self.shell.user_ns[args.proc] = p
            return

        try:
            out, err = p.communicate(cell)
        except KeyboardInterrupt:
            try:
                p.send_signal(signal.SIGINT)
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is interrupted.")
                    return
                p.terminate()
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is terminated.")
                    return
                p.kill()
                print("Process is killed.")
            except OSError:
                pass
            except Exception as e:
                print("Error while terminating subprocess (pid=%i): %s" \
                    % (p.pid, e))
            return
        out = py3compat.bytes_to_str(out)
        err = py3compat.bytes_to_str(err)
        if args.out:
            self.shell.user_ns[args.out] = out
        else:
            sys.stdout.write(out)
            sys.stdout.flush()
        if args.err:
            self.shell.user_ns[args.err] = err
        else:
            sys.stderr.write(err)
            sys.stderr.flush()
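
# A minimal sketch of the foreground path of shebang() above: start an
# interpreter, feed it the cell body on stdin via communicate(), and collect
# stdout/stderr. The helper name and the bash example are illustrative
# assumptions.
from subprocess import Popen, PIPE

def run_cell(program, cell):
    """Run `cell` as the stdin of `program`; return (stdout, stderr) strings."""
    if not cell.endswith('\n'):
        cell += '\n'               # many interpreters need the final newline
    p = Popen([program], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate(cell.encode('utf8', 'replace'))
    return out.decode(), err.decode()

# Example: out, _ = run_cell('bash', 'for i in 1 2 3; do echo $i; done')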
Exemple #35
0
class PPPConnection:
    def __init__(self, *args, **kwargs):
        self.output = ''
        self._laddr = None
        self._raddr = None

        commands = []

        if kwargs.pop('sudo', True):
            sudo_path = kwargs.pop('sudo_path', '/usr/bin/sudo')
            if not os.path.isfile(sudo_path) or not os.access(
                    sudo_path, os.X_OK):
                raise IOError('%s not found' % sudo_path)
            commands.append(sudo_path)

        pppd_path = kwargs.pop('pppd_path', '/usr/sbin/pppd')
        if not os.path.isfile(pppd_path) or not os.access(pppd_path, os.X_OK):
            raise IOError('%s not found' % pppd_path)

        commands.append(pppd_path)

        for k, v in kwargs.items():
            commands.append(k)
            commands.append(v)
        commands.extend(args)
        commands.append('nodetach')

        self.proc = Popen(commands,
                          stdout=PIPE,
                          stderr=STDOUT,
                          universal_newlines=True)

        # set stdout to non-blocking
        fd = self.proc.stdout.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

        while True:
            try:
                self.output += self.proc.stdout.read()
            except IOError as e:
                if e.errno != 11:  # 11 == EAGAIN: no data yet
                    raise
                time.sleep(1)
            if 'ip-up finished' in self.output:
                return
            elif self.proc.poll():
                raise PPPConnectionError(self.proc.returncode, self.output)

    @property
    def laddr(self):
        if not self._laddr:
            try:
                self.output += self.proc.stdout.read()
            except IOError as e:
                if e.errno != 11:  # 11 == EAGAIN: no data yet
                    raise
            result = re.search(r'local  IP address ([\d\.]+)', self.output)
            if result:
                self._laddr = result.group(1)

        return self._laddr

    @property
    def raddr(self):
        if not self._raddr:
            try:
                self.output += self.proc.stdout.read()
            except IOError as e:
                if e.errno != 11:  # 11 == EAGAIN: no data yet
                    raise
            result = re.search(r'remote IP address ([\d\.]+)', self.output)
            if result:
                self._raddr = result.group(1)

        return self._raddr

    def connected(self):
        if self.proc.poll():
            try:
                self.output += self.proc.stdout.read()
            except IOError as e:
                if e.errno != 11:  # 11 == EAGAIN: no data yet
                    raise
            if self.proc.returncode not in [0, 5]:
                raise PPPConnectionError(self.proc.returncode, self.output)
            return False
        elif 'ip-up finished' in self.output:
            return True

        return False

    def disconnect(self):
        try:
            if not self.connected():
                return
        except PPPConnectionError:
            return

        self.proc.send_signal(signal.SIGHUP)
        self.proc.wait()
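
# A standalone sketch of the non-blocking stdout pattern used above: put the
# pipe into O_NONBLOCK and treat "no data yet" as normal. Python 3's buffered
# read() returns None in that case; Python 2 raises IOError with EAGAIN
# (errno 11). The ping command is an illustrative assumption.
import errno
import fcntl
import os
import time
from subprocess import Popen, PIPE, STDOUT

proc = Popen(['ping', '-c', '3', '127.0.0.1'], stdout=PIPE, stderr=STDOUT)
fd = proc.stdout.fileno()
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

output = b''
while proc.poll() is None:
    try:
        chunk = proc.stdout.read()
        if chunk:
            output += chunk
        else:
            time.sleep(0.1)        # nothing available yet; avoid busy-waiting
    except IOError as e:
        if e.errno != errno.EAGAIN:
            raise
        time.sleep(0.1)
output += proc.stdout.read() or b''  # drain whatever arrived before exit
print(output.decode())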
Exemple #36
0
class Matlab(TransplantMaster):
    """An instance of Matlab, running in its own process.

    if ``address`` is supplied, Matlab is started on a remote machine.
    This is done by opening an SSH connection to that machine
    (optionally using user account ``user``), and then starting Matlab
    on that machine. For this to work, `address` must be reachable
    using SSH, ``matlab`` must be in the ``user``'s PATH, and
    ``transplant_remote`` must be in Matlab's ``path`` and `libzmq`
    must be available on the remote machine.

    All Matlab errors are caught in Matlab, and re-raised as
    :class:`TransplantError` in Python. Some Matlab errors cannot be
    caught with try-catch. In this case, Transplant will not be able
    to get a backtrace, but will continue running (as part of
    ``atexit`` in Matlab). If this happens often, performance might
    degrade.

    In case Matlab segfaults or otherwise terminates abnormally,
    Transplant will raise a :class:`TransplantError`, and you will
    need to create a new :class:`Matlab` instance.

    ``SIGINT``/``KeyboardInterrupt`` will be forwarded to Matlab. Be
    aware however, that some Matlab functions silently ignore
    ``SIGINT``, and will continue running regardless.

    Parameters
    ----------
    executable : str
        The executable name, defaults to ``matlab``.
    arguments : tuple
        Additional arguments to supply to the executable, defaults to
        ``-nodesktop``, ``-nosplash``, and on Windows, ``-minimize``.
    msgformat : str
        The communication format to use for talking to Matlab,
        defaults to ``"msgpack"``. For debugging, you can use
        ``"json"`` instead.
    address : str
        An address of a remote SSH-reachable machine on which to call
        Matlab.
    user : str
        The user name to use for the SSH connection (if ``address`` is
        given).
    print_to_stdout : bool
        Whether to print Matlab's output to stdout, defaults to ``True``.
    desktop : bool
        Whether to start Matlab with its full desktop GUI (i.e. without
        ``-nodesktop``), defaults to ``False``.
    jvm : bool
        Whether to start Matlab with the JVM (i.e. without ``-nojvm``),
        defaults to ``True``.

    """

    ProxyObject = MatlabProxyObject

    def __init__(self,
                 executable='matlab',
                 arguments=tuple(),
                 msgformat='msgpack',
                 address=None,
                 user=None,
                 print_to_stdout=True,
                 desktop=False,
                 jvm=True):
        """Starts a Matlab instance and opens a communication channel."""
        if msgformat not in ['msgpack', 'json']:
            raise ValueError('msgformat must be "msgpack" or "json"')

        # build up command line arguments:
        if not desktop:
            if '-nodesktop' not in arguments:
                arguments += '-nodesktop',
            if '-nosplash' not in arguments:
                arguments += '-nosplash',
            if '-minimize' not in arguments and sys.platform in ('cygwin',
                                                                 'win32'):
                arguments += '-minimize',
        if not jvm and '-nojvm' not in arguments:
            arguments += '-nojvm',

        if address is None:
            if sys.platform == 'linux' or sys.platform == 'darwin':
                # generate a valid and unique local pathname
                with tempfile.NamedTemporaryFile() as f:
                    zmq_address = 'ipc://' + f.name
            else:  # cygwin/win32
                # ZMQ does not support ipc:// on Windows, so use tcp:// instead
                from random import randint
                port = randint(49152, 65535)
                zmq_address = 'tcp://127.0.0.1:' + str(port)

            process_arguments = ([executable] + list(arguments) + [
                '-r', "addpath('{}');cd('{}');"
                "transplant_remote('{}','{}','{}');".format(
                    os.path.dirname(__file__), os.getcwd(), msgformat,
                    zmq_address, self._locate_libzmq())
            ])
        else:
            # get local IP address
            from socket import create_connection
            with create_connection((address, 22)) as s:
                local_address, _ = s.getsockname()
            # generate a random port number
            from random import randint
            port = randint(49152, 65535)
            zmq_address = 'tcp://' + local_address + ':' + str(port)
            if user is not None:
                address = '{}@{}'.format(user, address)
            process_arguments = (
                ['ssh', address, executable, '-wait'] + list(arguments) + [
                    '-r', '"transplant_remote {} {} {}"'.format(
                        msgformat, zmq_address, "zmq")
                ])
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            process_arguments += ['-wait']
        self.msgformat = msgformat
        # Create a new ZMQ context instead of sharing the global ZMQ context.
        # We now have ownership of it, and can terminate it with impunity.
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.socket.bind(zmq_address)
        # start Matlab, but make sure that it won't eat the REPL stdin
        # (stdin=DEVNULL).
        self.process = Popen(process_arguments, stdin=DEVNULL, stdout=PIPE)
        if print_to_stdout:
            self._start_reader()
        self.eval('0;')  # no-op. Wait for Matlab startup to complete.

    def exit(self):
        """Close the connection, and kill the process."""
        super().exit()
        self.socket.close()
        self.context.term()

    def _call(self, name, args, nargout=-1):
        """Call a function on the remote."""
        args = list(args)
        try:
            response = self.send_message('call',
                                         name=name,
                                         args=args,
                                         nargout=nargout)
        except KeyboardInterrupt as exc:
            # hand the interrupt down to Matlab:
            self.process.send_signal(SIGINT)
            # receive outstanding message to get ZMQ back in the right state
            if self.msgformat == 'msgpack':
                response = msgpack.unpackb(self.socket.recv(),
                                           raw=False,
                                           max_bin_len=2**31 - 1)
            else:
                response = self.socket.recv_json()
            # continue with the exception
            raise exc

        if response['type'] == 'value':
            return response['value']

    def _decode_function(self, data):
        """Decode a special list to a wrapper function."""

        # Wrap functions in a MatlabFunction class with a __doc__
        # property.
        # However, there are two ways of accessing documentation:
        # - help(func) will access __doc__ on type(func), so __doc__
        #   must be accessible on the class of the returned value.
        # - func.__doc__ must also be accessible on the object itself.
        #
        # The following constructs a new class with the appropriate
        # __doc__ property that is accessible both on the class and
        # the object.

        class classproperty(property):
            def __get__(self, cls, owner):
                return classmethod(self.fget).__get__(None, owner)()

        class ThisFunc(MatlabFunction):
            # only fetch documentation when it is actually needed:
            @classproperty
            def __doc__(_self):
                return self.help(data[1], nargout=1)

        return ThisFunc(self, data[1])

    def __getattr__(self, name):
        """Retrieve a value or function from the remote.

        Global variables are returned as native Python objects or
        :class:`MatlabProxyObject` objects.

        Functions are returned as :class:`MatlabFunction` objects.

        """

        try:
            return self._get_global(name)
        except TransplantError as err:
            # package identifiers for `what` use '/' instead of '.':
            packagedict = self.what(name.replace('.', '/'))
            if not (err.identifier == 'TRANSPLANT:novariable' and packagedict):
                raise err
            else:  # a package of the given name exists. Return a wrapper:

                class MatlabPackage:
                    def __getattr__(self_, attrname):
                        return self.__getattr__(name + '.' + attrname)

                    def __repr__(self_):
                        return "<MatlabPackage {}>".format(name)

                    @property
                    def __doc__(_self):
                        return self.help(name, nargout=1)

                return MatlabPackage()

    def _locate_libzmq(self):
        """Find the full path to libzmq.

        CFFI can import a library by its name, but Matlab's `loadlibrary`
        requires the full library path. This walks the file system, and
        looks for the libzmq binary. If it can't find libzmq in the normal
        library locations, it additionally tries common install
        directories such as a conda installation or the ZMQ Windows
        installer.

        """

        if sys.platform == 'linux' or sys.platform == 'darwin':
            libzmq = ctypes.util.find_library('zmq')
        else:  # cygwin/win32
            libzmq = ctypes.util.find_library('libzmq.dll')

        # depending on the OS, either of these outcomes is possible:
        if libzmq is not None and os.path.isabs(libzmq):
            return libzmq

        # manually try to locate libzmq
        if sys.platform == 'linux':
            # according to man dlopen:
            search_dirs = ((os.getenv('LD_LIBRARY_PATH') or '').split(':') +
                           self._read_ldsoconf('/etc/ld.so.conf') +
                           self._ask_ld_for_paths() +
                           ['/lib/', '/lib64/', '/usr/lib/', '/usr/lib64/'])
            extension = '.so'
        elif sys.platform == 'darwin':
            # according to man dlopen:
            search_dirs = (
                (os.getenv('LD_LIBRARY_PATH') or '').split(':') +
                (os.getenv('DYLD_LIBRARY_PATH') or '').split(':') +
                (os.getenv('DYLD_FALLBACK_PATH') or '').split(':') +
                [os.getenv('HOME') + '/lib', '/usr/local/lib', '/usr/lib'])
            extension = '.dylib'
        elif sys.platform == 'win32' or sys.platform == 'cygwin':
            # according to https://msdn.microsoft.com/en-us/library/windows/desktop/ms682586(v=vs.85).aspx
            search_dirs = ((os.getenv('PATH') or '').split(':') +
                           ['C:/Program Files/ZeroMQ*/bin'])
            extension = '.dll'

        if libzmq is None:
            libzmq = '*zmq*' + extension

        # add anaconda libzmq install locations:
        search_dirs.append(sys.prefix + '/lib')
        search_dirs.append(os.path.dirname(zmq.__file__))

        for directory in search_dirs:
            candidates = glob(directory + '/' + libzmq)
            if candidates:
                return candidates[0]

        raise RuntimeError('could not locate libzmq for Matlab')

    def _ask_ld_for_paths(self):
        """Asks `ld` for the paths it searches for libraries."""

        try:
            ld = Popen(['ld', '--verbose'], stdin=DEVNULL, stdout=PIPE)
            output = ld.stdout.read().decode()
        except Exception:  # ld not installed or output unreadable; no extra paths
            return []

        search_dirs = re.compile(r'SEARCH_DIR\(([^)]*)\)').findall(output)
        return [d.strip(' "') for d in search_dirs]

    def _read_ldsoconf(self, file):
        """Read paths from a library list referenced from /etc/ld.so.conf."""

        search_dirs = []
        with open(file) as f:
            for line in f:
                if '#' in line:
                    line = line.split('#')[0]
                if line.startswith('include'):
                    for search_dir in glob(line[len('include'):].strip()):
                        search_dirs += self._read_ldsoconf(search_dir)
                elif os.path.isabs(line):
                    search_dirs.append(line.strip())

        return search_dirs
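
# A minimal usage sketch for the class above, assuming the transplant package
# and a local `matlab` executable are available. Attribute access resolves
# through __getattr__, so m.sqrt becomes a MatlabFunction proxy that runs
# inside the Matlab process.
m = Matlab()                # starts Matlab and blocks on the handshake no-op
m.eval("disp('hello')")     # run arbitrary Matlab code
print(m.sqrt(2.0))          # functions resolve through __getattr__
m.exit()                    # shuts down Matlab, the socket, and the ZMQ context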
Exemple #37
0
    def inner_run(self, *args, **options):
        import sys

        shutdown_message = options.get('shutdown_message', '')
        do_reload = options.get('use_reloader', True)

        # We use the old dev appserver if threading is disabled or --old was passed
        use_old_dev_appserver = options.get(
            'use_old_dev_appserver') or not options.get("use_threading")
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'

        from djangae.utils import find_project_root, data_root
        from djangae.sandbox import _find_sdk_from_python_path

        from django.conf import settings
        from django.utils import translation

        # Check for app.yaml
        expected_path = os.path.join(find_project_root(), "app.yaml")
        if not os.path.exists(expected_path):
            sys.stderr.write("Unable to find app.yaml at '%s'\n" %
                             expected_path)
            sys.exit(1)

        self.stdout.write("Validating models...\n\n")
        self.validate(display_num_errors=True)
        self.stdout.write(
            ("%(started_at)s\n"
             "Django version %(version)s, using settings %(settings)r\n"
             "Starting development server at http://%(addr)s:%(port)s/\n"
             "Quit the server with %(quit_command)s.\n") % {
                 "started_at": datetime.now().strftime('%B %d, %Y - %X'),
                 "version": self.get_version(),
                 "settings": settings.SETTINGS_MODULE,
                 "addr": self._raw_ipv6 and '[%s]' % self.addr or self.addr,
                 "port": self.port,
                 "quit_command": quit_command,
             })
        sys.stdout.write("\n")
        sys.stdout.flush()

        # django.core.management.base forces the locale to en-us. We should
        # set it up correctly for the first request (particularly important
        # in the "--noreload" case).
        translation.activate(settings.LANGUAGE_CODE)

        # Will have been set by setup_paths
        sdk_path = _find_sdk_from_python_path()

        if use_old_dev_appserver:
            dev_appserver = os.path.join(sdk_path, "old_dev_appserver.py")
            command = [
                dev_appserver,
                find_project_root(),
                "-p",
                self.port,
                "-h",
                self.addr,
                "--use_sqlite",
                "--high_replication",
                "--allow_skipped_files",
            ]
        else:
            dev_appserver = os.path.join(sdk_path, "dev_appserver.py")
            command = [
                dev_appserver,
                find_project_root(), "--port", self.port, "--host", self.addr,
                "--admin_port",
                str(int(self.port) + 1), "--automatic_restart",
                "True" if do_reload else "False", "--allow_skipped_files"
            ]

        process = Popen(command,
                        stdout=sys.__stdout__,
                        stderr=sys.__stderr__,
                        cwd=find_project_root())

        # This makes sure that dev_appserver gets killed on reload
        import atexit
        atexit.register(process.kill)

        try:
            process.wait()
        except KeyboardInterrupt:
            # Tell the dev appserver to shut down, forcibly killing it
            # if it takes too long
            process.send_signal(signal.SIGTERM)
            time.sleep(2)
            process.kill()

            if shutdown_message:
                sys.stdout.write(shutdown_message)

        # Some weird race condition sometimes makes this None...
        if sys:
            sys.exit(process.returncode)
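
# A standalone sketch of the atexit pattern above: tie the child's lifetime
# to the parent so reloads cannot leak orphaned servers. The sleep command
# and the two-second grace period mirror inner_run() but are illustrative.
import atexit
import signal
import time
from subprocess import Popen

child = Popen(['sleep', '60'])
atexit.register(child.kill)        # guarantees cleanup on interpreter exit

try:
    child.wait()
except KeyboardInterrupt:
    child.send_signal(signal.SIGTERM)  # ask the child to shut down cleanly
    time.sleep(2)
    child.kill()                       # then force it, as inner_run() does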
Exemple #38
0
    else:
        kwargs = {}
    p = Popen([sys.executable, __file__, 'subprocess'], stdout=PIPE, **kwargs)
    line = p.stdout.readline()
    if not isinstance(line, str):
        line = line.decode('ascii')
    # Windows needs the \n in the string to write (because of buffering), but
    # because of newline handling it doesn't make it through the read; whereas
    # it does on other platforms. Universal newlines is broken on Py3, so the best
    # thing to do is to strip it
    line = line.strip()
    assert line == 'ready', line
    # On Windows, we have to send the CTRL_BREAK_EVENT (which seems to terminate the process); SIGINT triggers
    # "ValueError: Unsupported signal: 2". The CTRL_C_EVENT is ignored on Python 3 (but not Python 2).
    # So this test doesn't test much on Windows.
    p.send_signal(
        signal.SIGINT if not WIN else getattr(signal, 'CTRL_BREAK_EVENT'))
    # Wait up to 3 seconds for child process to die
    for i in range(30):
        if p.poll() is not None:
            break
        time.sleep(0.1)
    else:
        # Kill unresponsive child and exit with error 1
        p.terminate()
        p.wait()
        sys.exit(1)

    # If we get here, it's because we caused the process to exit; it
    # didn't hang. Under Windows, however, we have to use CTRL_BREAK_EVENT,
    # which has an arbitrary returncode depending on versions (so does CTRL_C_EVENT
    # on Python 2). We still
Exemple #39
0
class Local(Runner):
    """
    Execute a command on the local system in a subprocess.

    .. note::
        When Invoke itself is executed without a controlling terminal (e.g.
        when ``sys.stdin`` lacks a useful ``fileno``), it's not possible to
        present a handle on our PTY to local subprocesses. In such situations,
        `Local` will fallback to behaving as if ``pty=False`` (on the theory
        that degraded execution is better than none at all) as well as printing
        a warning to stderr.

        To disable this behavior, say ``fallback=False``.
    """
    def __init__(self, context):
        super(Local, self).__init__(context)
        # Bookkeeping var for pty use case
        self.status = None

    def should_use_pty(self, pty=False, fallback=True):
        use_pty = False
        if pty:
            use_pty = True
            # TODO: pass in & test in_stream, not sys.stdin
            if not has_fileno(sys.stdin) and fallback:
                if not self.warned_about_pty_fallback:
                    sys.stderr.write("WARNING: stdin has no fileno; falling back to non-pty execution!\n") # noqa
                    self.warned_about_pty_fallback = True
                use_pty = False
        return use_pty

    def read_proc_stdout(self, num_bytes):
        # Obtain useful read-some-bytes function
        if self.using_pty:
            # Need to handle spurious OSErrors on some Linux platforms.
            try:
                data = os.read(self.parent_fd, num_bytes)
            except OSError as e:
                # Only eat this specific OSError so we don't hide others
                if "Input/output error" not in str(e):
                    raise
                # The bad OSErrors happen after all expected output has
                # appeared, so we return a falsey value, which triggers the
                # "end of output" logic in code using reader functions.
                data = None
        else:
            data = os.read(self.process.stdout.fileno(), num_bytes)
        return data

    def read_proc_stderr(self, num_bytes):
        # NOTE: when using a pty, this will never be called.
        # TODO: do we ever get those OSErrors on stderr? Feels like we could?
        return os.read(self.process.stderr.fileno(), num_bytes)

    def _write_proc_stdin(self, data):
        # NOTE: parent_fd from os.fork() is a read/write pipe attached to our
        # forked process' stdout/stdin, respectively.
        fd = self.parent_fd if self.using_pty else self.process.stdin.fileno()
        # Try to write, ignoring broken pipes if encountered (implies child
        # process exited before the process piping stdin to us finished;
        # there's nothing we can do about that!)
        try:
            return os.write(fd, data)
        except OSError as e:
            if 'Broken pipe' not in str(e):
                raise

    def start(self, command, shell, env):
        if self.using_pty:
            if pty is None: # Encountered ImportError
                sys.exit("You indicated pty=True, but your platform doesn't support the 'pty' module!") # noqa
            cols, rows = pty_size()
            self.pid, self.parent_fd = pty.fork()
            # If we're the child process, load up the actual command in a
            # shell, just as subprocess does; this replaces our process - whose
            # pipes are all hooked up to the PTY - with the "real" one.
            if self.pid == 0:
                # TODO: both pty.spawn() and pexpect.spawn() do a lot of
                # setup/teardown involving tty.setraw, getrlimit, signal.
                # Ostensibly we'll want some of that eventually, but if
                # possible write tests - integration-level if necessary -
                # before adding it!
                #
                # Set pty window size based on what our own controlling
                # terminal's window size appears to be.
                # TODO: make subroutine?
                winsize = struct.pack('HHHH', rows, cols, 0, 0)
                fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
                # Use execve for bare-minimum "exec w/ variable # args + env"
                # behavior. No need for the 'p' (use PATH to find executable)
                # for now.
                # TODO: see if subprocess is using equivalent of execvp...
                os.execve(shell, [shell, '-c', command], env)
        else:
            self.process = Popen(
                command,
                shell=True,
                executable=shell,
                env=env,
                stdout=PIPE,
                stderr=PIPE,
                stdin=PIPE,
            )

    @property
    def process_is_finished(self):
        if self.using_pty:
            # NOTE:
            # https://github.com/pexpect/ptyprocess/blob/4058faa05e2940662ab6da1330aa0586c6f9cd9c/ptyprocess/ptyprocess.py#L680-L687
            # implies that Linux "requires" use of the blocking, non-WNOHANG
            # version of this call. Our testing doesn't verify this, however,
            # so...
            # NOTE: It does appear to be totally blocking on Windows, so our
            # issue #351 may be totally unsolvable there. Unclear.
            pid_val, self.status = os.waitpid(self.pid, os.WNOHANG)
            return pid_val != 0
        else:
            return self.process.poll() is not None

    def send_interrupt(self, interrupt):
        # NOTE: No need to reraise the interrupt since we have full control
        # over the local process and can kill it.
        if self.using_pty:
            os.kill(self.pid, SIGINT)
        else:
            # Use send_signal with platform-appropriate signal (Windows doesn't
            # support SIGINT unfortunately, only SIGTERM).
            # NOTE: could use subprocess.terminate() (which is cross-platform)
            # but feels best to use SIGINT as much as we possibly can as it's
            # most appropriate. terminate() always sends SIGTERM.
            # NOTE: in interactive POSIX terminals, this is technically
            # unnecessary as Ctrl-C submits the INT to the entire foreground
            # process group (which will be both Invoke and its spawned
            # subprocess). However, it doesn't seem to hurt, & ensures that a
            # *non-interactive* SIGINT is forwarded correctly.
            self.process.send_signal(SIGINT if not WINDOWS else SIGTERM)

    def returncode(self):
        if self.using_pty:
            return os.WEXITSTATUS(self.status)
        else:
            return self.process.returncode

    def stop(self):
        # No explicit close-out required (so far).
        pass
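
# A standalone sketch of the pty branch of read_proc_stdout() above: on
# Linux, reading the pty master after the child exits raises OSError
# ("Input/output error"), which simply marks the end of output. The echo
# command is an illustrative assumption.
import os
import pty

pid, master_fd = pty.fork()
if pid == 0:                     # child: replace ourselves with the command
    os.execvp('echo', ['echo', 'hello from the pty'])

chunks = []
while True:
    try:
        data = os.read(master_fd, 1024)
    except OSError:              # EIO here means the child closed the pty
        data = b''
    if not data:
        break
    chunks.append(data)
os.waitpid(pid, 0)               # reap the child
print(b''.join(chunks).decode())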
Exemple #40
0
def makeServer():
    p = Popen(['python', 'tests/rss_server.py', str(port)])
    time.sleep(1)  # Give the server time to start
    yield None
    p.send_signal(SIGINT)
    time.sleep(0.5)  # Give the server time to shutdown
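
# makeServer() above follows the generator-fixture pattern: everything before
# the yield is setup, everything after it is teardown. A hedged pytest
# equivalent (the decorator and fixture name are assumptions; the original
# snippet does not show them, and `port` is a module-level value there):
import time
from signal import SIGINT
from subprocess import Popen

import pytest

port = 8080  # illustrative; the original reads a module-level `port`

@pytest.fixture
def rss_server():
    p = Popen(['python', 'tests/rss_server.py', str(port)])
    time.sleep(1)              # give the server time to start
    yield p                    # the test body runs here
    p.send_signal(SIGINT)      # graceful shutdown, like Ctrl+C
    time.sleep(0.5)            # give the server time to shut down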
Exemple #41
0
class PPPConnection():
    def __repr__(self):
        return type(self).__name__

    def __init__(self, *args, **kwargs):
        # Logging setup.
        self.logger = logging.getLogger(__name__)

        self._laddr = None
        self._raddr = None
        self.proc = None

        self.output = ''

        self._commands = []

        # This makes it harder to kill pppd, so we default it to off for now.
        # It's redundant anyway for the CLI.
        if kwargs.pop('sudo', False):
            sudo_path = kwargs.pop('sudo_path', '/usr/bin/sudo')
            if not os.path.isfile(sudo_path) or not os.access(
                    sudo_path, os.X_OK):
                raise IOError('%s not found' % sudo_path)
            self._commands.append(sudo_path)

        pppd_path = kwargs.pop('pppd_path', '/usr/sbin/pppd')
        if not os.path.isfile(pppd_path) or not os.access(pppd_path, os.X_OK):
            raise IOError('%s not found' % pppd_path)

        self._commands.append(pppd_path)

        for k, v in kwargs.items():
            self._commands.append(k)
            self._commands.append(v)
        self._commands.extend(args)
        self._commands.append('nodetach')

    # EFFECTS: Spins out a new thread that connects to the network with a given
    #          timeout value. Default to DEFAULT_CONNECT_TIMEOUT seconds.
    #          Returns true if successful, false otherwise.
    def connect(self, timeout=DEFAULT_CONNECT_TIMEOUT):

        self.logger.info('Starting pppd')
        self.proc = Popen(self._commands, stdout=PIPE, stderr=STDOUT)

        # set stdout to non-blocking
        fd = self.proc.stdout.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

        result = False
        try:
            result = self.waitForPPPSuccess(timeout)
        except Exception as e:
            self.logger.error(e)

        if not result and self.proc and (self.proc.poll() is None):
            self.logger.debug('Killing pppd')
            self.proc.send_signal(signal.SIGTERM)
            time.sleep(1)

        return result

    def readFromPPP(self):
        try:
            pppd_out = self.proc.stdout.read()
            if pppd_out is not None:
                self.output += pppd_out.decode()
        except IOError as e:
            if e.errno != errno.EAGAIN:
                raise
            time.sleep(1)

    def waitForPPPSuccess(self, timeout):
        starttime = time.time()
        while (time.time() - starttime) < timeout:
            self.readFromPPP()

            if self.laddr is not None and self.raddr is not None:
                return True

            if 'Modem hangup' in self.output:
                raise PPPError(
                    'Modem hangup - possibly due to an unregistered SIM')
            elif self.proc.poll():
                raise PPPConnectionError(self.proc.returncode, self.output)
        return False

    # EFFECTS: Disconnects from the network.
    def disconnect(self):
        if self.proc and self.proc.poll() is None:
            self.proc.send_signal(signal.SIGTERM)
            time.sleep(1)

    # EFFECTS: Returns true if a cellular connection is established.
    def connected(self):
        if self.proc and self.proc.poll():
            self.readFromPPP()
            if self.proc.returncode not in [0, 5]:
                raise PPPConnectionError(self.proc.returncode, self.output)
            return False
        elif self.laddr is not None and self.raddr is not None:
            return True

        return False

    # EFFECTS: Returns the local IP address.
    @property
    def laddr(self):
        if self.proc and not self._laddr:
            self.readFromPPP()
            result = re.search(r'local  IP address ([\d\.]+)', self.output)
            if result:
                self._laddr = result.group(1)

        return self._laddr

    # EFFECTS: Returns the remote IP address.
    @property
    def raddr(self):
        if self.proc and not self._raddr:
            self.readFromPPP()
            result = re.search(r'remote IP address ([\d\.]+)', self.output)
            if result:
                self._raddr = result.group(1)

        return self._raddr
Exemple #42
0
class Process(BaseComponent):

    channel = "process"

    def init(self, args, cwd=None, shell=False):
        self.args = args
        self.cwd = cwd
        self.shell = shell

        self.p = None
        self.stderr = BytesIO()
        self.stdout = BytesIO()

        self._status = None
        self._terminated = False

        self._stdout_closed = False
        self._stderr_closed = False

        self._stdin = None
        self._stderr = None
        self._stdout = None

        self._stdin_closed_handler = None
        self._stderr_read_handler = None
        self._stdout_read_handler = None
        self._stderr_closed_handler = None
        self._stdout_closed_handler = None

    def start(self):
        self.p = Popen(self.args,
                       cwd=self.cwd,
                       shell=self.shell,
                       stdin=PIPE,
                       stderr=PIPE,
                       stdout=PIPE)

        self.stderr = BytesIO()
        self.stdout = BytesIO()

        self._status = None

        self._stdin = File(self.p.stdin,
                           channel="{0:d}.stdin".format(
                               self.p.pid)).register(self)

        self._stderr = File(self.p.stderr,
                            channel="{0:d}.stderr".format(
                                self.p.pid)).register(self)

        self._stdout = File(self.p.stdout,
                            channel="{0:d}.stdout".format(
                                self.p.pid)).register(self)

        self._stderr_read_handler = self.addHandler(
            handler("read", channel="{0:d}.stderr".format(self.p.pid))(
                self.__class__._on_stderr_read))

        self._stdout_read_handler = self.addHandler(
            handler("read", channel="{0:d}.stdout".format(self.p.pid))(
                self.__class__._on_stdout_read))

        self._stderr_closed_handler = self.addHandler(
            handler("closed", channel="{0:d}.stderr".format(self.p.pid))(
                self.__class__._on_stderr_closed))

        self._stdout_closed_handler = self.addHandler(
            handler("closed", channel="{0:d}.stdout".format(self.p.pid))(
                self.__class__._on_stdout_closed))

        self.fire(started(self))

    @staticmethod
    def _on_stdout_closed(self):
        self._stdout_closed = True

    @staticmethod
    def _on_stderr_closed(self):
        self._stderr_closed = True

    def stop(self):
        if self.p is not None:
            self.p.terminate()

    def kill(self):
        self.p.kill()

    def signal(self, signal):
        self.p.send_signal(signal)

    def wait(self):
        return self.p.wait()

    def write(self, data):
        self.fire(write(data), "{0:d}.stdin".format(self.p.pid))

    @property
    def status(self):
        if getattr(self, "p", None) is not None:
            return self.p.poll()

    @staticmethod
    def _on_stderr_read(self, data):
        self.stderr.write(data)

    @staticmethod
    def _on_stdout_read(self, data):
        self.stdout.write(data)

    @handler("generate_events")
    def _on_generate_events(self, event):
        if self.p is not None and self._status is None:
            self._status = self.p.poll()

        if self._status is not None and self._stderr_closed \
                and self._stdout_closed and not self._terminated:
            self._terminated = True
            self.removeHandler(self._stderr_read_handler)
            self.removeHandler(self._stdout_read_handler)
            self.removeHandler(self._stderr_closed_handler)
            self.removeHandler(self._stdout_closed_handler)
            self.fire(stopped(self))
            event.reduce_time_left(0)
            event.stop()
        else:
            event.reduce_time_left(TIMEOUT)
Exemple #43
0
class VideoPlayer:
    def __init__(self, hostname: str, login: str):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((hostname.encode('utf-8'), 37890))

        self.s.send((login + '\r\n').encode('utf-8'))

        data = self.s.recv(6).decode('utf-8')
        if data != 'DATA\r\n':
            eprint("expected response 'DATA', got", data)

        self._queue = Queue()
        self._thread = Thread(target=self._handle, args=())
        self._thread.start()

        self.total = 0
        self.current_position = 0
        self.discard_until = 0

        self.vlc = None

    def __del__(self):
        self._queue.put(None)
        self._thread.join()

        self.stop_vlc()

    def _handle(self):
        while True:
            buf = self._queue.get()
            if buf is None:  # end request
                break

            if buf.stream == 255:
                info = buf.data_as_string()
                eprint('data-stream-info', info, self.current_position)
                if info.startswith('DISCARD'):
                    self.stop_vlc()
                continue

            self.start_vlc()
            self.vlc.stdin.write(buf.data)

    def start_vlc(self):
        if self.vlc is None:
            self.vlc = Popen(
                ['vlc', '-', '--intf', 'rc', '--rc-host', 'localhost:23456'],
                stdin=PIPE)

    def stop_vlc(self):
        if self.vlc:
            self.vlc.stdin.close()
            self.vlc.send_signal(2)  # SIGINT
            self.vlc.wait()
            self.vlc = None

    def vlc_rc_send(self, cmd: str):
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(('localhost', 23456))
            sock.send(cmd.encode('utf-8'))
            sock.close()
        except OSError:
            eprint('could not connect to vlc-rc')

    def trickspeed(self, mode: int):
        eprint('trickspeed', mode)
        if self.vlc is None:
            eprint(' vlc none')
            return

        if mode == 0:
            self.vlc_rc_send('pause\n')
        elif mode == 1:
            self.vlc_rc_send('play\n')

    def process(self):
        data = read_exact(self.s, 13)
        if len(data) != 13:
            eprint('header-length failed')
            return False

        buf = VideoBuffer(data)

        if not buf.set_data(read_exact(self.s, buf.length)):
            return False

        self._queue.put(buf)

        if self._queue.qsize() % 200 == 0:
            eprint('big queue', self._queue.qsize())

        # self.current_position = pos

        # eprint('writing', len(data))
        # if self.current_position >= self.discard_until:
        #     eprint('started')
        #     self.vlc.stdin.write(data)
        #     eprint('after write vlc')
        # else:
        #     eprint('discarding', self.current_position, self.discard_until)

        # self.total += l
        # if self.total > 5e7:
        #     eprint('50MB received')
        #     self.total = 0

        return True
Exemple #44
0
class DebugNode( object ):
   """ Wraps the steemd debug node plugin for easier automated testing of the Steem Network"""

   def __init__( self, steemd, data_dir, args='', plugins=[], apis=[], steemd_out=None, steemd_err=None ):
      """ Creates a steemd debug node.

      It can be run by using 'with debug_node:'
      While in the context of 'with' the debug node will continue to run.
      Upon exit of 'with' the debug node will exit and clean up temporary files.
      This class also contains methods to allow basic manipulation of the blockchain.
      For all other requests, the python-steem library should be used.

      args:
         steemd -- The string path to the location of the steemd binary
         data_dir -- The string path to an existing steemd data directory which will be used to pull blocks from.
         args -- Other string args to pass to steemd.
         plugins -- Any additional plugins to start with the debug node. Modify plugins via DebugNode.plugins
         apis -- Any additional APIs to have available. APIs will retain this order for accessibility starting at id 3.
            database_api is 0, login_api is 1, and debug_node_api is 2. Modify apis with DebugNode.apis
         steemd_out -- A stream for steemd's stdout. Default is to pipe to /dev/null
         steemd_err -- A stream for steemd's stderr. Default is to pipe to /dev/null
      """
      self._data_dir = None
      self._debug_key = None
      self._FNULL = None
      self._rpc = None
      self._steemd_bin = None
      self._steemd_lock = None
      self._steemd_process = None
      self._temp_data_dir = None

      self._steemd_bin = Path( steemd )
      if( not self._steemd_bin.exists() ):
         raise ValueError( 'steemd does not exist' )
      if( not self._steemd_bin.is_file() ):
         raise ValueError( 'steemd is not a file' )

      self._data_dir = Path( data_dir )
      if( not self._data_dir.exists() ):
         raise ValueError( 'data_dir either does not exist or is not a properly constructed steem data directory' )
      if( not self._data_dir.is_dir() ):
         raise ValueError( 'data_dir is not a directory' )

      self.plugins = plugins
      self.apis = apis

      if( args != '' ):
         self._args = args.split()  # split on whitespace ('\\s' was treated as a literal, not a regex)
      else:
         self._args = list()

      self._FNULL = open( devnull, 'w' )
      if( steemd_out != None ):
         self.steemd_out = steemd_out
      else:
         self.steemd_out = self._FNULL

      if( steemd_err != None ):
         self.steemd_err = steemd_err
      else:
         self.steemd_err = self._FNULL

      self._debug_key = '5JHNbFNDg834SFj8CMArV6YW7td4zrPzXveqTfaShmYVuYNeK69'
      self._steemd_lock = Lock()


   def __enter__( self ):
      self._steemd_lock.acquire()

      # Setup temp directory to use as the data directory for this
      self._temp_data_dir = TemporaryDirectory()

      for child in self._data_dir.iterdir():
         if( child.is_dir() ):
            copytree( str( child ), str( self._temp_data_dir.name ) + '/' + child.name )

      db_version = self._data_dir / 'db_version'  # Path(...).name would have dropped the parent dirs
      if( db_version.exists() and not db_version.is_dir() ):
         copy2( str( db_version ), str( self._temp_data_dir.name ) + '/db_version' )

      config = Path( self._temp_data_dir.name ) / 'config.ini'
      config.touch()
      config.write_text( self._get_config() )

      steemd = [ str( self._steemd_bin ), '--data-dir=' + str( self._temp_data_dir.name ) ]
      steemd.extend( self._args )

      self._steemd_process = Popen( steemd, stdout=self.steemd_out, stderr=self.steemd_err )
      self._steemd_process.poll()
      sleep( 5 )
      if( not self._steemd_process.returncode ):
         self._rpc = SteemNodeRPC( 'ws://127.0.0.1:8095', '', '' )
      else:
         raise Exception( "steemd did not start properly..." )

   def __exit__( self, exc, value, tb ):
      self._rpc = None

      if( self._steemd_process != None ):
         self._steemd_process.poll()

         if( not self._steemd_process.returncode ):
            self._steemd_process.send_signal( SIGINT )

            sleep( 7 )
            self._steemd_process.poll()

            if( not self._steemd_process.returncode ):
               self._steemd_process.send_signal( SIGTERM )

               sleep( 5 )
               self._steemd_process.poll()

               if( self._steemd_process.returncode ):
                  logging.error( 'steemd did not properly shut down after SIGINT and SIGTERM. User intervention may be required.' )

      self._steemd_process = None
      self._temp_data_dir.cleanup()
      self._temp_data_dir = None
      self._steemd_lock.release()


   def _get_config( self ):
      return "# no seed-node in config file or command line\n" \
          + "p2p-endpoint = 127.0.0.1:2001       # bind to localhost to prevent remote p2p nodes from connecting to us\n" \
          + "rpc-endpoint = 127.0.0.1:8095       # bind to localhost to secure RPC API access\n" \
          + "enable-plugin = witness debug_node " + " ".join( self.plugins ) + "\n" \
          + "public-api = database_api login_api debug_node_api " + " ".join( self.apis ) + "\n"


   def debug_generate_blocks( self, count ):
      """
      Generate blocks on the current chain. Pending transactions will be applied; otherwise the
      blocks will be empty.

      The debug node plugin requires a WIF key to sign blocks with. This class uses the key
      5JHNbFNDg834SFj8CMArV6YW7td4zrPzXveqTfaShmYVuYNeK69 which was generated from
      `get_dev_key steem debug`. Do not use this key on the live chain for any reason.

      args:
         count -- The number of new blocks to generate.

      returns:
         int: The number of blocks actually pushed.
      """
      if( count <= 0 ):
         raise ValueError( "count must be a positive non-zero number" )
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_generate_blocks",["' + self._debug_key + '",' + str( count ) + ']], "id": 1}' ) )


   def debug_generate_blocks_until( self, timestamp, generate_sparsely=True ):
      """
      Generate blocks up until a head block time rather than a specific number of blocks. As with
      `debug_generate_blocks` all blocks will be empty unless there were pending transactions.

      The debug node plugin requires a WIF key to sign blocks with. This class uses the key
      5JHNbFNDg834SFj8CMArV6YW7td4zrPzXveqTfaShmYVuYNeK69 which was generated from
      `get_dev_key steem debug`. Do not use this key on the live chain for any reason.

      args:
         timestamp -- The desired new head block time. This is a POSIX timestamp.
         generate_sparsely -- True if you wish to skip all intermediate blocks between the current
            head block time and the desired head block time. This is useful to trigger events, such
            as payouts and bandwidth updates, without generating blocks. However, many automatic chain
            updates (such as block inflation) will not continue at their normal rate as they are only
            calculated when a block is produced.

      returns:
         (time, int): A tuple including the new head block time and the number of blocks that were
            generated.
      """
      if( not isinstance( timestamp, int ) ):
         raise ValueError( "Time must be a int" )
      generate_sparsely_str = "true"
      if( not generate_sparsely ):
         generate_sparsely_str = "false"

      iso_string = datetime.fromtimestamp( timestamp, timezone.utc ).isoformat().split( '+' )[0].split( '-' )
      if( len( iso_string ) == 4 ):
         iso_string = iso_string[:-1]
      iso_string = '-'.join( iso_string )

      print( iso_string )
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_generate_blocks_until",["' + self._debug_key + '","' + iso_string + '","' + generate_sparsely_str + '"]], "id": 1}' ) )


   def debug_set_hardfork( self, hardfork_id ):
      """
      Schedules a hardfork to happen on the next block. call `debug_generate_blocks( 1 )` to trigger
      the hardfork. All hardforks with id less than or equal to hardfork_id will be scheduled and
      triggered.

      args:
         hardfork_id: The id of the hardfork to set. Hardfork IDs start at 1 (0 is genesis) and increment
            by one for each hardfork. The maximum value is BMCHAIN_NUM_HARDFORKS in chain/hardfork.d/0-preamble.hf
      """
      if( hardfork_id < 0 ):
         raise ValueError( "hardfork_id cannot be negative" )

      self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_set_hardfork",[' + str( hardfork_id ) + ']], "id":1}' ) )


   def debug_has_hardfork( self, hardfork_id ):
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_has_hardfork",[' + str( hardfork_id ) + ']], "id":1}' ) )


   def debug_get_witness_schedule( self ):
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_get_witness_schedule",[]], "id":1}' ) )


   def debug_get_hardfork_property_object( self ):
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_get_hardfork_property_object",[]], "id":1}' ) )
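
# A minimal usage sketch for DebugNode, following its own docstring: the node
# runs for the duration of the 'with' block. Both paths below are
# illustrative assumptions.
debug_node = DebugNode( '/usr/local/bin/steemd', '/home/user/.steemd' )
with debug_node:
   debug_node.debug_set_hardfork( 14 )
   debug_node.debug_generate_blocks( 1 )   # triggers the scheduled hardfork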
Exemple #45
0
class Session(QObject, LogCaller, ParserListener, SessionCommon):
    """ A session is a caffe training process.

    Objects of this class encapsulate the state of a process, control the
    process (starting, pausing etc.), handle events and provide the session
    output stream.
    """

    # event signals
    stateChanged = pyqtSignal(object)
    stateDictChanged = pyqtSignal(object, bool)
    iterationChanged = pyqtSignal()
    snapshotAdded = pyqtSignal(object)

    def __init__(self,
                 project,
                 directory=None,
                 sid=None,
                 parse_old=False,
                 caffe_bin=None,
                 last_solverstate=None,
                 last_caffemodel=None,
                 state_dictionary=None):
        super(Session, self).__init__()
        self.caller_id = None
        self.state = State.UNDEFINED
        self.invalidErrorsList = []
        self.sid = sid
        self.directory = directory
        self.rid = 0
        self.project = project
        self.logs = None
        if self.directory is None:
            if self.sid is None:
                raise ValueError(
                    'Either directory or sid must be provided to create a session.'
                )
            self.directory = self.__createSessionDirectoryName()
            self.logs = os.path.join(self.directory, 'logs')
        else:
            self.logs = os.path.join(directory, 'logs')
            dir_sid, self.rid = self.__parseSessionId()
            if self.sid is None:
                self.sid = dir_sid
            else:
                Log.log(
                    'Provided sid and directory do not match (' +
                    str(self.sid) + ' vs. ' + str(dir_sid) + '), ' +
                    'provided sid is used.', self.getCallerId())

        self.parse_old = parse_old
        self.caffe_bin = caffe_bin  # overrides project caffe_root if necessary, i.e. if deployed to another system
        self.pretrainedWeights = None

        self.last_solverstate = last_solverstate
        self.last_caffemodel = last_caffemodel
        self.state_dictionary = state_dictionary  # state as saved from the network manager, such it can be restored

        self.start_time = self.__parseStartTime()

        self.snapshot_dir = None
        self.snapshot_prefix = None
        self.proc = None
        self.tee = None
        self.parser = None
        self.iteration = 0
        self.max_iter = 1
        self.parser_initialized = False

        self.__getSettingsFromSessionFile()

        if self.state_dictionary is not None:
            self.__parseSettings(self.state_dictionary)

        self.__solverFile = os.path.join(self.directory,
                                         Paths.FILE_NAME_SOLVER)
        self.__netInternalFile = os.path.join(self.directory,
                                              Paths.FILE_NAME_NET_INTERNAL)
        self.__netOriginalFile = os.path.join(self.directory,
                                              Paths.FILE_NAME_NET_ORIGINAL)

        self.lock = Lock()
        self.getSnapshotDirectory()

    # comparison methods for the prioritization of sessions in the thread pool

    def __lt__(self, other):
        if type(other) is not Session:
            return True
        return other.sid < self.sid

    def __gt__(self, other):
        if type(other) is not Session:
            return False
        return other.sid > self.sid

    def __eq__(self, other):
        if type(other) is not Session:
            return False
        return other.sid == self.sid

    def __le__(self, other):
        if type(other) is not Session:
            return True
        return other.sid <= self.sid

    def __ge__(self, other):
        if type(other) is not Session:
            return False
        return other.sid >= self.sid

    def __ne__(self, other):
        if type(other) is not Session:
            return True
        return other.sid != self.sid

    def __createSessionDirectoryName(self):
        """ Return a new session directory with the format YYYYMMDD_hh24mmss_SID
        """
        directory = self.__getDateTimeString()
        directory += '_' + str(self.sid)
        return os.path.join(self.project.getSessionsDirectory(), directory)

    def __ensureDirectory(self, directory):
        """ Creates a directory if it does not exist.
        """
        if directory == '':
            return
        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
                Log.log('Created directory: ' + directory, self.getCallerId())
            except Exception as e:
                Log.error(
                    'Failed to create directory ' + directory + ': ' + str(e),
                    self.getCallerId())

    def __getSettingsFromSessionFile(self):
        filename = os.path.join(self.directory,
                                baristaSessionFile(self.directory))
        if os.path.isfile(filename):
            with open(filename, 'r') as f:
                res = json.load(f)
                self.__parseSettings(res)

    def checkFiles(self):
        """ Check for the existence of the session directories and files.
        """
        if not os.path.exists(self.directory):
            Log.error('Session directory does not exist: ' + self.directory,
                      self.getCallerId())
        if not os.path.exists(self.logs):
            Log.error('Log directory does not exist: ' + self.logs,
                      self.getCallerId())
        if not os.path.exists(self.snapshot_dir):
            Log.error(
                'Snapshot directory does not exist: ' + self.snapshot_dir,
                self.getCallerId())
        if not os.path.exists(
                caffeVersions.getVersionByName(
                    self.project.getCaffeVersion())):
            Log.error(
                'Caffe binary does not exist: ' +
                self.project.getCaffeVersion(), self.getCallerId())

    def __parseSnapshotPrefix(self):
        """ Parse the snapshot prefix from the solver file.
        """
        if self.snapshot_prefix is None:
            sf = os.path.join(self.getDirectory(), self.getSolver())
            if os.path.isfile(sf):
                self.snapshot_prefix = self.project.parseSnapshotPrefixFromFile(
                    sf)
        return

    def getSnapshotPrefix(self):
        """ Return the snapshot prefix which is used for snapshots of this
        session.
        """
        if not self.snapshot_prefix:
            self.__parseSnapshotPrefix()
        if not self.snapshot_prefix:
            self.snapshot_prefix = ''

        return self.snapshot_prefix

    def getSnapshotExtension(self):
        """ Return the file name extension for snapshot files.
        The extensions differ between binaryproto and hdf5 formats.
        """
        extension = None
        if self.last_caffemodel:
            # try to parse the extension from the last_caffemodel
            regex_snapshot = re.compile(r'(\.caffemodel[\.\w-]*)')
            snapshot_match = regex_snapshot.search(self.last_caffemodel)
            if snapshot_match:
                return snapshot_match.group(1)
        else:
            # try to parse the suffix from the last_solverstate
            extension = self.getLastSnapshot()
            if extension:
                regex_snapshot = re.compile(r'\.solverstate([\.\w-]*)')
                snapshot_match = regex_snapshot.search(extension)
                if snapshot_match:
                    ext = snapshot_match.group(1)
                    if ext:
                        # append suffix
                        return '.caffemodel' + ext
        return '.caffemodel'

    def getSolverstateExtension(self):
        """ Return the file name extension for solverstate files.
        The extensions differ between binaryproto and hdf5 formats.
        """
        extension = self.getLastSnapshot()
        if extension:
            regex_snapshot = re.compile(r'(\.solverstate[\.\w-]*)')
            snapshot_match = regex_snapshot.search(extension)
            if snapshot_match:
                return snapshot_match.group(1)
        return '.solverstate'

    def getState(self):
        """ Return the state of the session.
        """
        if self.state == State.FAILED:
            return self.state

        if self.proc is not None:
            if self.iteration == self.max_iter:
                self.state = State.FINISHED
            else:
                self.state = State.RUNNING
        else:
            if len(checkMinimumTrainingRequirements(self)) > 0:
                self.state = State.INVALID
            elif self.iteration == self.max_iter:
                self.state = State.FINISHED
            elif len(self.getSnapshots()) > 0:
                self.state = State.PAUSED
            else:
                self.state = State.WAITING
        return self.state

    def getErrorList(self):
        return self.invalidErrorsList

    def setState(self, state):
        """ Set the state of the session and emit a stateChanged signal.
        """
        self.state = state
        self.stateChanged.emit(self.state)

    def setErrorList(self, errorList):
        self.invalidErrorsList = errorList

    def setStateDict(self, stateDict):
        self.state_dictionary = stateDict
        self.stateDictChanged.emit(self, False)

        return

    def getSessionId(self):
        """ Return the id of the session.
        """
        return self.sid

    def getRunId(self):
        """ Return the run id of the session.

        This id increases every time the process is started/proceeded.
        It enables the user to distinguish between different runs of the
        session.
        """
        return self.rid

    def start(self, solverstate=None, caffemodel=None):
        """ Start the process.

        Return
            True if the process was started
            False if the start failed
        """
        if self.getState() is State.WAITING:
            self.rid += 1
            # (re-)write all session files
            self.save(includeProtoTxt=True)
            # check if the session has its own caffeRoot
            caffeBin = self.caffe_bin
            if not caffeBin:
                # else take the project's caffeRoot path
                caffeBin = caffeVersions.getVersionByName(
                    self.project.getCaffeVersion()).getBinarypath()

            try:
                self.getParser().setLogging(True)

                cmd = [caffeBin, 'train', '-solver', self.getSolver()]
                if solverstate:
                    cmd.append('-snapshot')
                    cmd.append(str(solverstate))
                elif caffemodel:
                    cmd.append('-weights')
                    cmd.append(str(caffemodel))
                self.proc = Popen(cmd,
                                  stdout=PIPE,
                                  stderr=STDOUT,
                                  cwd=self.getDirectory())
                try:
                    self.tee = Popen(
                        ['tee', '-a', self.getRunLogFileName()],
                        stdin=self.proc.stdout,
                        stdout=PIPE)
                except Exception as e:
                    # continue without tee
                    Log.error('Failed to start tee: ' + str(e),
                              self.getCallerId())
                self.setState(State.RUNNING)
                Log.log(
                    'Session ' + self.getRunLogFileName(True) + ' was started',
                    self.getCallerId())
                self.__startParsing()
                return True
            except Exception as e:
                # check if caffe root exists
                Log.error('Failed to start session: ' + str(e),
                          self.getCallerId())
                if not os.path.exists(
                        caffeVersions.getVersionByName(
                            self.project.getCaffeVersion()).getBinarypath()):
                    Log.error(
                        'CAFFE_BINARY directory does not exist: ' +
                        caffeBin +
                        '! Please set CAFFE_BINARY to run a session.',
                        self.getCallerId())
        else:
            Log.error(
                'Could not start a session in state ' + str(self.getState()),
                self.getCallerId())
            # self.setState(State.UNDEFINED)
        return False

    def pause(self):
        """ Pause the process.

        Return
            True if the process was paused
            False if the pause failed
        """
        if self.getState() is State.RUNNING:
            if self.proc:
                self.snapshot()
                # give the caffe process a second to save the state
                time.sleep(1)
                try:
                    if self.proc:
                        self.proc.kill()
                except Exception as e:
                    Log.error('Pausing session failed: ' + str(e))
                try:
                    if self.tee:
                        self.tee.kill()
                except Exception:
                    pass
                self.proc = None
                self.tee = None
                self.last_solverstate = None
                snap = self._getLastSnapshotFromSnapshotDirectory()
                if snap is not None:
                    self.last_solverstate = os.path.basename(snap)
                    regex_iter = re.compile(
                        r'iter_([\d]+)\.solverstate[\.\w-]*$')
                    iter_match = regex_iter.search(self.last_solverstate)
                    if iter_match:
                        self.iteration = int(iter_match.group(1))
                    self.iterationChanged.emit()
                    self.setState(State.PAUSED)
                    Log.log(
                        'Session ' + self.getRunLogFileName(True) +
                        ' was paused', self.getCallerId())
                else:
                    self.setState(State.WAITING)
                    Log.log(
                        'Session ' + self.getRunLogFileName(True) +
                        ' was halted', self.getCallerId())
                self.save()
                return True
            else:
                Log.error(
                    'Could not pause a session in state ' +
                    str(self.getState()), self.getCallerId())
                self.setState(State.UNDEFINED)
        return False

    def proceed(self, snapshot=None):
        """ Continue training from the (last) snapshot.

        Return
            True if the process was continued
            False if the continuation failed
        """
        if self.getState() is State.PAUSED:
            self.__ensureDirectory(self.getSnapshotDirectory())
            self.__ensureDirectory(self.logs)

            if snapshot is None:
                snapshot = self.getLastSnapshot()
            self.rid += 1
            try:
                self.getParser().setLogging(True)
                self.proc = Popen([
                    caffeVersions.getVersionByName(
                        self.project.getCaffeVersion()).getBinarypath(),
                    'train', '-solver',
                    self.getSolver(), '-snapshot', snapshot
                ],
                                  stdout=PIPE,
                                  stderr=STDOUT,
                                  cwd=self.getDirectory())
                try:
                    self.tee = Popen(
                        ['tee', '-a', self.getRunLogFileName()],
                        stdin=self.proc.stdout,
                        stdout=PIPE)
                except Exception as e:
                    # continue without tee
                    Log.error('Failed to start tee: ' + str(e),
                              self.getCallerId())
                self.setState(State.RUNNING)
                Log.log(
                    'Session ' + self.getRunLogFileName(True) +
                    ' was proceeded', self.getCallerId())
                self.__startParsing()
                return True
            except Exception as e:
                # check if caffe root exists
                Log.error('Failed to continue session: ' + str(e),
                          self.getCallerId())
                caffeBin = caffeVersions.getVersionByName(
                    self.project.getCaffeVersion()).getBinarypath()
                if not os.path.exists(caffeBin):
                    Log.error(
                        'CAFFE_BINARY directory does not exist: ' +
                        caffeBin +
                        '! Please set CAFFE_BINARY to run a session.',
                        self.getCallerId())
        elif self.getState() in (State.FAILED, State.FINISHED):
            Log.error(
                'Could not continue a session in state ' +
                str(self.getState()), self.getCallerId())
        return False

    def snapshot(self):
        """ Create a snapshot from the training state.

        Return
            True if the snapshot was created
            False if the snapshot could not be created
        """
        if self.getState() is State.RUNNING:
            if self.proc:
                self.last_solverstate = None
                try:
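                    # the caffe train tool traps SIGHUP and, by default,
                    # writes a snapshot of the current state before resuming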
                    self.proc.send_signal(signal.SIGHUP)
                except Exception as e:
                    Log.error('Failed to take snapshot: ' + str(e),
                              self.getCallerId())
                    return False
                Log.log(
                    'Snapshot was saved for session ' +
                    self.getRunLogFileName(True) + '', self.getCallerId())
                return True
            else:
                Log.error(
                    'Could not take a session snapshot in state ' +
                    str(self.getState()), self.getCallerId())
        return False

    def setFinished(self):
        """ Mark this session as finished.
        """
        self.save()
        self.setState(State.FINISHED)
        self.proc = None

    def getStream(self):
        """ Return the log stream of this session.
        This is an iterator over stdout of the subprocess.
        """
        if self.tee:
            return iter(self.tee.stdout.readline, '')
        if self.proc:
            return iter(self.proc.stdout.readline, '')
        return iter([])

    def getDirectory(self):
        """ Return the session directory, usually a directory of the form
        YYYYMMDD_hh24mmss_SID.
        """
        return self.directory

    def getLogs(self):
        """ Return the log directory.
        """
        if not os.path.exists(self.logs):
            os.makedirs(self.logs)
        return self.logs

    def isRemote(self):
        return False

    def getSnapshotDirectory(self):
        """ Return the snapshot directory.
        """
        if self.snapshot_dir:
            return self.snapshot_dir
        snapshot_prefix = self.getSnapshotPrefix()
        sdir = os.path.dirname(snapshot_prefix)
        self.snapshot_dir = os.path.join(self.getDirectory(), sdir)
        return self.snapshot_dir

    def getSnapshots(self):
        """ Return all snapshot files, keyed by iteration number.
        """
        regex_snapshot = re.compile(r'iter_([\d]+)\.solverstate')
        snaps = {}
        if os.path.exists(self.getSnapshotDirectory()):
            for entry in os.listdir(self.getSnapshotDirectory()):
                snap_match = regex_snapshot.search(entry)
                if snap_match:
                    try:
                        iter_num = int(snap_match.group(1))
                        snaps[iter_num] = entry
                    except Exception:
                        pass
        return snaps

    def getLastModel(self):
        """ Return the name of the last saved caffe model.
        """
        return self.last_caffemodel

    def setLastModel(self, lcm):
        self.last_caffemodel = lcm

    def getPretrainedWeights(self):
        return self.pretrainedWeights

    def setPretrainedWeights(self, weights):
        self.pretrainedWeights = weights

    def setLastSnapshot(self, lss):
        self.last_solverstate = lss

    def getSolver(self, log=False):
        """ Returns the solver prototxt file name.
        When the log flag is set, a message will be sent to the logger console if the file does not exist.
        """
        if log:
            if not os.path.isfile(self.__solverFile):
                Log.log(
                    "This session's solver file: " + self.__solverFile +
                    " does not exist.", self.caller_id)
        return self.__solverFile

    def getOriginalNetFile(self, log=False):
        """ Returns the original net prototxt file name.
        When the log flag is set, a message will be sent to the logger console if the file does not exist.
        """
        if log:
            if not os.path.isfile(self.__netOriginalFile):
                Log.log(
                    "This session's net file: " + self.__netOriginalFile +
                    " does not exist.", self.caller_id)
        return self.__netOriginalFile

    def getInternalNetFile(self, log=False):
        """ Returns the original net prototxt file name.
        When the log flag is set, a message will be sent to the logger console if the file does not exist.
        """
        if log:
            if not os.path.isfile(self.__netInternalFile):
                Log.log(
                    "This session's net file: " + self.__netInternalFile +
                    " does not exist.", self.caller_id)
        return self.__netInternalFile

    def readInternalNetFile(self):
        """ Returns the contents of the internal net prototxt file. """
        path = self.getInternalNetFile()
        with open(path, 'r') as f:
            contents = f.read()
        return contents

    def readDeployedNetAsString(self):
        """ Returns the contents of the deployable net prototxt file. """
        path = self.getInternalNetFile()
        with open(path, 'r') as f:
            dn = DeployedNet(f.read())
        return dn.getProtoTxt()

    def readCaffemodelFile(self, snapshot):
        """ Returns the contents of the .caffemodel file that belongs to snapshot.

        snapshot: string
            Filename without path of the snapshot file.
        """
        path = os.path.join(self.getSnapshotDirectory(), snapshot)
        with open(path, 'r') as f:
            contents = f.read()
        return contents

    def getRunLogFileName(self, basename=False):
        """ Return the name of the logfile with session and run id.
        """
        log_file = self.project.getProjectName() + '_' + str(
            self.getSessionId()) + '.' + str(self.getRunId()) + '.log'
        if basename is True:
            return log_file
        log_file = os.path.join(self.getLogs(), log_file)
        return log_file

    def getLogFileName(self, basename=False):
        """ Return the name of the logfile with session id.
        """
        log_file = self.project.getProjectName() + '_' + str(
            self.getSessionId()) + '.log'
        if basename is True:
            return log_file
        log_file = os.path.join(self.getLogs(), log_file)
        return log_file

    def getLogId(self):
        """ Return the id of the logfile. For local sessions same as the name of the logfile.
        """
        return self.getLogFileName(True)

    def parseOldLogs(self):
        """ Parse all log files in the log directory.
        """
        locked = self.lock.acquire()
        if locked is False:
            return
        try:
            if self.parse_old:
                self.parse_old = False
                log_files = {}
                regex_filename = re.compile(r'[\d]+\.([\d]+)\.log$')
                for entry in os.listdir(self.getLogs()):
                    filename_match = regex_filename.search(entry)
                    if filename_match:
                        # key files by run id
                        try:
                            run_id = int(filename_match.group(1))
                            log_files[run_id] = entry
                        except:
                            pass
                log_list = []
                for run_id in sorted(log_files.keys()):
                    log_file = os.path.join(self.getLogs(), log_files[run_id])
                    log_list.append(log_file)
                con = Concatenator(log_list)
                logs = con.concate()
                for log in logs:
                    try:
                        self.getParser().addLogStream(log)
                    except Exception as e:
                        Log.error(
                            'Failed to parse log file ' +
                            self.getLogFileName(True) + ": " + str(e),
                            self.getCallerId())
        except Exception as e:
            Log.error('Failed to parse old log ' + str(e), self.getCallerId())
        finally:
            if locked:
                self.lock.release()

    def __startParsing(self):
        """ Create a parser and run it in a newly dispatched thread.
        """
        self.parseOldLogs()
        logs = self.getStream()
        self.getParser().addLogStream(logs)
        pool = SessionPool()
        pool.addSession(self)

    def getParser(self):
        """ Return the log parser of this session.
        """
        if self.parser is None:
            self.parser = Parser(None, Events.events, self.getCallerId())
            self.parser.addListener(self)
        return self.parser

    def getIteration(self):
        """ Return the current training iteration of this session.
        """
        return self.iteration

    def getMaxIteration(self):
        """ Return the maximum training iteration of this session.
        """
        return self.max_iter

    def setMaxIteration(self, maxIteration):
        """ Set the maximum training iteration of this session.
        """
        if maxIteration > 0:
            self.max_iter = maxIteration
        self.stateChanged.emit(self.getState())

    def setParserInitialized(self):
        """ Should be called after the parser finished the inital parsing of
        log files.
        """
        #if self.parser_initialized is False:
        #    self.snapshotAdded.emit(self.last_solverstate)
        self.parser_initialized = True

    def isParserInitialized(self):
        return self.parser_initialized

    def delete(self):
        """ Delete the session directory and disconnect signals.
        """
        self.pause()
        try:
            shutil.rmtree(self.getDirectory())
        except Exception as e:
            Log.error('Could not remove session directory: ' + str(e),
                      self.getCallerId())
        try:
            self.stateChanged.disconnect()
            self.iterationChanged.disconnect()
            self.snapshotAdded.disconnect()
            self.project.deleteSession.emit(self.sid)
        except Exception:
            pass
        Log.removeCallerId(self.caller_id, False)

    # LogCaller

    def getCallerId(self):
        """ Return the unique caller id for this session
        """
        if self.caller_id is None:
            self.caller_id = Log.getCallerId(self.getLogFileName(True))
        return self.caller_id

    # ParserListener

    def update(self, phase, row):
        """ Called when the parser has parsed a new record.
        """
        self.iteration = row['NumIters']
        self.iterationChanged.emit()

    def handle(self, event, message, groups):
        """ Called when the parser has parsed a registered event.
        """
        if event == 'OptimizationDone':
            self.setFinished()
        elif event == 'max_iter':
            self.max_iter = int(groups[0])
        elif event == 'state_snapshot':
            self.last_solverstate = groups[0]
            #if self.parser_initialized:
            self.snapshotAdded.emit(self.last_solverstate)
        elif event == 'model_snapshot':
            self.last_caffemodel = groups[0]

    def parsingFinished(self):
        """ Called when the parser has processed all available streams.
        """

        if self.proc is not None:
            # Wait for caffe process, kill tee and respond to return code
            assert self.state is State.RUNNING
            rcode = self.proc.wait()
            self.proc = None
            try:
                self.tee.kill()
            except Exception:
                pass
            self.tee = None
            if rcode == 0:
                self.setFinished()
            else:
                self.setState(State.FAILED)
                Log.error('Session failed with return code ' + str(rcode),
                          self.getCallerId())

        self.setParserInitialized()

    def hasStateDict(self):
        return self.state_dictionary is not None

    def save(self, includeProtoTxt=False):
        """Saves the current session to a json file. If includeProtoTxt is True, prototxt files are saved as well."""
        toSave = {
            "SessionState": self.state,
            "Iteration": self.iteration,
            "MaxIter": self.max_iter
        }
        toSave["ProjectID"] = self.project.projectId

        self.__ensureDirectory(self.directory)
        Log.log("Saving current Session status to disk.", self.getCallerId())
        if self.last_solverstate:
            toSave["LastSnapshot"] = self.last_solverstate
        if self.getPretrainedWeights():
            toSave["PretrainedWeights"] = self.getPretrainedWeights()
        if self.state_dictionary:
            serializedDict = copy.deepcopy(self.state_dictionary)
            if includeProtoTxt:
                if "solver" in self.state_dictionary:
                    solver = self.buildSolverPrototxt()
                    with open(self.getSolver(log=False), 'w') as f:
                        f.write(solver)
                else:
                    Log.error(
                        "Could not save a solver prototxt file, because no solver settings are defined.",
                        self.getCallerId())

            if "network" in serializedDict:
                if includeProtoTxt:
                    net = self.buildNetPrototxt(internalVersion=False)
                    with open(self.getOriginalNetFile(log=False), 'w') as f:
                        f.write(net)
                    net = self.buildNetPrototxt(internalVersion=True)
                    with open(self.getInternalNetFile(log=False), 'w') as f:
                        f.write(net)
                if "layers" in serializedDict["network"]:
                    layers = serializedDict["network"]["layers"]
                    for id in layers:
                        del layers[id]["type"]
            else:
                Log.error(
                    "Could not save the network state because no state was defined.",
                    self.getCallerId())

            toSave["NetworkState"] = serializedDict

        with open(baristaSessionFile(self.directory), "w") as f:
            json.dump(toSave, f, sort_keys=True, indent=4)

    def _modifyNetDictionaryToInternalVersion(self, net):
        """ Take an original net dictionary and apply all changes necessary for
        the internal version.

        Mainly, this will change all relative paths to be relative to a session
        folder.
        Hint: This will change net itself. You might want to create a (deep)
        copy of the dictionary before calling this method.
        """
        # define all layer parameters that can contain (relative) paths
        # (use a dot to separate nested parameters)
        layerParamsContainingPaths = [
            "data_param.source", "hdf5_data_param.source",
            "image_data_param.source", "window_data_param.source",
            "data_param.mean_file", "hdf5_output_param.file_name",
            "image_data_param.mean_file", "window_data_param.mean_file",
            "transform_param.mean_file"
        ]

        # evaluate layer after layer
        h = helper.DictHelper(net)
        for layerId, layer in net.get("layers", {}).items():

            # evaluate parameter after parameter
            for paramKey in layerParamsContainingPaths:
                # if the current layer does have the current parameter..
                if h.layerParameterIsSet(layerId, paramKey):
                    paramValue = h.layerParameter(layerId, paramKey)

                    # ..and its value is a relative path: modify it
                    if paramValue is not None and not os.path.isabs(
                            paramValue):
                        if paramKey != "hdf5_data_param.source":
                            newPath = os.path.join(
                                os.pardir, os.path.join(os.pardir, paramValue))
                        else:
                            newFilename = str(layerId) + ".txt"
                            newPath = os.path.join(os.curdir, newFilename)

                        h.setLayerParameter(layerId, paramKey, newPath)

        return net

    def buildSolverPrototxt(self):
        """ Load the current solver dictionary and return the corresponding
        message object.

        :return: A solver message object.
        """

        if not self.state_dictionary:
            return None
        elif "solver" not in self.state_dictionary:
            return None
        else:
            solver = saver.saveSolver(self.state_dictionary["solver"])
            return solver

    def buildNetPrototxt(self, internalVersion=False):
        """ Load the current net dictionary and return the corresponding
        message object.

        :param internalVersion: If True, the loaded net will be modified to
        the internal version.
        :return: A net message object.
        """

        currentState = self.state_dictionary
        netDictionary = None
        if currentState:
            netDictionary = currentState["network"]

        if internalVersion:
            netDictionary = self._modifyNetDictionaryToInternalVersion(
                copy.deepcopy(netDictionary))

        net = saver.saveNet(netDictionary)
        return net

    def __parseSettings(self, settings):
        if settings:
            if "SessionState" in settings:
                self.state = settings["SessionState"]
                self.__previousState = self.state

            if "Iteration" in settings:
                self.iteration = settings["Iteration"]

            if "solver" in settings:
                if "max_iter" in settings["solver"]:
                    self.max_iter = settings["solver"]["max_iter"]
            elif "MaxIter" in settings:
                self.max_iter = settings["MaxIter"]

            if "LastSnapshot" in settings:
                self.last_solverstate = settings["LastSnapshot"]

            if "PretrainedWeights" in settings:
                self.setPretrainedWeights(settings["PretrainedWeights"])

            if "NetworkState" in settings:
                self.state_dictionary = settings["NetworkState"]
                layers = self.state_dictionary["network"]["layers"]
                for id in layers:
                    if "parameters" in layers[id]:
                        if "type" in layers[id]["parameters"]:
                            typename = layers[id]["parameters"]["type"]
                            layers[id]["type"] = info.CaffeMetaInformation(
                            ).getLayerType(typename)
                            # layers[id]["type"] = info.CaffeMetaInformation().availableLayerTypes()[typename]
                solver = self.state_dictionary["solver"]
                if solver:
                    if "snapshot_prefix" in solver:
                        self.snapshot_prefix = solver["snapshot_prefix"]

    def updateMaxIterFromStateDict(self):
        if self.state_dictionary:
            if "solver" in self.state_dictionary:
                if "max_iter" in self.state_dictionary["solver"]:
                    self.max_iter = self.state_dictionary["solver"]["max_iter"]
            elif "MaxIter" in self.state_dictionary:
                self.max_iter = self.state_dictionary["MaxIter"]

    def __parseSessionId(self):
        """ Return a tuple (session_id, run_id). The ids are parsed from the
        directory. session_id is the id of the session and run_id the highest
        found run id.
        """
        regex_sid = re.compile(r'[\d]{8}_[\d]{6}_([\d]+)')
        sid_match = regex_sid.search(self.directory)
        session_id = None
        if sid_match:
            try:
                session_id = int(sid_match.group(1))
            except:
                pass
        run_id = 0
        regex_rid = re.compile(r'([\d]+)\.([\d]+)\.log')
        if os.path.exists(self.logs):
            for entry in os.listdir(self.logs):
                rid_match = regex_rid.search(entry)
                if rid_match:
                    try:
                        _run_id = int(rid_match.group(2))
                        run_id = max(run_id, _run_id)
                        if session_id is None:
                            session_id = int(rid_match.group(1))
                    except:
                        pass
        return session_id, run_id

    def __parseStartTime(self):
        """ Parse the start time with second precision from the directory name.

        Return the parsed datetime.
        """
        regex_time = re.compile(
            r'([\d]{4})([\d]{2})([\d]{2})_([\d]{2})([\d]{2})([\d]{2})')
        time_match = regex_time.search(self.directory)
        time = datetime.now()
        if time_match:
            try:
                year = int(time_match.group(1))
                month = int(time_match.group(2))
                day = int(time_match.group(3))
                hour = int(time_match.group(4))
                minute = int(time_match.group(5))
                second = int(time_match.group(6))
                time = time.replace(year=year,
                                    month=month,
                                    day=day,
                                    hour=hour,
                                    minute=minute,
                                    second=second)
            except Exception:
                pass
        return time

    def __getDateTimeString(self):
        """ Return the current datetime in the format YYYYMMDD_hh24mmss
        """
        return datetime.now().strftime('%Y%m%d_%H%M%S')

    def reset(self):
        self.pause()
        for dirpath, dirnames, filenames in os.walk(self.directory,
                                                    topdown=True):
            for dirname in dirnames:
                if os.path.join(dirpath, dirname) == self.logs:
                    try:
                        shutil.rmtree(self.logs)
                    except OSError as e:
                        Log.error('Failed to delete logs folder: ' + str(e),
                                  self.getCallerId())
            for filename in filenames:
                if filename.endswith(".solverstate") or filename.endswith(
                        ".caffemodel"):
                    if filename != self.getPretrainedWeights():
                        try:
                            os.remove(os.path.join(dirpath, filename))
                        except OSError as e:
                            Log.error(
                                'Failed to delete ' + str(filename) + ': ' +
                                str(e), self.getCallerId())
                if filename in [
                        "net-internal.prototxt", "net-original.prototxt",
                        "solver.prototxt"
                ]:
                    try:
                        os.remove(os.path.join(dirpath, filename))
                    except OSError as e:
                        Log.error(
                            'Failed to delete ' + str(filename) + ': ' +
                            str(e), self.getCallerId())
            break
        self.iteration = 0
        self.iterationChanged.emit()
        self.state = State.UNDEFINED
        self.setState(self.getState())
        self.setLastModel(None)
        self.setLastSnapshot(None)
        self.project.resetSession.emit(self.getSessionId())
        self.save()
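
The Session class above drives caffe entirely through subprocess plumbing: SIGHUP asks the trainer to snapshot, kill() pauses it, and a tee child mirrors its stdout into a run log. A minimal, POSIX-only sketch of that control pattern; the inline Python child is a hypothetical stand-in for the caffe binary:

import signal
import time
from subprocess import Popen, PIPE, STDOUT

# hypothetical stand-in for caffe: a child that traps SIGHUP the way the
# caffe train tool does (caffe writes a snapshot, the stand-in just logs)
child_src = (
    "import signal, time\n"
    "signal.signal(signal.SIGHUP, lambda *a: print('snapshot!', flush=True))\n"
    "while True: time.sleep(0.2)\n"
)

proc = Popen(['python3', '-c', child_src], stdout=PIPE, stderr=STDOUT)
# mirror the child's output into a log file, as Session.start() does
tee = Popen(['tee', '-a', 'run.log'], stdin=proc.stdout, stdout=PIPE)

time.sleep(0.5)
proc.send_signal(signal.SIGHUP)  # ask for a "snapshot"
time.sleep(0.5)
proc.kill()                      # "pause": stop the trainer hard
tee.kill()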
Exemple #46
0
    def stop_server(self, process: Popen) -> None:
        self.echo_message('Stopping server on localhost')
        process.send_signal(SIGINT)
Exemple #47
0
class zynthian_gui_midi_recorder(zynthian_gui_selector):

    sys_dir = os.environ.get('ZYNTHIAN_SYS_DIR', "/zynthian/zynthian-sys")

    jack_record_port = "ZynMidiRouter:main_out"
    jack_play_port = "ZynMidiRouter:seq_in"

    def __init__(self):
        self.capture_dir_sdc = os.environ.get(
            'ZYNTHIAN_MY_DATA_DIR', "/zynthian/zynthian-my-data") + "/capture"
        self.capture_dir_usb = os.environ.get('ZYNTHIAN_EX_DATA_DIR',
                                              "/media/usb0")
        self.current_record = None
        self.rec_proc = None
        self.play_proc = None
        super().__init__('MIDI Recorder', True)

    def get_status(self):
        status = None

        if self.rec_proc and self.rec_proc.poll() is None:
            status = "REC"

        if self.play_proc and self.play_proc.poll() is None:
            if status == "REC":
                status = "PLAY+REC"
            else:
                status = "PLAY"

        return status

    def fill_list(self):
        self.index = 0
        self.list_data = []

        status = self.get_status()
        if status == "REC" or status == "PLAY+REC":
            self.list_data.append(("STOP_RECORDING", 0, "Stop Recording"))
        else:
            self.list_data.append(("START_RECORDING", 0, "Start Recording"))

        if status == "PLAY" or status == "PLAY+REC":
            self.list_data.append(("STOP_PLAYING", 0, "Stop Playing"))

        if zynthian_gui_config.midi_play_loop:
            self.list_data.append(("LOOP", 0, "[x] Loop Play"))
        else:
            self.list_data.append(("LOOP", 0, "[  ] Loop Play"))

        self.list_data.append((None, 0, "-----------------------------"))

        i = 1
        # Files in SD-Card
        for f in sorted(os.listdir(self.capture_dir_sdc)):
            fpath = join(self.capture_dir_sdc, f)
            if isfile(fpath) and f[-4:].lower() == '.mid':
                #title=str.replace(f[:-3], '_', ' ')
                title = "SDC: {}".format(f[:-4])
                self.list_data.append((fpath, i, title))
                i += 1
        # Files on USB-Pendrive
        for f in sorted(os.listdir(self.capture_dir_usb)):
            fpath = join(self.capture_dir_usb, f)
            if isfile(fpath) and f[-4:].lower() == '.mid':
                #title=str.replace(f[:-3], '_', ' ')
                title = "USB: {}".format(f[:-4])
                self.list_data.append((fpath, i, title))
                i += 1

        super().fill_list()

    def fill_listbox(self):
        super().fill_listbox()
        self.highlight()

    # Highlight command and current record played, if any ...
    def highlight(self):
        if not self.play_proc or self.play_proc.poll() is not None:
            self.current_record = None
        for i, row in enumerate(self.list_data):
            if row[0] is not None and row[0] == self.current_record:
                self.listbox.itemconfig(i,
                                        {'bg': zynthian_gui_config.color_hl})
            else:
                self.listbox.itemconfig(
                    i, {'fg': zynthian_gui_config.color_panel_tx})

    def select_action(self, i, t='S'):
        fpath = self.list_data[i][0]

        if fpath == "START_RECORDING":
            self.start_recording()
        elif fpath == "STOP_PLAYING":
            self.stop_playing()
        elif fpath == "STOP_RECORDING":
            self.stop_recording()
        elif fpath == "LOOP":
            self.toggle_loop()
        elif fpath:
            if t == 'S':
                self.start_playing(fpath)
            else:
                self.zyngui.show_confirm(
                    "Do you really want to delete '{}'?".format(
                        self.list_data[i][2]), self.delete_confirmed, fpath)

    def delete_confirmed(self, fpath):
        logging.info("DELETE MIDI RECORDING: {}".format(fpath))

        try:
            os.remove(fpath)
        except Exception as e:
            logging.error(e)

        self.zyngui.show_modal("midi_recorder")

    def start_recording(self):
        logging.info("STARTING NEW MIDI RECORD ...")
        try:
            cmd = self.sys_dir + "/sbin/jack-smf-recorder.sh --port {}".format(
                self.jack_record_port)
            #logging.info("COMMAND: %s" % cmd)
            self.rec_proc = Popen(cmd.split(" "),
                                  shell=True,
                                  preexec_fn=os.setpgrp)
            sleep(0.5)
        except Exception as e:
            logging.error("ERROR STARTING MIDI RECORD: %s" % e)
            self.zyngui.show_info("ERROR STARTING MIDI RECORD:\n %s" % e)
            self.zyngui.hide_info_timer(5000)
        self.update_list()

    def stop_recording(self):
        logging.info("STOPPING MIDI RECORD ...")
        self.rec_proc.terminate()
        os.killpg(os.getpgid(self.rec_proc.pid), signal.SIGINT)
        while self.rec_proc.poll() is None:
            sleep(0.5)
        self.update_list()

    def start_playing(self, fpath):
        if self.play_proc and self.play_proc.poll() is None:
            self.stop_playing()
        logging.info("STARTING MIDI PLAY '{}' ...".format(fpath))
        try:
            if zynthian_gui_config.midi_play_loop:
                cmd = "/usr/local/bin/jack-smf-player -s -t -l -a {} {}".format(
                    self.jack_play_port, fpath)
            else:
                cmd = "/usr/local/bin/jack-smf-player -s -t -a {} {}".format(
                    self.jack_play_port, fpath)
            logging.info("COMMAND: %s" % cmd)

            def runInThread(onExit, pargs):
                self.play_proc = Popen(pargs)
                self.play_proc.wait()
                self.stop_playing()
                return

            thread = threading.Thread(target=runInThread,
                                      args=(self.stop_playing, cmd.split(" ")),
                                      daemon=True)
            thread.start()
            sleep(0.5)
            self.current_record = fpath
        except Exception as e:
            logging.error("ERROR STARTING MIDI PLAY: %s" % e)
            self.zyngui.show_info("ERROR STARTING MIDI PLAY:\n %s" % e)
            self.zyngui.hide_info_timer(5000)
        self.update_list()

    def stop_playing(self):
        logging.info("STOPPING MIDI PLAY ...")
        try:
            self.play_proc.send_signal(signal.SIGINT)
            sleep(0.5)
            self.play_proc.terminate()
        except:
            pass
        self.current_record = None
        self.update_list()

    def toggle_loop(self):
        if zynthian_gui_config.midi_play_loop:
            logging.info("MIDI play loop OFF")
            zynthian_gui_config.midi_play_loop = False
        else:
            logging.info("MIDI play loop ON")
            zynthian_gui_config.midi_play_loop = True
        zynconf.save_config({
            "ZYNTHIAN_MIDI_PLAY_LOOP":
            str(int(zynthian_gui_config.midi_play_loop))
        })
        self.update_list()

    def set_select_path(self):
        self.select_path.set("MIDI Recorder")
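
Note how start_recording() launches the recorder script in its own process group (preexec_fn=os.setpgrp) so that stop_recording() can signal the script and every child it spawned via os.killpg. A minimal, POSIX-only sketch of that pattern, with sleep as a hypothetical stand-in for the recorder:

import os
import signal
import time
from subprocess import Popen

# start the child in its own process group so all of its descendants
# can be signalled together
proc = Popen(['sleep', '60'], preexec_fn=os.setpgrp)

time.sleep(0.5)
os.killpg(os.getpgid(proc.pid), signal.SIGINT)  # interrupt the whole group
proc.wait()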
Exemple #48
0
class Cluster(RunningCluster):
    """This is an object that manages a postgres cluster.

    The cluster is created in a temporary directory.
    """

    dbdir = None
    _postmaster = None
    _db_counter = 0
    _existing_dbs = ()

    def __init__(self):
        self._db_preload = {}

    @property
    def host(self):
        return self.dbdir

    @property
    def started(self):
        return self._postmaster is not None

    def initdb(self):
        assert self.dbdir is None
        # setup template
        dbdir = tempfile.mkdtemp()
        new_env = os.environ.copy()
        new_env['PGOPTIONS'] = '-F'
        _pg_run(['initdb', '-E', 'utf-8', '-D', dbdir, '-A', 'trust'],
                env=new_env)
        self.dbdir = dbdir

    def cleanup(self):
        super(Cluster, self).cleanup()
        if self._postmaster is not None:
            self.stop()
        if self.dbdir is not None:
            shutil.rmtree(self.dbdir)
            self.dbdir = None

    def start(self):
        assert self._postmaster is None
        assert self.dbdir is not None
        args = [
            'postgres', '-D', self.dbdir, '-k', self.dbdir, '-F', '-h', '',
            '--log_min_messages=PANIC'
        ]
        self._postmaster = Popen(args)
        timeout = 10  # seconds
        for i in range(timeout * 20):
            try:
                time.sleep(0.05)
                self._postmaster.poll()
                if self._postmaster.returncode is not None:
                    raise Exception("Postmaster died unexpectedly")
                args = [
                    'psql', '-h', self.host, '-c', "SELECT 'YAY';", '-t', '-A',
                    'postgres'
                ]
                p = Popen(args, stdout=PIPE, stderr=PIPE)
                result, psql_err = p.communicate()
                if p.returncode == 0 and b'YAY' in result:
                    break  # success
            except:
                self.stop()
                raise
        else:
            self.stop()
            raise Exception('Timed out connecting to postgres: %s' % psql_err)

    def stop(self):
        assert self._postmaster is not None
        self._postmaster.poll()
        if self._postmaster.returncode is None:
            # http://www.postgresql.org/docs/9.1/static/server-shutdown.html
            self._postmaster.send_signal(signal.SIGQUIT)
            self._postmaster.wait()
        self._postmaster = None
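
Cluster.start() is a readiness loop: launch the server, then poll it with a cheap client command until it answers, failing fast if the child dies first. The same shape works for any server subprocess; a sketch under those assumptions (names are illustrative, not part of the original code):

import time
from subprocess import Popen, PIPE

def wait_until_ready(server, probe_args, timeout=10.0):
    """Run probe_args repeatedly until it succeeds; raise on death/timeout."""
    err = b''
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if server.poll() is not None:
            raise RuntimeError('server died unexpectedly')
        probe = Popen(probe_args, stdout=PIPE, stderr=PIPE)
        out, err = probe.communicate()
        if probe.returncode == 0:
            return out
        time.sleep(0.05)
    raise TimeoutError('server did not become ready: %s' % err)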
Exemple #49
0
class DockerizedJobServer(object):
    """
   Spins up the JobServer in a docker container for local execution
  """
    def __init__(self,
                 job_host="localhost",
                 job_port=None,
                 artifact_port=None,
                 expansion_port=None,
                 harness_port_range=(8100, 8200),
                 max_connection_retries=5):
        self.job_host = job_host
        self.job_port = job_port
        self.expansion_port = expansion_port
        self.artifact_port = artifact_port
        self.harness_port_range = harness_port_range
        self.max_connection_retries = max_connection_retries
        self.docker_process = None
        self.process_lock = Lock()

    def start(self):
        # TODO This is hardcoded to Flink at the moment but should be changed
        job_server_image_name = os.environ['USER'] + \
            "-docker-apache.bintray.io/beam/flink-job-server:latest"
        docker_path = check_output(['which', 'docker']).strip().decode('utf-8')
        cmd = [
            "docker",
            "run",
            # We mount the docker binary and socket to be able to spin up
            # "sibling" containers for the SDK harness.
            "-v",
            ':'.join([docker_path, "/bin/docker"]),
            "-v",
            "/var/run/docker.sock:/var/run/docker.sock"
        ]

        self.job_port, self.artifact_port, self.expansion_port = \
          DockerizedJobServer._pick_port(self.job_port,
                                         self.artifact_port,
                                         self.expansion_port)

        args = [
            '--job-host', self.job_host, '--job-port',
            str(self.job_port), '--artifact-port',
            str(self.artifact_port), '--expansion-port',
            str(self.expansion_port)
        ]

        if sys.platform == "darwin":
            # Docker-for-Mac doesn't support host networking, so we need to explicitly
            # publish ports from the Docker container to be able to connect to it.
            # Also, all other containers need to be aware that they run Docker-on-Mac
            # to connect against the internal Docker-for-Mac address.
            cmd += ["-e", "DOCKER_MAC_CONTAINER=1"]
            cmd += ["-p", "{}:{}".format(self.job_port, self.job_port)]
            cmd += [
                "-p", "{}:{}".format(self.artifact_port, self.artifact_port)
            ]
            cmd += [
                "-p", "{}:{}".format(self.expansion_port, self.expansion_port)
            ]
            cmd += [
                "-p", "{0}-{1}:{0}-{1}".format(self.harness_port_range[0],
                                               self.harness_port_range[1])
            ]
        else:
            # This shouldn't be set for MacOS because it destroys port forwardings,
            # even though host networking is not supported on MacOS.
            cmd.append("--network=host")

        cmd.append(job_server_image_name)
        cmd += args

        logging.debug("Starting container with %s", cmd)
        try:
            self.docker_process = Popen(cmd)
            atexit.register(self.stop)
            signal.signal(signal.SIGINT, self.stop)
        except:  # pylint:disable=bare-except
            logging.exception("Error bringing up container")
            self.stop()

        return "{}:{}".format(self.job_host, self.job_port)

    def stop(self):
        with self.process_lock:
            if not self.docker_process:
                return
            num_retries = 0
            while self.docker_process.poll() is None and \
                    num_retries < self.max_connection_retries:
                logging.debug("Sending SIGINT to job_server container")
                self.docker_process.send_signal(signal.SIGINT)
                num_retries += 1
                time.sleep(1)
            if self.docker_process.poll() is None:
                self.docker_process.kill()

    @staticmethod
    def _pick_port(*ports):
        """
    Returns a list of ports, same length as input ports list, but replaces
    all None or 0 ports with a random free port.
    """
        sockets = []

        def find_free_port(port):
            if port:
                return port
            else:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sockets.append(s)
                s.bind(('localhost', 0))
                _, free_port = s.getsockname()
                return free_port

        ports = list(map(find_free_port, ports))
        # Close sockets only now to avoid the same port to be chosen twice
        for s in sockets:
            s.close()
        return ports
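
_pick_port() uses the usual free-port trick: binding to port 0 lets the OS choose a port, and the probe sockets stay open until every port has been chosen so the same port cannot be handed out twice. A hypothetical call, fixing the job port and letting the OS pick the other two:

job, artifact, expansion = DockerizedJobServer._pick_port(8099, None, None)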
def run():
    description = "Unit Test Runner for Handlers"
    parser = ArgumentParser(description=description)
    parser.add_argument('server_cloudooo_conf')
    parser.add_argument('test_name')
    parser.add_argument('--timeout_limit',
                        dest='timeout_limit',
                        type=long,
                        default=30,
                        help="Timeout to waiting for the cloudooo stop")
    parser.add_argument('--paster_path',
                        dest='paster_path',
                        default='paster',
                        help="Path to Paster script")
    namespace = parser.parse_args()
    environment_path = glob(
        path.join(resource_filename("cloudooo", "handler"), '*', 'tests'))
    sys.path.extend(environment_path)
    server_cloudooo_conf = namespace.server_cloudooo_conf
    test_name = namespace.test_name
    if server_cloudooo_conf.startswith(curdir):
        server_cloudooo_conf = path.join(path.abspath(curdir),
                                         server_cloudooo_conf)
    environ['server_cloudooo_conf'] = server_cloudooo_conf
    paster_path = namespace.paster_path

    python_extension = '.py'
    if test_name[-3:] == python_extension:
        test_name = test_name[:-3]
    handler_path = None
    for env_handler_path in environment_path:
        full_path = path.join(env_handler_path,
                              '%s%s' % (test_name, python_extension))
        if path.exists(full_path):
            handler_path = env_handler_path
            break

    if handler_path is None:
        exit("%s does not exists\n" % full_path)

    from cloudooo.tests.handlerTestCase import startFakeEnvironment
    from cloudooo.tests.handlerTestCase import stopFakeEnvironment

    config = ConfigParser()
    config.read(server_cloudooo_conf)
    module = __import__(test_name)
    if not hasattr(module, "test_suite"):
        exit("No test suite to run, exiting immediately")

    DAEMON = getattr(module, 'DAEMON', False)
    OPENOFFICE = getattr(module, 'OPENOFFICE', False)

    TestRunner = backportUnittest.TextTestRunner
    suite = unittest.TestSuite()
    suite.addTest(module.test_suite())

    if DAEMON:
        log_file = '%s/cloudooo_test.log' % config.get('app:main',
                                                       'working_path')
        if path.exists(log_file):
            remove(log_file)
        command = [
            paster_path, 'serve', '--log-file', log_file, server_cloudooo_conf
        ]
        process = Popen(command)
        wait_use_port(process.pid)
        chdir(handler_path)
        try:
            TestRunner(verbosity=2).run(suite)
        finally:
            process.send_signal(SIGQUIT)
            process.wait()
    elif OPENOFFICE:
        chdir(handler_path)
        startFakeEnvironment(conf_path=server_cloudooo_conf)
        try:
            TestRunner(verbosity=2).run(suite)
        finally:
            stopFakeEnvironment()
    else:
        chdir(handler_path)
        TestRunner(verbosity=2).run(suite)
Exemple #51
0
    def test_cli_option_dev_flag(self):
        # also test ctrl-c

        if os.name == 'nt':
            # Due to how Windows handles ctrl-c events with process groups and consoles,
            # it's not really feasible to test this on Windows because it will want to kill
            # PyTest (and/or the console on the testing system), or to just kill the
            # subprocess (kill -9 equivalent).
            #
            # It *may* be possible if we create a separate terminal for testing, join it,
            # disable ctrl-c events in our own process and our parent process (if any, f.e.
            # when running in appveyor), send a ctrl-c event, then re-enable ctrl-c events
            # for our own and parent process.  ..that *might* work, but I'm not really
            # familiar with the win32 api.
            pytest.xfail("This test is problematic on Windows.")

        TESTED_PARAMS.append(['--dev'])

        SIGINT = signal.SIGINT

        cmd = ['--dev', 'install', 'user/test']
        result = self.execute(cmd)

        # was the --dev arg accepted by argparse?
        assert result['return code'] == 0

        # We need to run a command that blocks.  To do so, I'm disabling the
        # test mocking of the command module, and executing a command that
        # blocks while waiting for input ('config').
        test_environ = os.environ.copy()
        test_environ['QUILT_TEST_CLI_SUBPROC'] = 'false'
        test_environ[
            'PYTHONUNBUFFERED'] = "true"  # prevent blank stdout due to buffering

        # With no '--dev' arg, the process should exit without a traceback
        cmd = self.quilt_command + ['config']
        proc = Popen(cmd,
                     stdin=PIPE,
                     stdout=PIPE,
                     stderr=PIPE,
                     env=test_environ)

        # Wait for some expected text
        expected = b"Please enter the URL"
        response = proc.stdout.read(len(
            expected))  # blocks if 'quilt config' produces too little output.
        assert response == expected

        # Send interrupt, and fetch result
        proc.send_signal(SIGINT)
        stdout, stderr = (b.decode() for b in proc.communicate())

        assert 'Traceback' not in stderr
        # Return code should indicate keyboard interrupt
        assert proc.returncode == EXIT_KB_INTERRUPT

        # With the '--dev' arg, the process should display a traceback
        cmd = self.quilt_command + ['--dev', 'config']
        proc = Popen(cmd,
                     stdin=PIPE,
                     stdout=PIPE,
                     stderr=PIPE,
                     env=test_environ)

        # Wait for some expected text
        expected = b"Please enter the URL"
        response = proc.stdout.read(len(
            expected))  # blocks if 'quilt config' produces too little output.
        assert response == expected

        # Send interrupt, and check result
        proc.send_signal(SIGINT)
        stdout, stderr = (b.decode() for b in proc.communicate())

        assert 'Traceback (most recent call last)' in stderr
        # Return code should be the generic exit code '1' for unhandled exception
        assert proc.returncode == 1
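
The test above is a reusable recipe for exercising Ctrl-C handling: read just enough stdout to know the child is blocked at its prompt, deliver SIGINT with send_signal, then collect the rest with communicate() and assert on the exit status. A condensed, POSIX-only sketch against a hypothetical interactive command:

import signal
from subprocess import Popen, PIPE

proc = Popen(['python3', '-u', '-c', 'input("prompt: ")'],
             stdin=PIPE, stdout=PIPE, stderr=PIPE)
expected = b'prompt: '
assert proc.stdout.read(len(expected)) == expected  # child is now blocked
proc.send_signal(signal.SIGINT)
stdout, stderr = proc.communicate()
assert proc.returncode != 0  # an unhandled KeyboardInterrupt exits non-zero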
Exemple #52
0
class ShowRecorder(Thread):
    def __init__(
        self,
        show_instance,
        show_name,
        filelength,
        start_time,
        config: Config,
    ):
        Thread.__init__(self)
        self.api_client = api_client()
        self.config = config
        self.filelength = filelength
        self.start_time = start_time
        self.show_instance = show_instance
        self.show_name = show_name
        self.p = None

    def record_show(self):
        length = str(self.filelength)
        filename = self.start_time
        filename = filename.replace(" ", "-")

        joined_path = os.path.join(RECORD_DIR, filename)
        filepath = f"{joined_path}.{self.config.playout.record_file_format}"

        br = self.config.playout.record_bitrate
        sr = self.config.playout.record_samplerate
        c = self.config.playout.record_channels
        ss = self.config.playout.record_sample_size

        # -f:16,2,44100
        # -b:256
        command = "ecasound -f:{},{},{} -i alsa -o {},{}000 -t:{}".format(
            ss,
            c,
            sr,
            filepath,
            br,
            length,
        )
        args = command.split(" ")

        logger.info("starting record")
        logger.info("command " + command)

        self.p = Popen(args, stdout=PIPE, stderr=PIPE)

        # blocks at the following line until the child process
        # quits
        self.p.wait()
        outmsgs = self.p.stdout.readlines()
        for msg in outmsgs:
            m = re.search("^ERROR", msg.decode(errors="replace"))
            if m is not None:
                logger.info("Recording error found: %s", outmsgs)
        logger.info("finishing record, return code %s", self.p.returncode)
        code = self.p.returncode

        self.p = None

        return code, filepath

    def cancel_recording(self):
        # send signal interrupt (2)
        logger.info("Show manually cancelled!")
        if self.p is not None:
            self.p.send_signal(signal.SIGINT)

    # if self.p is defined, then the child process ecasound is recording
    def is_recording(self):
        return self.p is not None

    def upload_file(self, filepath):

        filename = os.path.split(filepath)[1]

        # files is what requests actually expects
        files = {
            "file": open(filepath, "rb"),
            "name": filename,
            "show_instance": self.show_instance,
        }

        self.api_client.upload_recorded_show(files, self.show_instance)

    def set_metadata_and_save(self, filepath):
        """
        Writes song to 'filepath'. Uses metadata from:
            self.start_time, self.show_name, self.show_instance
        """
        try:
            full_date, full_time = self.start_time.split(" ", 1)
            # No idea why we translated - to : before
            # full_time = full_time.replace(":","-")
            logger.info("time: %s" % full_time)
            artist = "Airtime Show Recorder"
            # set some metadata for our file daemon
            recorded_file = mutagen.File(filepath, easy=True)
            recorded_file["artist"] = artist
            recorded_file["date"] = full_date
            recorded_file["title"] = "{}-{}-{}".format(self.show_name,
                                                       full_date, full_time)
            # You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string
            recorded_file["tracknumber"] = self.show_instance
            recorded_file.save()

        except Exception as e:
            top = traceback.format_exc()
            logger.error("Exception: %s", e)
            logger.error("traceback: %s", top)

    def run(self):
        code, filepath = self.record_show()

        if code == 0:
            try:
                logger.info("Preparing to upload %s" % filepath)

                self.set_metadata_and_save(filepath)

                self.upload_file(filepath)
                os.remove(filepath)
            except Exception as e:
                logger.error(e)
        else:
            logger.info("problem recording show")
            os.remove(filepath)
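
# A hedged sketch of a deadlock-safe variant of record_show()'s wait-then-read:
# with stdout/stderr as PIPEs, wait() can block forever once a pipe buffer
# fills, so communicate() is used to drain and reap in one call ("ecasound"
# and its arguments here are illustrative stand-ins for the command built above).
from subprocess import Popen, PIPE

p = Popen(["ecasound", "-f:16,2,44100", "-i", "alsa",
           "-o", "out.mp3,256000", "-t:30"],
          stdout=PIPE, stderr=PIPE)
out, err = p.communicate()  # drains both pipes, then waits for exit
for msg in out.decode(errors="replace").splitlines():
    if msg.startswith("ERROR"):
        print("Recording error found:", msg)
print("return code", p.returncode)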
Exemple #53
0
def _run_single_test_linux_perfcollect(t: SingleTest,
                                       out: TestPaths) -> TestRunStatus:

    # TODO: Could launch sudo just for the part it needs?
    # TODO: running in sudo causes output files to only be readable by super user...
    #  A future perfcollect may fix this.
    assert_admin()

    ensure_empty_dir(out.out_path_base)

    cwd = non_null(
        t.coreclr).corerun.parent  # TODO: handle self-contained executables
    env = combine_mappings(
        t.config.with_coreclr(t.coreclr_name).env(
            map_option(t.coreclr, lambda c: c.core_root)),
        {
            "COMPlus_PerfMapEnabled": "1",
            "COMPlus_EnableEventLog": "1"
        },
    )
    cmd: Sequence[str] = _benchmark_command(t)
    print(f"cd {cwd}")
    print(" ".join(cmd))
    test_process = Popen(cmd, cwd=cwd, env=env)

    test_process_pid = test_process.pid
    print("PID", test_process_pid)

    # Now launch perfcollect attached to the test process's PID
    perfcollect_cmd = (
        str(_PERFCOLLECT),
        "collect",
        str(out.out_path_base),
        "-gccollectonly",  # TODO: if I pass this, only event I get is EventID(200) ?
        "-pid",
        str(test_process_pid),
    )
    print(" ".join(perfcollect_cmd))
    # TODO: not sure cwd needs to be set...
    perfcollect_process = Popen(perfcollect_cmd, cwd=_PERFCOLLECT.parent)

    print("waiting on test...")

    test_process.wait()

    assert test_process.returncode == 0

    print("sending signal...")

    perfcollect_process.send_signal(SIGINT)

    print("waiting on perfcollect...")

    perfcollect_process.wait()
    assert perfcollect_process.returncode == 0

    print("Closed")

    raise Exception("TODO:finish")
Exemple #54
0
class Pqos(object):
    """
    The Pqos class defines methods to interact with pqos_wrapper cli.
    """

    CAP_SYS_RAWIO = "cap_sys_rawio"

    def __init__(self, show_warnings=False):
        self.reset_required = False
        self.show_warnings = show_warnings
        self.mon_process = None
        self.executable_path = find_executable2("pqos_wrapper")
        if self.executable_path is None:
            if self.show_warnings:
                logging.info(
                    "Unable to find pqos_wrapper, please install it for "
                    "cache allocation and monitoring if your CPU supports Intel RDT "
                    "(cf. https://gitlab.com/sosy-lab/software/pqos-wrapper).")

    def execute_command(self, __type, function, suppress_warning, *args):
        """
        Execute a given pqos_wrapper command and log the output

            @__type: The type of command being executed (monitoring or l3ca)
            @function: The name of the function being executed in pqos_wrapper
            @suppress_warning: A boolean to decide whether to print a warning on failing execution
        """
        if self.executable_path:
            args_list = [self.executable_path] + list(args)
            try:
                if "-m" in args_list:
                    self.mon_process = Popen(args_list,
                                             stdout=PIPE,
                                             stderr=PIPE)
                else:
                    ret = json.loads(
                        check_output(args_list, stderr=STDOUT).decode())
                    logging.debug(ret[function]["message"])
                return True
            except CalledProcessError as e:
                if self.show_warnings and (not suppress_warning):
                    self.print_error_message(e.output.decode(), __type,
                                             args_list)
        return False

    def print_error_message(self, err, __type, args_list):
        """
        Prints error message returned from pqos_wrapper

            @err: The error output returned by pqos_wrapper
            @__type: The type of command being executed (monitoring or l3ca)
            @args_list: The command being executed as a list
        """
        msg_prefix = {
            "mon": "Could not monitor events",
            "l3ca": "Could not set cache allocation",
        }
        try:
            ret = json.loads(err)
            logging.warning("{0}...{1}".format(msg_prefix[__type],
                                               ret["message"]))
            self.check_for_errors()
        except ValueError:
            logging.warning("{0}...Unable to execute command {1}".format(
                msg_prefix[__type], " ".join(args_list)))

    def check_capacity(self, technology):
        """
        Check if given intel rdt is supported.

            @technology: The intel rdt to be tested
        """
        return self.execute_command(technology, "check_capability", False,
                                    "-c", technology)

    @staticmethod
    def convert_core_list(core_assignment):
        """
        Convert a double list to a string.

            @core_assignment: The double list of cores
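
            Example (hypothetical core assignment):
                >>> Pqos.convert_core_list([[0, 1], [2, 3]])
                '[[0,1],[2,3]]'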
        """
        ret = []
        for benchmark in core_assignment:
            ret.append("[" + ",".join(str(core) for core in benchmark) + "]")
        return "[" + ",".join(ret) + "]"

    def allocate_l3ca(self, core_assignment):
        """
        This method checks if L3CAT is available and calls pqos_wrapper to
        allocate equal cache to each thread.

            @core_assignment: The list of cores assigned to each run
        """
        if self.check_capacity("l3ca"):
            core_string = self.convert_core_list(core_assignment)
            if self.execute_command("l3ca", "allocate_resource", False, "-a",
                                    "l3ca", core_string):
                self.reset_required = True
            else:
                self.reset_resources()

    def start_monitoring(self, core_assignment):
        """
        This method checks if monitoring capability is available and calls pqos_wrapper to
        monitor events on given lists of cores.

            @core_assignment: The list of cores assigned to each run
        """
        if self.check_capacity("mon"):
            core_string = self.convert_core_list(core_assignment)
            self.execute_command("mon", "monitor_events", False, "-m",
                                 core_string)

    def stop_monitoring(self):
        """
        This method stops monitoring by sending SIGINT to the monitoring process
        and resets the RMID for monitored cores to 0
        """
        ret = {}
        if self.mon_process:
            self.mon_process.send_signal(SIGINT)
            mon_output = self.mon_process.communicate()
            if self.mon_process.returncode == 0:
                mon_data = json.loads(mon_output[0].decode())
                logging.debug(mon_data["monitor_events"]["message"])
                ret = self.flatten_mon_data(
                    mon_data["monitor_events"]["function_output"]
                    ["monitoring_data"])
            else:
                if self.show_warnings:
                    self.print_error_message(mon_output[1].decode(), "mon",
                                             self.mon_process.args)
            self.mon_process.kill()
            self.mon_process = None
        else:
            if self.show_warnings:
                logging.warning("No monitoring process started")
        return ret

    def reset_monitoring(self):
        """
        Reset monitoring RMID to 0 for all cores
        """
        self.execute_command("mon", "reset_monitoring", True, "-rm")

    @staticmethod
    def flatten_mon_data(mon_data):
        """
        Converts the monitoring data array received from pqos_wrapper
        to a flattened dictionary

            @mon_data: The array of data received from pqos_wrapper monitoring cli
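
            Example (hypothetical single-group data):
                >>> Pqos.flatten_mon_data([{"cores": [0, 1], "ipc": 0.97}])
                {'ipc': 0.97}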
        """
        flatten_dict = {}
        for data in mon_data:
            core_str = ",".join(str(core) for core in data["cores"])
            data.pop("cores", None)
            for key, val in data.items():
                if isinstance(val, dict):
                    for sub_key, sub_val in val.items():
                        if len(mon_data) > 1:
                            flatten_key = "{0}_{1}_cpus{2}".format(
                                key, sub_key, core_str)
                        else:
                            flatten_key = "{0}_{1}".format(key, sub_key)
                        flatten_dict[flatten_key] = sub_val
                else:
                    if len(mon_data) > 1:
                        flatten_key = "{0}_cpus{1}".format(key, core_str)
                    else:
                        flatten_key = key
                    flatten_dict[flatten_key] = val
        return flatten_dict

    def reset_resources(self):
        """
        This method resets all resources to default.
        """
        if self.reset_required:
            self.execute_command("l3ca", "reset_resources", True, "-r")
            self.reset_required = False

    def check_for_errors(self):
        """
        This method logs a detailed error on a failed pqos_error command.
        """
        cap = get_capability(self.executable_path)
        if cap["error"] is False:
            if self.CAP_SYS_RAWIO in cap["capabilities"]:
                if not all(x in cap["set"] for x in ["e", "p"]):
                    logging.warning(
                        "Insufficient capabilities for pqos_wrapper, please add e,p to the cap_sys_rawio capability set of pqos_wrapper"
                    )
            else:
                logging.warning(
                    "Insufficient capabilities for pqos_wrapper, please set the capability cap_sys_rawio with e,p for pqos_wrapper"
                )
        msr = check_msr()
        if msr["loaded"]:
            current_user = grp.getgrgid(os.getegid()).gr_name
            if msr["read"]:
                if not msr["write"]:
                    logging.warning(
                        "Add write permissions for msr module for {}".format(
                            current_user))
            else:
                logging.warning(
                    "Add read and write permissions for msr module for {}".
                    format(current_user))
        else:
            logging.warning(
                "Load msr module for using cache allocation/monitoring")
Exemple #55
0
    print(server_path)
    print(client_path)
    exitWithCleanUp()

grade = 0
# Judge 1
print('Judge #1')
server = Popen([server_path, server_config])
time.sleep(1)
if not os.path.exists(os.path.join(fifo_dir, 'server_to_client.fifo')) or \
   not os.path.exists(os.path.join(fifo_dir, 'client_to_server.fifo')):
    print('FIFO not exist')
else:
    print('Judge #1 passed. (2pt)')
    grade += 2
server.send_signal(2)  # SIGINT
time.sleep(1)

if server.poll() is None:
    print('Server does not terminate after judge #1.')
    #exitWithCleanUp()

# Judge 2
print('Judge #2')
client = Popen([client_path, client_config])
time.sleep(1)
mode = oct(os.stat(client_dir).st_mode)[-3:]
if mode == '000':
    print('Judge #2 passed. (2pt)')
    grade += 2
else:
Exemple #56
0
class VoiceRecognitionManager(TabClass):
    def __init__(self, language):

        self.ipWavServer = "192.168.5.80"  #"audio.openqbo.org"
        self.portWavServer = "80"  #"8588"
        self.language = language
        self.juliusPath = roslib.packages.get_pkg_dir("qbo_listen")
        self.juliusAMPath = "/usr/share/qbo-julius-model/"
        self.htmlTemplate = Template(
            filename='voiceRecognition/templates/voiceRecognitionTemplate.html'
        )
        self.jsTemplate = Template(
            filename='voiceRecognition/templates/voiceRecognitionTemplate.js')
        self.tmpdir = "/tmp/"
        self.LMPaths = "/config/LM/"
        self.LMFileName = "/sentences.conf"
        self.PhonemsFileName = "/phonems"
        self.TiedlistFileName = "/tiedlist"
        self.languages_names = {
            'en': 'English',
            'es': 'Spanish',
            'pt': 'Português',
            'de': 'Deutsch',
            'fr': 'Français',
            'it': 'Italiano'
        }
        self.path = roslib.packages.get_pkg_dir(
            "qbo_webi") + "/src/voiceRecognition/"
        self.lan = self.language["current_language"]
        self.mac = get_mac()
        self.p = None

    @cherrypy.expose
    def voiceRecognitionJs(self, parameters=None):
        self.lan = self.language["current_language"]
        return self.jsTemplate.render(language=self.language)

    def getLanguages(self):
        try:
            dirList = os.listdir(self.juliusPath + self.LMPaths)
            dirList.sort()
        except:
            dirList = -1
        return dirList

    def isQboListenInstalled(self):
        if self.getLanguages() == -1:
            return False
        else:
            return True

    def getLanguageModels(self, language):
        try:
            dirList = os.listdir(self.juliusPath + self.LMPaths + language)
            dirList.sort()
        except:
            dirList = -1
        return dirList

    def getLMSentences(self, language, model):
        try:
            f = open(
                self.juliusPath + self.LMPaths + language + "/" + model +
                self.LMFileName, 'r')
            return f.read()
        except:
            sentences = ""
        return sentences

    @cherrypy.expose
    def getModels(self, lang):
        modelList = ""
        try:
            dirList = os.listdir(self.juliusPath + self.LMPaths + lang)
            dirList.sort()
            for model in dirList:
                modelList = modelList + model + "::"
            modelList = modelList[:-2]
        except:
            modelList = -1
        return modelList

    @cherrypy.expose
    def test1(self, lang, text):
        text = text.encode("utf-8")
        f = open(self.tmpdir + 'LModel', 'w')
        f.write(text)
        f.close()
        words = gen_grammar.verrors(
            self.tmpdir + 'LModel',
            self.juliusAMPath + lang + "/" + self.PhonemsFileName)
        if words == 0:
            return ""
        else:
            wordsList = ""
            for word in words:
                wordsList = wordsList + word + "::"
                print word
                wordsList = wordsList[:-2]
            return wordsList

    @cherrypy.expose
    def test2(self, lang, text):
        errorlist = ""
        text = text.encode("utf-8")
        print text
        wordlist = text.split()
        print wordlist
        for word in wordlist:
            if word[0] != "[" and word[0] != "<":
                print word
                f = open(self.tmpdir + 'word', 'w')
                f.write("[sentence]\n")
                f.write(word)
                f.close()
                gen_grammar.createvoca(
                    self.tmpdir + 'word',
                    self.juliusAMPath + lang + "/" + self.PhonemsFileName,
                    self.tmpdir + 'word')
                print self.tmpdir + 'word'
                print self.juliusAMPath + lang + "/" + self.TiedlistFileName
                if gen_grammar.perrors(
                        self.tmpdir + 'word.voca', self.juliusAMPath + lang +
                        "/" + self.TiedlistFileName) != 0:
                    errorlist = errorlist + word + "::"
        errorlist = errorlist[:-2]
        return errorlist.upper()

    @cherrypy.expose
    def saveToFile(self, lang, text, model):
        #try:
        print self.juliusPath + self.LMPaths + lang + "/" + model + self.LMFileName
        text = text.encode("utf-8")
        f = open(
            self.juliusPath + self.LMPaths + lang + "/" + model +
            self.LMFileName, 'w')
        text11 = " will save to file!!!\n"
        print text11
        f.write(text)
        f.close()
        gen_grammar.compilegrammar(model, lang)
        #subprocess.Popen("roslaunch qbo_listen voice_recognizer.launch".split())
        #except:
        #		return "ERROR: Cant write the file"
        return ""

    @cherrypy.expose
    def getFile(self, lang="", model=""):
        if lang == "" or model == "":
            return "ERROR: lang:" + lang + "; model:" + model
        else:
            #print self.getLMSentences(lang,model)
            return self.getLMSentences(lang, model)

    @cherrypy.expose
    def index(self):
        tmp = ""
        if self.isQboListenInstalled():
            for lang in self.getLanguages():
                for LM in self.getLanguageModels(lang):
                    text = self.getLMSentences(lang, LM)
                    break
                break

            return self.htmlTemplate.render(language=self.language,
                                            lannames=self.languages_names,
                                            alllanguage=self.getLanguages())
        else:
            return "Qbo listen not installed"
#        return self.htmlTemplate.render(language=self.language)

    @cherrypy.expose
    def rec(self):

        #   n = self.getLenght("Arturo","sp")
        #   print "***** "+n

        # Delete the previous recording, if there was one
        try:
            # a shell is needed so the '*' wildcard actually gets expanded
            cmd = "rm " + self.path + "tmp/*"
            self.p = Popen(cmd, shell=True)
        except ValueError:
            print "Nothing to delete"
        '''
        try:
            cmd = "rm " + self.path + "/*_en"
            self.p = Popen(cmd, shell=True)
        except ValueError:
            print "Nothing to delete"

        try:
            cmd = "rm " + path + "/*sp"
            print cmd
            self.p = Popen(cmd, shell=True)

        except ValueError:
            print "Nothing to delete"
        '''

        self.filename = str(self.mac) + "_" + self.lan
        #filename = filename.replace("\"","")

        #   filename = "tmp.wav"

        print "FILENAME == " + self.filename

        print "grabnando!!!! " + self.path + "tmp/" + self.filename
        cmd = "arecord -f S16_LE  -r 44100 -c 1 " + self.path + "tmp/" + self.filename

        self.p = Popen(cmd.split())

        name = "oleole"
        return name

    @cherrypy.expose
    def stop(self):

        if self.p is None:
            print "P IS NULL!!??"
        else:
            print "killing recording"
            self.p.send_signal(signal.SIGINT)

        cmd = "python " + self.path + "sendWav2Server.py " + self.path + "tmp/" + self.filename + " " + self.ipWavServer + " " + self.portWavServer
        print cmd
        out = runCmd(cmd)

        print out[0]

        if out[1] != "":
            print "Error"
            return "error"

        return unicode(out[0], 'utf8')

    @cherrypy.expose
    def play(self):
        print "play sound"

        os.system('aplay ' + self.path + "tmp/" + self.filename)

        return "ok"

    @cherrypy.expose
    def save(self, transcripcion):
        print "SAVE! transcripcion=" + transcripcion

        cmd = "python " + self.path + "sendTranscription2Server.py " + str(
            self.mac
        ) + " \"" + transcripcion + "\" " + self.lan + " " + self.ipWavServer + " " + self.portWavServer
        print cmd
        out = runCmd(cmd)

        if out[1] != "":
            print "Error " + out[1]
            return "error"

        return out[0]
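
# A hedged sketch of the arecord record/stop pattern used by rec() and stop()
# above (path and duration are placeholders; arecord is expected to finalize
# the WAV file when it receives SIGINT):
from subprocess import Popen
import signal
import time

p = Popen("arecord -f S16_LE -r 44100 -c 1 /tmp/sample.wav".split())
time.sleep(5)                    # record for five seconds
p.send_signal(signal.SIGINT)     # stop recording cleanly
p.wait()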
Exemple #57
0
class MITMProxy:
    """Manager for MITM Proxy and the mock file structure.

    Attributes:
        demisto_api_key: API key for demisto API.
        public_ip (string): The IP of the AMI instance.
        repo_folder (string): path to the local clone of the content-test-data git repo.
        tmp_folder (string): path to a temporary folder for log/mock files before pushing to git.
        current_folder (string): the current folder to use for mock/log files.
        ami (AMIConnection): Wrapper for AMI communication.
        process (Popen): object representation of the Proxy process (used to track the proxy process status).
        empty_files (list): List of playbooks that have empty mock files (indicating no usage of mock mechanism).
        rerecorded_tests (list): List of playbook ids that failed on mock playback but succeeded on new recording.
        debug (bool): enable debug prints - when set, proxy output is not redirected to a log file and is printed on stop.
    """

    PROXY_PORT = '9997'
    MOCKS_TMP_PATH = '/tmp/Mocks/'
    MOCKS_GIT_PATH = 'content-test-data/'

    def __init__(self,
                 public_ip,
                 repo_folder=MOCKS_GIT_PATH,
                 tmp_folder=MOCKS_TMP_PATH,
                 debug=False):
        self.public_ip = public_ip
        self.current_folder = self.repo_folder = repo_folder
        self.tmp_folder = tmp_folder
        self.debug = debug

        self.ami = AMIConnection(self.public_ip)

        self.process = None
        self.empty_files = []
        self.failed_tests_count = 0
        self.successful_tests_count = 0
        self.successful_rerecord_count = 0
        self.failed_rerecord_count = 0
        self.failed_rerecord_tests = []
        self.rerecorded_tests = []
        silence_output(self.ami.call, ['mkdir', '-p', tmp_folder],
                       stderr='null')

    @staticmethod
    def configure_proxy_in_demisto(username, password, server, proxy=''):
        client = demisto_client.configure(base_url=server,
                                          username=username,
                                          password=password,
                                          verify_ssl=False)

        system_conf_response = demisto_client.generic_request_func(
            self=client, path='/system/config', method='GET')
        system_conf = ast.literal_eval(system_conf_response[0]).get(
            'sysConf', {})

        http_proxy = https_proxy = proxy
        if proxy:
            http_proxy = 'http://' + proxy
            https_proxy = 'http://' + proxy
        system_conf.update({
            'http_proxy': http_proxy,
            'https_proxy': https_proxy
        })
        data = {'data': system_conf, 'version': -1}
        response = demisto_client.generic_request_func(self=client,
                                                       path='/system/config',
                                                       method='POST',
                                                       body=data)

        return response

    def get_mock_file_size(self, filepath):
        return self.ami.check_output(['stat', '-c', '%s', filepath]).strip()

    def has_mock_file(self, playbook_id):
        command = [
            "[", "-f",
            os.path.join(self.current_folder, get_mock_file_path(playbook_id)),
            "]"
        ]
        return self.ami.call(command) == 0

    def has_mock_folder(self, playbook_id):
        command = [
            "[", "-d",
            os.path.join(self.current_folder, get_folder_path(playbook_id)),
            "]"
        ]
        return self.ami.call(command) == 0

    def set_repo_folder(self):
        """Set the repo folder as the current folder (the one used to store mock and log files)."""
        self.current_folder = self.repo_folder

    def set_tmp_folder(self):
        """Set the temp folder as the current folder (the one used to store mock and log files)."""
        self.current_folder = self.tmp_folder

    def move_mock_file_to_repo(self,
                               playbook_id,
                               thread_index=0,
                               prints_manager=None):
        """Move the mock and log files of a (successful) test playbook run from the temp folder to the repo folder

        Args:
            playbook_id (string): ID of the test playbook of which the files should be moved.
            thread_index (int): Index of the relevant thread, to make printing readable.
            prints_manager (ParallelPrintsManager): Prints manager to synchronize parallel prints.
        """
        src_filepath = os.path.join(self.tmp_folder,
                                    get_mock_file_path(playbook_id))
        src_files = os.path.join(self.tmp_folder,
                                 get_folder_path(playbook_id) + '*')
        dst_folder = os.path.join(self.repo_folder,
                                  get_folder_path(playbook_id))

        if not self.has_mock_file(playbook_id):
            prints_manager.add_print_job('Mock file not created!', print,
                                         thread_index)
        elif self.get_mock_file_size(src_filepath) == '0':
            prints_manager.add_print_job('Mock file is empty, ignoring.',
                                         print, thread_index)
            self.empty_files.append(playbook_id)
        else:
            # Move to repo folder
            prints_manager.add_print_job(
                'Moving "{}" files to "{}" directory'.format(
                    src_files, dst_folder), print, thread_index)
            self.ami.call(['mkdir', '--parents', dst_folder])
            self.ami.call(['mv', src_files, dst_folder])

    def clean_mock_file(self,
                        playbook_id,
                        path=None,
                        thread_index=0,
                        prints_manager=None):
        prints_manager.add_print_job(
            f'clean_mock_file was called for test "{playbook_id}"', print,
            thread_index)
        path = path or self.current_folder
        problem_keys_filepath = os.path.join(path,
                                             get_folder_path(playbook_id),
                                             'problematic_keys.json')
        prints_manager.add_print_job(
            f'problem_keys_filepath="{problem_keys_filepath}"', print,
            thread_index)
        problem_key_file_exists = ["[", "-f", problem_keys_filepath, "]"]
        if self.ami.call(problem_key_file_exists) != 0:
            err_msg = 'Error: The problematic_keys.json file was not written to the file path' \
                      ' "{}" when recording the "{}" test playbook'.format(problem_keys_filepath, playbook_id)
            prints_manager.add_print_job(err_msg, print_error, thread_index)
            return
        problem_keys = json.loads(
            self.ami.check_output(['cat', problem_keys_filepath]))

        # is there data in problematic_keys.json that needs whitewashing?
        prints_manager.add_print_job('checking if there is data to whitewash',
                                     print, thread_index)
        needs_whitewashing = False
        for val in problem_keys.values():
            if val:
                needs_whitewashing = True
                break

        if problem_keys and needs_whitewashing:
            mock_file_path = os.path.join(path,
                                          get_mock_file_path(playbook_id))
            # str.strip removes characters, not a suffix, so slice it off instead
            cleaned_mock_filepath = mock_file_path[:-len('.mock')] + '_cleaned.mock'
            # rewrite mock file with problematic key values replaced
            command = 'mitmdump -ns ~/timestamp_replacer.py '
            log_file = os.path.join(
                path, get_log_file_path(playbook_id, record=True))
            # Handle proxy log output
            debug_opt = f' >>{log_file} 2>&1' if not self.debug else ''
            options = f'--set script_mode=clean --set keys_filepath={problem_keys_filepath}'
            if options.strip():
                command += options
            command += ' -r {} -w {}{}'.format(mock_file_path,
                                               cleaned_mock_filepath,
                                               debug_opt)
            command = "source .bash_profile && {}".format(command)
            prints_manager.add_print_job(
                f'command to clean mockfile:\n\t{command}', print,
                thread_index)
            split_command = command.split()
            prints_manager.add_print_job(
                'Let\'s try and clean the mockfile from timestamp data!',
                print, thread_index)
            try:
                check_output(self.ami.add_ssh_prefix(split_command,
                                                     ssh_options='-t'),
                             stderr=STDOUT)
            except CalledProcessError as e:
                cleaning_err_msg = 'There may have been a problem when filtering timestamp data from the mock file.'
                prints_manager.add_print_job(cleaning_err_msg, print_error,
                                             thread_index)
                err_msg = f'command `{command}` exited with return code [{e.returncode}]'
                err_msg = f'{err_msg} and the output of "{e.output}"' if e.output else err_msg
                if e.stderr:
                    err_msg += f'STDERR: {e.stderr}'
                prints_manager.add_print_job(err_msg, print_error,
                                             thread_index)
            else:
                prints_manager.add_print_job('Success!', print_color,
                                             thread_index, LOG_COLORS.GREEN)

            # verify cleaned mock is different than original
            verify_diff_start_msg = 'verifying cleaned mock file is different than the original mock file'
            prints_manager.add_print_job(verify_diff_start_msg, print,
                                         thread_index)
            diff_cmd = 'diff -sq {} {}'.format(cleaned_mock_filepath,
                                               mock_file_path)
            try:
                diff_cmd_output = self.ami.check_output(
                    diff_cmd.split()).decode().strip()
                prints_manager.add_print_job(
                    f'diff_cmd_output={diff_cmd_output}', print, thread_index)
                if diff_cmd_output.endswith('are identical'):
                    identical_msg = 'cleaned mock file and original mock file are identical... ' \
                                    'uh oh looks like cleaning didn\'t work properly'
                    prints_manager.add_print_job(identical_msg, print_warning,
                                                 thread_index)
                else:
                    prints_manager.add_print_job(
                        'looks like the cleaning process did something!',
                        print_color, thread_index, LOG_COLORS.GREEN)

            except CalledProcessError as e:
                err_msg = 'command `{}` exited with return code [{}]'.format(
                    diff_cmd, e.returncode)
                err_msg = '{} and the output of "{}"'.format(
                    err_msg, e.output) if e.output else err_msg
                prints_manager.add_print_job(err_msg, print_error,
                                             thread_index)

            prints_manager.add_print_job('Replace old mock with cleaned one.',
                                         print, thread_index)
            mv_cmd = 'mv {} {}'.format(cleaned_mock_filepath, mock_file_path)
            self.ami.call(mv_cmd.split())
        else:
            empty_msg = '"problematic_keys.json" dictionary values were empty - ' \
                        'no data to whitewash from the mock file.'
            prints_manager.add_print_job(empty_msg, print, thread_index)

    def start(self,
              playbook_id,
              path=None,
              record=False,
              thread_index=0,
              prints_manager=None):
        """Start the proxy process and direct traffic through it.

        Args:
            playbook_id (string): ID of the test playbook to run.
            path (string): path override for the mock/log files.
            record (bool): Select proxy mode (record/playback)
            thread_index (int): Index of the relevant thread, to make printing readable.
            prints_manager (ParallelPrintsManager): Prints manager to synchronize parallel prints.
        """
        if self.process:
            raise Exception("Cannot start proxy - already running.")

        path = path or self.current_folder

        # Create mock files directory
        silence_output(
            self.ami.call,
            ['mkdir',
             os.path.join(path, get_folder_path(playbook_id))],
            stderr='null')

        repo_problem_keys_filepath = os.path.join(self.repo_folder,
                                                  get_folder_path(playbook_id),
                                                  'problematic_keys.json')
        current_problem_keys_filepath = os.path.join(
            path, get_folder_path(playbook_id), 'problematic_keys.json')

        # when recording, copy the `problematic_keys.json` for the test to current temporary directory if it exists
        # that way previously recorded or manually added keys will only be added upon and not wiped with an overwrite
        if record:
            silence_output(self.ami.call, [
                'mv', repo_problem_keys_filepath, current_problem_keys_filepath
            ],
                           stdout='null')

        script_filepath = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'timestamp_replacer.py')
        remote_script_path = self.ami.copy_file(script_filepath)

        # if recording
        # record with detect_timestamps and then rewrite mock file
        if record:
            actions = '-s {} '.format(remote_script_path)
            actions += '--set script_mode=record '
            actions += '--set detect_timestamps=true --set keys_filepath={} --save-stream-file'.format(
                current_problem_keys_filepath)
        else:
            actions = '-s {} '.format(remote_script_path)
            actions += '--set script_mode=playback '
            actions += '--set keys_filepath={} --server-replay-kill-extra --server-replay'.format(
                repo_problem_keys_filepath)

        log_file = os.path.join(path, get_log_file_path(playbook_id, record))
        # Handle proxy log output
        debug_opt = " >{} 2>&1".format(log_file) if not self.debug else ''

        # all mitmproxy/mitmdump commands should have the 'source .bash_profile && ' prefix to ensure the PATH
        # is correct when executing the command over ssh
        # Configure proxy server
        command = "source .bash_profile && mitmdump --ssl-insecure --verbose --listen-port {} {} {}{}".format(
            self.PROXY_PORT, actions,
            os.path.join(path, get_mock_file_path(playbook_id)), debug_opt)
        command = command.split()

        # Start proxy server
        self.process = Popen(self.ami.add_ssh_prefix(command, "-t"),
                             stdout=PIPE,
                             stderr=PIPE)
        self.process.poll()
        if self.process.returncode is not None:
            raise Exception(
                "Proxy process terminated unexpectedly.\nExit code: {}\noutputs:\nSTDOUT\n{}\n\nSTDERR\n{}"
                .format(self.process.returncode, self.process.stdout.read(),
                        self.process.stderr.read()))
        log_file_exists = False
        seconds_since_init = 0
        # Make sure process is up and running
        while not log_file_exists and seconds_since_init < PROXY_PROCESS_INIT_TIMEOUT:
            # Check if log file exist
            log_file_exists = silence_output(self.ami.call, ['ls', log_file],
                                             stdout='null',
                                             stderr='null') == 0
            time.sleep(PROXY_PROCESS_INIT_INTERVAL)
            seconds_since_init += PROXY_PROCESS_INIT_INTERVAL
        if not log_file_exists:
            self.stop(thread_index, prints_manager)
            raise Exception("Proxy process took to long to go up.")
        proxy_up_message = 'Proxy process up and running. Took {} seconds'.format(
            seconds_since_init)
        prints_manager.add_print_job(proxy_up_message, print, thread_index)

        # verify that mitmdump process is listening on port 9997
        try:
            prints_manager.add_print_job(
                'verifying that mitmdump is listening on port 9997', print,
                thread_index)
            lsof_cmd = ['sudo', 'lsof', '-iTCP:9997', '-sTCP:LISTEN']
            lsof_cmd_output = self.ami.check_output(lsof_cmd).decode().strip()
            prints_manager.add_print_job(f'lsof_cmd_output={lsof_cmd_output}',
                                         print, thread_index)
        except CalledProcessError as e:
            cleaning_err_msg = 'No process listening on port 9997'
            prints_manager.add_print_job(cleaning_err_msg, print_error,
                                         thread_index)
            err_msg = f'command `{command}` exited with return code [{e.returncode}]'
            err_msg = f'{err_msg} and the output of "{e.output}"' if e.output else err_msg
            prints_manager.add_print_job(err_msg, print_error, thread_index)

    def stop(self, thread_index=0, prints_manager=None):
        if not self.process:
            raise Exception("Cannot stop proxy - not running.")

        prints_manager.add_print_job('proxy.stop() was called', print,
                                     thread_index)
        self.process.send_signal(signal.SIGINT)  # Terminate proxy process
        self.ami.call(["rm", "-rf", "/tmp/_MEI*"])  # Clean up temp files

        # Handle logs
        if self.debug:
            prints_manager.add_print_job('proxy outputs:', print, thread_index)
            prints_manager.add_print_job(f'{self.process.stdout.read()}',
                                         print, thread_index)
            prints_manager.add_print_job(f'{self.process.stderr.read()}',
                                         print, thread_index)

        self.process = None
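
# A hedged lifecycle sketch for the MITMProxy class above (server address,
# credentials, IP and playbook id are placeholders; prints_manager is assumed
# to be a ParallelPrintsManager instance the caller already has):
# prints_manager = ParallelPrintsManager(...)  # assumed to exist already
proxy = MITMProxy(public_ip="10.0.0.1")
MITMProxy.configure_proxy_in_demisto("admin", "password",
                                     "https://server", proxy="10.0.0.1:9997")
proxy.set_tmp_folder()
proxy.start("playbook_id", record=True, prints_manager=prints_manager)
# ... run the test playbook against the server ...
proxy.stop(prints_manager=prints_manager)
proxy.clean_mock_file("playbook_id", prints_manager=prints_manager)
proxy.move_mock_file_to_repo("playbook_id", prints_manager=prints_manager)
proxy.set_repo_folder()
MITMProxy.configure_proxy_in_demisto("admin", "password",
                                     "https://server", proxy="")  # disable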
Exemple #58
0
class APLauncher(object):

    def __init__(self, hostapd_config_path):
        self.hostapd_config_path = hostapd_config_path

        self.ap_running = False
        self.ap_process = None
        self.credential_printer = None

        self.connected_clients_updator = None
        self.connected_clients = {}  # interface: client_list

        self.hostapd_output_parser = None

        self.file_handler = None
        self.print_creds = False

    def write_hostapd_configurations(self,  interface="wlan0",
                                            ssid=None,
                                            bssid=None,
                                            channel="1",
                                            hw_mode="g",
                                            encryption="OPN",
                                            auth="PSK",
                                            cipher="CCMP",
                                            password=None,
                                            catch_all_honeypot=False):
        """
        This method writes the configuration file of hostapd.

        According to the inputs, the configuration file is written in different ways.
        One can use multiple SSIDs in order to launch more than one access point.
        If the 'catch_all_honeypot' flag is set to 'True' then the first SSID is chosen
        as the "catch all honeypot" SSID.
        """
        self.cleanup()

        try:
            self.file_handler = FileHandler(self.hostapd_config_path, backup=False)
        except Exception as e:
            print e
            return False

        ssids       = None
        encryptions = None
        auths       = None
        passwords   = None

        ssids, encryptions, auths, passwords = self._parse_configs(ssid, encryption, auth, password)
        if ssids:
            ssid, encryption, auth, password = ssids[0], encryptions[0], auths[0], passwords[0]

        configurations = dedent("""
                                interface={interface}
                                ssid={ssid}
                                driver=nl80211
                                channel={channel}
                                hw_mode={hw_mode}
                                ignore_broadcast_ssid=0
                                """.format( interface=interface,
                                            ssid=ssid,
                                            channel=channel,
                                            hw_mode=hw_mode))

        if catch_all_honeypot:
            configurations = self._get_catch_all_honeypot_configurations(configurations, interface, ssid, bssid)
        else:
            configurations = self._get_multiple_ssid_configurations( configurations, interface, bssid, ssids,
                                                                    encryptions, auths, cipher, passwords)

        self.file_handler.write(configurations)
        return True

    def _get_catch_all_honeypot_configurations(self, configurations, interface, ssid, bssid):
        """This method writes 'catch-all' honeypot configurations on the hostapd configuration file."""
        # hostapd can automatically create sub bssids if last bytes are set to 0
        if bssid:
            bssid = bssid[:-1] + "0"
            configurations += "bssid={bssid}\n".format(bssid=bssid) + "\n"

        # Adding WEP configurations
        configurations += "bss={}_0\n".format(interface)
        configurations += "ssid={}\n".format(ssid)
        configurations += self._get_wep_configurations("12345") + "\n"

        # Adding WPA-PSK configurations
        configurations += "bss={}_1\n".format(interface)
        configurations += "ssid={}\n".format(ssid)
        configurations += self._get_wpa_configurations("wpa/wpa2", "PSK", "CCMP", "12345678") + "\n"

        # Adding WPA-EAP configurations
        configurations += "bss={}_2\n".format(interface)
        configurations += "ssid={}\n".format(ssid)
        configurations += self._get_wpa_configurations("wpa/wpa2", "EAP", "CCMP", None) + "\n"

        return configurations

    def _get_multiple_ssid_configurations(self,  configurations,
                                                interface,
                                                bssid=None,
                                                ssids=[],
                                                encryptions=[],
                                                auths=[],
                                                cipher="CCMP",
                                                passwords=[]):
        """This method adds configuration lines for multiple netowrks to the configuration string."""
        # hostapd can automatically create sub bssids if last bytes are set to 0
        if bssid:
            bssid = bssid[:-1] + "0"
            configurations += "bssid={bssid}\n".format(bssid=bssid) + "\n"

        ssid_encryption = []
        if len(ssids) == len(encryptions) == len(auths) == len(passwords):
            ssid_encryption = zip(ssids, encryptions, auths, passwords)
        else:
            for ssid in ssids:
                ssid_encryption.append((ssid, encryptions[0], auths[0], passwords[0]))

        if ssid_encryption:
            counter = 0
            for ssid, encryption, auth, password in ssid_encryption:

                if counter > 0:
                    configurations += "bss={}_{}\n".format(interface, counter - 1)
                    configurations += "ssid={}\n".format(ssid)

                if "opn" in encryption.lower():
                    configurations += "\n"
                elif "wep" in encryption.lower():
                    valid_lengths = [5, 10, 13, 16, 23]
                    if len(password) not in valid_lengths:
                        print   "[-] Invalid WEP key length for multiple ssid hotspot: '{}'\
                                \nDefaulting WEP key to '12345'".format(password)
                        password = "******"

                    configurations += self._get_wep_configurations(password) + "\n"

                elif "wpa" in encryption.lower():
                    if len(password) < 8 and "eap" not in auth.lower():
                        print "[-] Invalid WPA key length: '{}'\nDefaulting to '12345678'".format(password)
                        password = "12345678"

                    # Forcing a correct configuration so it doesn't come out WPA-WEP or something...
                    if auth.lower() not in ["psk", "eap"]:
                        auth = "PSK"

                    configurations += self._get_wpa_configurations(encryption, auth, cipher, password) + "\n"
                counter += 1

        return configurations

    def _get_wep_configurations(self, password):
        configurations = ""
        if (len(password) == 5 or len(password) == 13 or len(password) == 16):
            configurations += "wep_default_key=0\n"
            configurations += "wep_key0=\"{key}\"".format(key=password)
        elif (len(password) == 10 or len(password) == 23):
            configurations += "wep_default_key=0\n"
            configurations += "wep_key0={key}\n".format(key=password)
        else:
            error_msg = "WEP key must be either 5, 13, 16 ascii charachters or 10 or 23 HEX charachters.\n"
            raise InvalidConfigurationException(error_msg)

        return configurations

    def _get_wpa_configurations(self, encryption, auth, cipher, password):
        configurations = ""
        wpa_int = 1
        # Check if input is 'wpa/wpa2'
        if "/" in encryption:
            encryption = encryption.split("/")
            # wpa=1 -> wpa, wpa=2 -> wpa2, wpa=3 -> wpa/wpa2
            if "wpa2" in encryption:
                wpa_int += 1
            if "wpa" in encryption and "wpa2" in encryption:
                wpa_int += 1
        elif encryption == "wpa2":
            wpa_int += 1

        configurations += "wpa={wpa_int}\n".format(wpa_int=wpa_int)                 # configure wpa or wpa2
        configurations += "wpa_key_mgmt=WPA-{auth}\n".format(auth=auth.upper())     # authentication method: PSK or EAP
        configurations += "wpa_pairwise={cipher}\n".format(cipher=cipher.upper())   # cipher: CCMP or TKIP

        if auth.lower() == "eap":
            configurations += self._get_wpa_eap_configurations()
        else:
            configurations += self._get_and_check_wpa_password_configurations(configurations, password)

        return configurations
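
    # For example (hypothetical call), _get_wpa_configurations("wpa/wpa2",
    # "PSK", "CCMP", "12345678") yields:
    #   wpa=3
    #   wpa_key_mgmt=WPA-PSK
    #   wpa_pairwise=CCMP
    #   wpa_passphrase=12345678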

    def _get_wpa_eap_configurations(self):
        configurations =  "eap_user_file=/etc/hostapd-wpe/hostapd-wpe.eap_user\n"
        configurations += "ca_cert=/etc/hostapd-wpe//certs/certnew.cer\n"
        configurations += "server_cert=/etc/hostapd-wpe/certs/server.crt\n"
        configurations += "private_key=/etc/hostapd-wpe/certs/server.pem\n"
        configurations += "private_key_passwd=whatever\n"
        configurations += "dh_file=/etc/hostapd-wpe/certs/dh\n"
        configurations += "eap_server=1\n"
        configurations += "eap_fast_a_id=101112131415161718191a1b1c1d1e1f\n"
        configurations += "eap_fast_a_id_info=hostapd-wpe\n"
        configurations += "eap_fast_prov=3\n"
        configurations += "ieee8021x=1\n"
        configurations += "pac_key_lifetime=604800\n"
        configurations += "pac_key_refresh_time=86400\n"
        configurations += "pac_opaque_encr_key=000102030405060708090a0b0c0d0e0f\n"
        configurations += "auth_algs=3\n"
        return configurations

    def _get_and_check_wpa_password_configurations(self, configurations, password):
        if password is None:
            raise InvalidConfigurationException("Must specify a password when choosing wpa or wpa2 encryption!\n")
        if len(password) < 8 or len(password) > 63:
            raise InvalidConfigurationException("Specified password must have at least 8 printable charachters \
                                                and a maximum of 63\n")
        configurations = "wpa_passphrase={password}\n".format(password=password)   # password minimum is 8 digits
        return configurations

    def _parse_configs(self, ssid, encryption, auth, password):
        ssids, encryptions, auths, passwords = None, None, None, None
        if type(ssid) is list:
            ssids = ssid    # Be aware of multiple ssids
        elif type(ssid) is str:
            ssids = [ssid]

        # Specify encryption for each ssid,
        # if encryption list is different size from ssid list then it defaults to the first one
        if type(encryption) is list:
            if len(encryption) == len(ssids):
                encryptions = encryption
            else:
                encryptions = [encryption[0]]
        elif type(encryption) is str:
            encryptions = [encryption]

        # Same thing for auth suites and passwords
        if type(auth) is list:
            if len(auth) == len(encryptions):
                auths = auth
            else:
                auths = [auth[0]]

        elif type(auth) is str:
            auths = [auth]

        if type(password) is list:
            if len(password) == len(auths):
                passwords = password
            else:
                passwords = [password[0]]

        elif type(password) is str:
            passwords = [password]

        return ssids, encryptions, auths, passwords
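
    # For example (hypothetical input), _parse_configs(["net1", "net2"],
    # "wpa2", "PSK", "12345678") returns
    # (["net1", "net2"], ["wpa2"], ["PSK"], ["12345678"]); the single
    # encryption/auth/password then applies to every SSID in
    # _get_multiple_ssid_configurations.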

    def _async_cred_logging(self, log_file_path, print_creds=False):
        """This method checks for credentials in hostapd-wpe output."""
        log_file = None
        file_open = False
        output_lines = iter(self.ap_process.stdout.readline, "")
        incoming_cred = False
        username = ""
        password = ""
        jtr_challenge_response = ""
        for line in output_lines:
            if "username:"******"username:"******"a")
                file_open = True

            if "password:"******"password:"******":" + password + "\n"
                log_file.write(cred_string)
                if print_creds:
                    print cred_string

            if "NETNTLM:" in line:
                incoming_cred = False
                jtr_challenge_response = line.split("NETNTLM:")[-1].strip()
                log_file.write(jtr_challenge_response + "\n")
                if print_creds:
                    print jtr_challenge_response + "\n"

            if file_open and not incoming_cred:
                log_file.close()

        try:
            self.ap_process.stdout.close()
            log_file.close()
        except: pass

    def start_access_point(self, interface):
        """
        This method launches the preconfigured hostapd-wpe background process.

        It launches hostapd-wpe background process with Popen and sends its
        output to a thread which looks for found credentials.
        Another thread is started to monitor the connected client list.
        """
        print "[+] Starting hostapd background process"
        self.ap_process = Popen("hostapd-wpe -s {config_path}".format(config_path=self.hostapd_config_path).split(),
                                stdout=PIPE,
                                stderr=PIPE,
                                universal_newlines=True)
        self.hostapd_output_parser = Thread(target=self._async_cred_logging,
                                            args=("./data/hashes/eap_hashes{}.log".format(self._count_hash_captures()),
                                                    self.print_creds))
        self.hostapd_output_parser.start()

        self.ap_running = True
        sleep(1)
        self.connected_clients_updator = Thread(target=self._update_connected_clients, args=(interface, ))
        self.connected_clients_updator.start()

    def stop_access_point(self, wait = True):
        """
        This method stops the previously created access point.

        It kills the background hostapd-wpe process and stops the connected-clients updator.
        """
        if self.ap_process is not None:
            print "[+] Killing hostapd background process"
            self.ap_process.send_signal(9)  # Send SIGKILL (signal 9) to the hostapd-wpe process
            self.ap_process = None

        os.system('pkill hostapd-wpe')      # Cleanup
        self.ap_running = False
        if self.connected_clients_updator is not None:
            if wait:
                self.connected_clients_updator.join()
            self.connected_clients_updator = None

    def cleanup(self):
        """Global cleanup method."""
        if self.file_handler:
            self.file_handler.restore_file()
            self.file_handler = None

    def get_connected_clients(self):
        """Returns the list of client objects that are connected to the access point."""
        return [client for ssid in self.connected_clients.keys() for client in self.connected_clients[ssid]]

    def _update_connected_clients(self, interface):
        fail_count = 0
        while self.ap_running:
            # Gets virtual interfaces too because their name is same as ap_interface with _<index> appended
            ap_interfaces = [iface for iface in pyw.winterfaces() if interface in iface]
            for ap_interface in ap_interfaces:
                if not self._parse_connected_clients(ap_interface):
                    fail_count += 1

                if fail_count > 5:
                    print "[-] hostapd was unable to start the access point,"
                    print "check configuration file or try restarting. Stopping now."
                    self.stop_access_point(wait = False)
                    print "stop airhost manually to stop other services"
                    break

            sleep(3)

    def _parse_connected_clients(self, interface):
        try:
            if not pyw.modeget(pyw.getcard(interface)) == 'AP':
                print "[-] '{}' is not on AP mode".format(interface)
                return False
        except Exception:
            return False

        client_dump = check_output("iw dev {} station dump".format(interface).split()).split('Station')
        client_dump = [ map(str.strip, client.split("\n")) for client in client_dump if interface in client ]
        ssid = NetUtils().get_ssid_from_interface(interface)
        temp_clients = []
        # At this point a client is a list of arguments to be parsed
        for client in client_dump:
            client_mac                  = client[0].split()[0].strip()
            client_name, client_ip      = NetUtils().get_ip_from_mac(interface, client_mac)
            inactivity_time             = client[1].split(":")[1].strip()
            rx_packets                  = client[3].split(":")[1].strip()
            tx_packets                  = client[5].split(":")[1].strip()
            signal                      = client[8].split(":")[1].strip()
            tx_bitrate                  = client[10].split(":")[1].strip()
            rx_bitrate                  = client[11].split(":")[1].strip()
            client_id = len(temp_clients)

            client = Client(client_id, client_name, client_mac, client_ip, ssid, inactivity_time,
                            rx_packets, tx_packets, rx_bitrate, tx_bitrate, signal)

            try:
                if client not in self.connected_clients[interface]:
                    print "[+] New connected client on '{ssid}'-> ip: {ip}, mac: {mac} ({vendor})".\
                        format(ssid=ssid, ip=client_ip, mac=client_mac, vendor=client.vendor)
            except KeyError:
                # first time this interface is seen; no previous client list
                pass

            temp_clients.append(client)

        self.connected_clients[interface] = temp_clients
        return True

    def _count_hash_captures(self):
        return len(os.listdir("data/hashes/"))
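
The stop path above jumps straight to SIGKILL. For comparison, here is a minimal, self-contained sketch of the gentler shutdown order usually preferred for Popen-managed daemons: SIGINT first, SIGKILL only if the process ignores it. It assumes Python 3 on a POSIX system; "sleep 60" is just a hypothetical stand-in for hostapd-wpe.

import signal
from subprocess import Popen, TimeoutExpired

proc = Popen(["sleep", "60"])       # stand-in for the hostapd-wpe process
proc.send_signal(signal.SIGINT)     # ask for a graceful shutdown first
try:
    proc.wait(timeout=5)            # give it a few seconds to comply
except TimeoutExpired:
    proc.kill()                     # SIGKILL only as a last resort
    proc.wait()                     # reap the killed process
print("exited with return code", proc.returncode)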
Exemple #59
0
class simics(object):
    error_messages = [
        'Address not mapped', 'Illegal Instruction', 'Illegal instruction',
        'Illegal memory mapping', 'Illegal Memory Mapping',
        'Error setting attribute', 'dropping memop (peer attribute not set)',
        'where nothing is mapped', 'Error'
    ]

    def __init__(self, database, options):
        self.simics = None
        self.dut = None
        self.aux = None
        self.running = False
        self.db = database
        self.options = options
        if database.campaign.architecture == 'p2020':
            self.board = 'p2020rdb'
        elif database.campaign.architecture == 'a9':
            self.board = 'a9x2'
        self.set_targets()

    def __str__(self):
        return 'Simics simulation of {}'.format(self.board)

    def set_targets(self):
        if hasattr(self.options, 'selected_targets'):
            selected_targets = self.options.selected_targets
        else:
            selected_targets = None
        if hasattr(self.options, 'selected_registers'):
            selected_registers = self.options.selected_registers
        else:
            selected_registers = None
        if self.db.campaign.architecture == 'p2020':
            self.targets = get_targets('p2020', 'simics', selected_targets,
                                       selected_registers)
        elif self.db.campaign.architecture == 'a9':
            self.targets = get_targets('a9', 'simics', selected_targets,
                                       selected_registers)

    def launch_simics(self, checkpoint=None):
        cwd = '{}/simics-workspace'.format(getcwd())
        attempts = 10
        for attempt in range(attempts):
            self.simics = Popen(
                ['{}/simics'.format(cwd), '-no-win', '-no-gui', '-q'],
                bufsize=0,
                cwd=cwd,
                universal_newlines=True,
                stdin=PIPE,
                stdout=PIPE)
            try:
                self.__command()
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except Exception as error:
                self.simics.kill()
                self.__attempt_exception(
                    attempt, attempts, error, 'Error launching Simics',
                    'Error launching Simics, check your license connection')
            else:
                self.db.log_event('Information', 'Simics', 'Launched Simics')
                break
        # TODO: the commands below fail if no Simics license is available
        if checkpoint is None:
            self.__command('$drseus=TRUE')
            buff = self.__command(
                'run-command-file simics-{0}/{0}-linux{1}.simics'.format(
                    self.board, '-ethernet' if self.db.campaign.aux else ''))
        else:
            buff = self.__command('read-configuration {}'.format(checkpoint))
            buff += self.__command('connect-real-network-port-in ssh '
                                   'ethernet_switch0 target-ip=10.10.0.100')
            if self.db.campaign.aux:
                buff += self.__command(
                    'connect-real-network-port-in ssh '
                    'ethernet_switch0 target-ip=10.10.0.104')
        self.__command('enable-real-time-mode')
        found_settings = 0
        if checkpoint is None:
            serial_ports = []
        else:
            serial_ports = [0, 0]
        ssh_ports = []
        for line in buff.split('\n'):
            if 'pseudo device opened: /dev/pts/' in line:
                if checkpoint is None:
                    serial_ports.append(line.split(':')[1].strip())
                else:
                    if 'AUX_' in line:
                        serial_ports[1] = line.split(':')[1].strip()
                    else:
                        serial_ports[0] = line.split(':')[1].strip()
                found_settings += 1
            elif 'Host TCP port' in line:
                ssh_ports.append(int(line.split('->')[0].split()[-1]))
                found_settings += 1
            if not self.db.campaign.aux and found_settings == 2:
                break
            elif self.db.campaign.aux and found_settings == 4:
                break
        else:
            self.close()
            raise DrSEUsError('Error finding port or pseudoterminal')
        if self.board == 'p2020rdb':
            self.options.aux_prompt = self.options.dut_prompt = \
                'root@p2020rdb:~#'
            if self.options.dut_uboot:
                self.options.dut_uboot += '; '
            self.options.dut_uboot += ('setenv ethaddr 00:01:af:07:9b:8a; '
                                       'setenv eth1addr 00:01:af:07:9b:8b; '
                                       'setenv eth2addr 00:01:af:07:9b:8c; '
                                       'setenv consoledev ttyS0; '
                                       'setenv bootargs root=/dev/ram rw '
                                       'console=$consoledev,$baudrate; '
                                       'bootm ef080000 10000000 ef040000')
            if self.options.aux_uboot:
                self.options.aux_uboot += '; '
            self.options.aux_uboot += ('setenv ethaddr 00:01:af:07:9b:8d; '
                                       'setenv eth1addr 00:01:af:07:9b:8e; '
                                       'setenv eth2addr 00:01:af:07:9b:8f; '
                                       'setenv consoledev ttyS0; '
                                       'setenv bootargs root=/dev/ram rw '
                                       'console=$consoledev,$baudrate; '
                                       'bootm ef080000 10000000 ef040000; ')
        elif self.board == 'a9x2':
            self.options.aux_prompt = self.options.dut_prompt = '\n#'
            if self.options.dut_uboot:
                self.options.dut_uboot += ';'
            self.options.dut_uboot += ('setenv bootargs console=ttyAMA0 '
                                       'root=/dev/ram0 rw;'
                                       'bootm 0x40800000 0x70000000')
            if self.options.aux_uboot:
                self.options.aux_uboot += ';'
            self.options.aux_uboot += ('setenv bootargs console=ttyAMA0 '
                                       'root=/dev/ram0 rw;'
                                       'bootm 0x40800000 0x70000000')
        self.options.dut_serial_port = serial_ports[0]
        self.options.dut_ip_address = '10.10.0.100'
        self.options.dut_scp_port = ssh_ports[0]
        self.dut = dut(self.db, self.options)
        if self.db.campaign.aux:
            self.options.aux_serial_port = serial_ports[1]
            self.options.aux_ip_address = '10.10.0.104'
            self.options.aux_scp_port = ssh_ports[1]
            self.aux = dut(self.db, self.options, aux=True)
        if checkpoint is None:
            self.continue_dut()
            self.dut.read_until('Hit any key')
            self.halt_dut()
            if self.board == 'p2020rdb':
                self.__command('DUT_p2020rdb.soc.phys_mem.load-file '
                               '$initrd_image $initrd_addr')
                if self.db.campaign.aux:
                    self.__command('AUX_p2020rdb_1.soc.phys_mem.load-file '
                                   '$initrd_image $initrd_addr')
            elif self.board == 'a9x2':
                self.__command('DUT_a9x2.coretile.mpcore.phys_mem.load-file '
                               '$kernel_image $kernel_addr')
                self.__command('DUT_a9x2.coretile.mpcore.phys_mem.load-file '
                               '$initrd_image $initrd_addr')
                if self.db.campaign.aux:
                    self.__command('AUX_a9x2_1.coretile.mpcore.phys_mem.'
                                   'load-file $kernel_image $kernel_addr')
                    self.__command('AUX_a9x2_1.coretile.mpcore.phys_mem.'
                                   'load-file $initrd_image $initrd_addr')
            self.continue_dut()
            if self.db.campaign.aux:
                aux_process = Thread(target=self.aux.do_login,
                                     kwargs={
                                         'change_prompt': self.board == 'a9x2',
                                         'flush': False
                                     })
                aux_process.start()
            self.dut.do_login(change_prompt=(self.board == 'a9x2'),
                              flush=False)
            if self.db.campaign.aux:
                aux_process.join()
        else:
            self.dut.ip_address = '127.0.0.1'
            if self.board == 'a9x2':
                self.dut.prompt = 'DrSEUs# '
            if self.db.campaign.aux:
                self.aux.ip_address = '127.0.0.1'
                if self.board == 'a9x2':
                    self.aux.prompt = 'DrSEUs# '

    def launch_simics_gui(self, checkpoint):
        if self.board == 'p2020rdb':
            serial_port = 'serial[0]'
        elif self.board == 'a9x2':
            serial_port = 'serial0'
        simics_commands = (
            'read-configuration {0}; new-text-console-comp text_console0; '
            'disconnect DUT_{1}.console0.serial DUT_{1}.{2}; '
            'connect text_console0.serial DUT_{1}.{2}; '
            'connect-real-network-port-in ssh ethernet_switch0 '
            'target-ip=10.10.0.100;'.format(checkpoint, self.board,
                                            serial_port))

        if self.db.campaign.aux:
            simics_commands += (
                'new-text-console-comp text_console1; '
                'disconnect AUX_{0}_1.console0.serial AUX_{0}_1.{1}; '
                'connect text_console1.serial AUX_{0}_1.{1}; '
                'connect-real-network-port-in ssh ethernet_switch0 '
                'target-ip=10.10.0.104;'.format(self.board, serial_port))
        cwd = '{}/simics-workspace'.format(getcwd())
        call(['{}/simics-gui'.format(cwd), '-e', simics_commands], cwd=cwd)

    def close(self):
        if self.simics:
            if self.dut:
                self.dut.close()
                self.dut = None
            if self.aux:
                self.aux.close()
                self.aux = None
            try:
                self.halt_dut()
                self.__command('quit')
            except DrSEUsError as error:
                if error.type == 'Timeout reading from Simics':
                    self.simics.kill()
                    self.db.log_event('Warning', 'Simics',
                                      'Killed unresponsive Simics',
                                      self.db.log_exception)
                    self.simics = None
                    return
            self.simics.wait()
            self.db.log_event('Information', 'Simics', 'Closed Simics')
            self.simics = None

    def halt_dut(self):
        if self.running:
            self.db.log_event('Information', 'Simics', 'Halt DUT')
            # SIGINT drops the running simulation back to the simics> prompt
            self.simics.send_signal(SIGINT)
            self.running = False
            self.__command()

    def continue_dut(self):
        if not self.running:
            self.simics.stdin.write('run\n')
            self.running = True
            if self.db.result is None:
                self.db.campaign.debugger_output += 'run\n'
            else:
                self.db.result.debugger_output += 'run\n'
            if self.options.debug:
                print(colored('run', 'yellow'))
            self.db.log_event('Information', 'Simics', 'Continue DUT')

    def reset_dut(self):
        pass

    def __command(self, command=None, time=10):
        def read_until():
            buff = ''
            hanging = False
            while True:
                try:
                    with timeout(time):
                        char = self.simics.stdout.read(1)
                except TimeoutException:
                    char = ''
                    hanging = True
                    self.db.log_event('Warning', 'Simics', 'Read timeout',
                                      self.db.log_exception)
                if not char:
                    break
                if self.db.result is None:
                    self.db.campaign.debugger_output += char
                else:
                    self.db.result.debugger_output += char
                if self.options.debug:
                    print(colored(char, 'yellow'), end='')
                    stdout.flush()
                buff += char
                if buff.endswith('simics> '):
                    break
            if self.options.debug:
                print()
            if self.db.result is None:
                self.db.campaign.save()
            else:
                self.db.result.save()
            for message in self.error_messages:
                if message in buff:
                    self.db.log_event('Error', 'Simics', message, buff)
                    raise DrSEUsError(message)
            if hanging:
                raise DrSEUsError('Timeout reading from Simics')
            return buff

    # def __command(self, command=None, time=10):

        if command:
            event = self.db.log_event('Information',
                                      'Simics',
                                      'Command',
                                      command,
                                      success=False)
        if command is not None:
            self.simics.stdin.write('{}\n'.format(command))
            if self.db.result is None:
                self.db.campaign.debugger_output += '{}\n'.format(command)
            else:
                self.db.result.debugger_output += '{}\n'.format(command)
            if self.options.debug:
                print(colored(command, 'yellow'))
        buff = read_until()
        if command:
            event.success = True
            event.save()
        return buff

    def __attempt_exception(self,
                            attempt,
                            attempts,
                            error,
                            error_type,
                            message,
                            close_items=()):
        self.db.log_event('Warning' if attempt < attempts - 1 else 'Error',
                          'Simics', error_type, self.db.log_exception)
        print(
            colored(
                '{}: {} (attempt {}/{}): {}'.format(
                    self.options.dut_serial_port, message, attempt + 1,
                    attempts, error), 'red'))
        for item in close_items:
            item.close()
        if attempt < attempts - 1:
            sleep(30)
        else:
            raise DrSEUsError(error_type)

    def __merge_checkpoint(self, checkpoint, attempts=10):
        if self.options.debug:
            print(colored('merging checkpoint...', 'blue'), end='')
            stdout.flush()
        merged_checkpoint = '{}_merged'.format(checkpoint)
        cwd = '{}/simics-workspace'.format(getcwd())
        for attempt in range(attempts):
            try:
                check_call([
                    '{}/bin/checkpoint-merge'.format(cwd), checkpoint,
                    merged_checkpoint
                ],
                           cwd=cwd,
                           stdout=DEVNULL)
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except Exception as error:
                self.__attempt_exception(attempt, attempts, error,
                                         'Checkpoint merge error',
                                         'Error merging checkpoint')
            else:
                break
        if self.options.debug:
            print(colored('done', 'blue'))
        return merged_checkpoint

    def get_time(self):
        time_data = self.__command('print-time').split('\n')[-2].split()
        return int(time_data[2]), float(time_data[3])

    def create_checkpoints(self):
        event = self.db.log_event('Information',
                                  'Simics',
                                  'Created gold checkpoints',
                                  success=False,
                                  campaign=True)
        makedirs('simics-workspace/gold-checkpoints/{}'.format(
            self.db.campaign.id))
        if self.db.campaign.command:
            self.db.campaign.cycles_between = \
                int(self.db.campaign.cycles / self.options.checkpoints)
            if self.db.campaign.aux:
                self.db.log_event('Information', 'AUX', 'Command',
                                  self.db.campaign.aux_command)
                aux_process = Thread(target=self.aux.command,
                                     kwargs={
                                         'command':
                                         self.db.campaign.aux_command,
                                         'flush': False
                                     })
                aux_process.start()
            self.db.log_event('Information', 'DUT', 'Command',
                              self.db.campaign.command)
            self.dut.write('{}\n'.format(self.db.campaign.command))
            length = len(self.db.campaign.dut_output)
            read_thread = Thread(target=self.dut.read_until,
                                 kwargs={'flush': False})
            read_thread.start()
            checkpoint = 1
            while True:
                self.__command('run-cycles {}'.format(
                    self.db.campaign.cycles_between),
                               time=300)
                old_length = length
                length = len(self.db.campaign.dut_output)
                if length - old_length:
                    self.db.campaign.dut_output += \
                        '{}{:*^80}\n\n'.format(
                            '\n'
                            if self.db.campaign.dut_output.endswith('\n')
                            else '\n\n',
                            ' Checkpoint {} '.format(checkpoint))
                    length = len(self.db.campaign.dut_output)
                incremental_checkpoint = 'gold-checkpoints/{}/{}'.format(
                    self.db.campaign.id, checkpoint)
                self.__command(
                    'write-configuration {}'.format(incremental_checkpoint),
                    time=300)
                if not read_thread.is_alive() or \
                    (self.db.campaign.aux and
                        self.db.campaign.kill_dut and
                        not aux_process.is_alive()):
                    self.__merge_checkpoint(incremental_checkpoint)
                    break
                else:
                    checkpoint += 1
            self.db.campaign.checkpoints = checkpoint
            event.success = True
            event.timestamp = datetime.now()
            event.save()
            self.continue_dut()
            if self.db.campaign.kill_aux:
                self.aux.write('\x03')
            if self.db.campaign.aux:
                aux_process.join()
            if self.db.campaign.kill_dut:
                self.dut.write('\x03')
            read_thread.join()
        else:
            self.db.campaign.checkpoints = 1
            self.halt_dut()
            self.__command('write-configuration gold-checkpoints/{}/1'.format(
                self.db.campaign.id),
                           time=300)

    def inject_faults(self):
        def persistent_faults():
            if self.db.result.simics_memory_diff_set.count() > 0:
                return False
            injections = self.db.result.injection_set.all()
            register_diffs = self.db.result.simics_register_diff_set.all()
            for register_diff in register_diffs:
                for injection in injections:
                    if injection.register_alias is None:
                        injected_register = injection.register
                    else:
                        injected_register = injection.register_alias
                    if injection.register_index is not None:
                        injected_register = '{}:{}'.format(
                            injected_register,
                            ':'.join(map(str, injection.register_index)))
                    if register_diff.config_object == \
                        injection.config_object and \
                            register_diff.register == injected_register:
                        if (int(register_diff.monitored_value,
                                base=0) == int(injection.injected_value,
                                               base=0)):
                            break
                else:
                    # no injection accounts for this register difference
                    return False
            else:
                # every register difference matched an injected value
                return True

    # def inject_faults(self):

        checkpoint_nums = list(range(1, self.db.campaign.checkpoints))
        checkpoints_to_inject = []
        for i in range(self.options.injections):
            checkpoint_num = choice(checkpoint_nums)
            checkpoint_nums.remove(checkpoint_num)
            checkpoints_to_inject.append(checkpoint_num)
        checkpoints_to_inject = sorted(checkpoints_to_inject)
        reg_errors = 0
        mem_errors = 0
        if checkpoints_to_inject:
            for injection_number, checkpoint in \
                    enumerate(checkpoints_to_inject, start=1):
                injected_checkpoint, injection = \
                    self.__inject_checkpoint(injection_number, checkpoint)
                self.launch_simics(injected_checkpoint)
                injection.time = \
                    self.get_time()[1] - self.db.campaign.start_time
                injection.save()
                injections_remaining = \
                    injection_number < len(checkpoints_to_inject)
                if injections_remaining:
                    next_checkpoint = checkpoints_to_inject[injection_number]
                else:
                    next_checkpoint = self.db.campaign.checkpoints
                reg_errors_, mem_errors_ = \
                    self.__compare_checkpoints(checkpoint, next_checkpoint)
                if reg_errors_ > reg_errors:
                    reg_errors = reg_errors_
                if mem_errors_ > mem_errors:
                    mem_errors = mem_errors_
                if injections_remaining:
                    self.close()
                else:
                    self.continue_dut()
        else:
            self.close()
            makedirs('simics-workspace/injected-checkpoints/{}/{}'.format(
                self.db.campaign.id, self.db.result.id))
            self.launch_simics('gold-checkpoints/{}/1'.format(
                self.db.campaign.id))
            reg_errors, mem_errors = \
                self.__compare_checkpoints(1, self.db.campaign.checkpoints)
        return reg_errors, mem_errors, (reg_errors and persistent_faults())

    def regenerate_checkpoints(self, injections):
        self.db.result.id = self.options.result_id
        for injection_number, injection in enumerate(injections, start=1):
            injected_checkpoint = self.__inject_checkpoint(
                injection_number, injection.checkpoint, injection)[0]
            if injection_number < len(injections):
                self.launch_simics(checkpoint=injected_checkpoint)
                for j in range(injection.checkpoint,
                               injections[injection_number].checkpoint):
                    self.__command('run-cycles {}'.format(
                        self.db.campaign.cycles_between),
                                   time=300)
                self.__command(
                    'write-configuration injected-checkpoints/{}/{}/{}'.format(
                        self.db.campaign.id, self.options.result_id,
                        injections[injection_number].checkpoint),
                    time=300)
                self.close()
        return injected_checkpoint

    def __inject_checkpoint(self,
                            injection_number,
                            checkpoint,
                            injection=None):
        def inject_config(injected_checkpoint, injection):
            def flip_bit(value, bit):
                num_bits = get_num_bits(injection.register, injection.target,
                                        self.targets)
                if bit >= num_bits or bit < 0:
                    raise Exception('invalid bit: {} for num_bits: {}'.format(
                        bit, num_bits))
                value = int(value, base=0)
                binary_list = list(bin(value)[2:].zfill(num_bits))
                flip_index = num_bits - 1 - bit
                binary_list[flip_index] = \
                    '1' if binary_list[flip_index] == '0' else '0'
                injected_value = int(''.join(binary_list), 2)
                injected_value = hex(injected_value).rstrip('L')
                return injected_value

        # def inject_config(injected_checkpoint, injection):

            with simics_config(injected_checkpoint) as config:
                config_object = injection.config_object
                if injection.register_alias is None:
                    register = injection.register
                else:
                    register = injection.register_alias
                gold_value = config.get(config_object, register)
                if gold_value is None:
                    raise Exception('error getting register value from config')
                if injection.register_index is None:
                    if not injection.injected_value:
                        injected_value = flip_bit(gold_value, injection.bit)
                    else:
                        injected_value = injection.injected_value
                    config.set(config_object, register, injected_value)
                else:
                    register_list_ = register_list = gold_value
                    if not injection.injected_value:
                        for index in injection.register_index:
                            gold_value = gold_value[index]
                        injected_value = flip_bit(gold_value, injection.bit)
                    else:
                        injected_value = injection.injected_value
                    for index in range(len(injection.register_index) - 1):
                        register_list_ = \
                            register_list_[injection.register_index[index]]
                    register_list_[injection.register_index[-1]] = \
                        injected_value
                    config.set(config_object, register, register_list)
                config.save()
            return gold_value, injected_value

    # def __inject_checkpoint(self, injection_number, checkpoint,
    #                         injection=None):
        if injection_number == 1:
            gold_checkpoint = 'simics-workspace/gold-checkpoints/{}/{}'.format(
                self.db.campaign.id, checkpoint)
        else:
            gold_checkpoint = \
                'simics-workspace/injected-checkpoints/{}/{}/{}'.format(
                    self.db.campaign.id, self.db.result.id, checkpoint)
        injected_checkpoint = \
            'simics-workspace/injected-checkpoints/{}/{}/{}_injected'.format(
                self.db.campaign.id, self.db.result.id, checkpoint)
        makedirs(injected_checkpoint)
        checkpoint_files = listdir(gold_checkpoint)
        for checkpoint_file in checkpoint_files:
            copyfile(join(gold_checkpoint, checkpoint_file),
                     join(injected_checkpoint, checkpoint_file))
        if injection is None:
            injection = choose_injection(self.targets,
                                         self.options.selected_target_indices)
            injection = self.db.result.injection_set.create(
                checkpoint=checkpoint, success=False, **injection)
            injection.config_object = 'DUT_{}.{}'.format(
                self.board, self.targets[injection.target]['object'])
            if injection.target_index is not None:
                injection.config_object += '[{}]'.format(
                    injection.target_index)
            injection.save()
            try:
                injection.gold_value, injection.injected_value = \
                    inject_config(injected_checkpoint, injection)
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except Exception:
                self.db.log_event('Error', 'Simics', 'Error injecting fault',
                                  self.db.log_exception)
                raise DrSEUsError('Error injecting fault')
            else:
                injection.success = True
                injection.save()
                self.db.log_event('Information', 'Simics', 'Fault injected')
            if self.options.debug:
                print(
                    colored(
                        'result id: {}\ncheckpoint number: {}\ntarget: {}\n'
                        'register: {}\nbit: {}\ngold value: {}\ninjected value: {}'
                        ''.format(self.db.result.id, checkpoint,
                                  injection.target_name, injection.register,
                                  injection.bit, injection.gold_value,
                                  injection.injected_value), 'magenta'))
                if injection.register_index is not None:
                    print(
                        colored(
                            'register index: {}'.format(
                                injection.register_index), 'magenta'))
        else:
            inject_config(injected_checkpoint, injection)
        return injected_checkpoint.replace('simics-workspace/', ''), injection

    def __compare_checkpoints(self, checkpoint, last_checkpoint):
        def compare_registers(checkpoint, gold_checkpoint,
                              monitored_checkpoint):
            """
            Compares the register values of the checkpoint for iteration
            to the gold_checkpoint and adds the differences to the database.
            """
            def get_registers(checkpoint):
                """
                Retrieves all the register values of the targets specified in
                simics_targets.py for the specified checkpoint and returns a
                dictionary with all the values.
                """
                with simics_config('simics-workspace/{}'.format(checkpoint)) \
                        as config:
                    registers = {}
                    for target in self.targets:
                        if 'count' in self.targets[target]:
                            count = self.targets[target]['count']
                        else:
                            count = 1
                        for target_index in range(count):
                            config_object = 'DUT_{}.{}'.format(
                                self.board, self.targets[target]['object'])
                            if count > 1:
                                config_object += '[{}]'.format(target_index)
                            if config_object not in registers:
                                registers[config_object] = {}
                            for register in self.targets[target]['registers']:
                                if 'alias' in (self.targets[target]
                                               ['registers'][register]):
                                    register = \
                                        (self.targets[target]['registers']
                                                     [register]['alias']
                                                     ['register'])
                                if register not in registers[config_object]:
                                    registers[config_object][register] = \
                                        config.get(config_object, register)
                return registers

            # watch out! we're gonna use recursion
            # keep your arms and legs inside the stack frame at all times
            def log_diffs(config_object, register, gold_value,
                          monitored_value):
                if isinstance(gold_value, list):
                    for index in range(len(gold_value)):
                        try:
                            log_diffs(config_object,
                                      '{}:{}'.format(register,
                                                     index), gold_value[index],
                                      monitored_value[index])
                        except IndexError:  # TODO: remove this debug statement
                            self.db.log_event(
                                'DEBUG', 'DrSEUs', 'IndexError',
                                'config object: {}\nregister: {}:{}\n gold: {}'
                                '\nmonitored: {}'.format(
                                    config_object, register, index, gold_value,
                                    monitored_value))
                else:
                    if int(monitored_value, base=0) != int(gold_value, base=0):
                        self.db.result.simics_register_diff_set.create(
                            checkpoint=checkpoint,
                            config_object=config_object,
                            register=register,
                            gold_value=gold_value,
                            monitored_value=monitored_value)

        # def compare_registers(checkpoint, gold_checkpoint,
        #                       monitored_checkpoint):
        # import pprint

            gold_registers = get_registers(gold_checkpoint)
            # with open('gold_regs.txt', 'w') as gold_out:
            #     pp = pprint.PrettyPrinter(indent=4, stream=gold_out)
            #     pp.pprint(gold_registers)
            monitored_registers = get_registers(monitored_checkpoint)
            # with open('mon_regs.txt', 'w') as mon_out:
            #     pp = pprint.PrettyPrinter(indent=4, stream=mon_out)
            #     pp.pprint(monitored_registers)
            for config_object in gold_registers:
                for register in gold_registers[config_object]:
                    log_diffs(config_object, register,
                              gold_registers[config_object][register],
                              monitored_registers[config_object][register])
            # count once, after all diffs have been logged
            return self.db.result.simics_register_diff_set.count()

        def compare_memory(checkpoint, gold_checkpoint, monitored_checkpoint):
            """
            Compare the memory contents of gold_checkpoint with
            monitored_checkpoint and return the list of blocks that do not
            match. If extract_blocks is true then extract any blocks that do not
            match to incremental_checkpoint/memory-blocks/.
            """
            def parse_content_map(content_map, block_size):
                """
                Parse a content_map created by the Simics craff utility and
                return a list of the image addresses that contain data.
                """
                with open('simics-workspace/{}'.format(content_map), 'r') \
                        as content_map:
                    diff_addresses = []
                    for line in content_map:
                        if 'empty' not in line:
                            line = line.split()
                            base_address = int(line[0], 16)
                            offsets = [
                                index for index, value in enumerate(line[1])
                                if value == 'D'
                            ]
                            for offset in offsets:
                                diff_addresses.append(base_address +
                                                      offset * block_size)
                return diff_addresses

            def extract_diff_blocks(gold_ram, monitored_ram,
                                    incremental_checkpoint, addresses,
                                    block_size):
                """
                Extract all of the blocks of size block_size specified in
                addresses of both the gold_ram image and the monitored_ram
                image.
                """
                if len(addresses) > 0:
                    mkdir('{}/memory-blocks'.format(incremental_checkpoint))
                    for address in addresses:
                        check_call([
                            craff, gold_ram, '--extract={:#x}'.format(address),
                            '--extract-block-size={}'.format(block_size),
                            '--output={}/memory-blocks/{:#x}_gold.raw'
                            ''.format(incremental_checkpoint, address)
                        ],
                                   cwd=cwd)
                        check_call([
                            craff, monitored_ram,
                            '--extract={:#x}'.format(address),
                            '--extract-block-size={}'.format(block_size),
                            '--output={}/memory-blocks/{:#x}_monitored'
                            '.raw'.format(incremental_checkpoint, address)
                        ],
                                   cwd=cwd)

        # def compare_memory(checkpoint, gold_checkpoint, monitored_checkpoint):

            if self.board == 'p2020rdb':
                gold_rams = [
                    '{}/DUT_{}.soc.ram_image[0].craff'.format(
                        gold_checkpoint, self.board)
                ]
                monitored_rams = [
                    '{}/DUT_{}.soc.ram_image[0].craff'.format(
                        monitored_checkpoint, self.board)
                ]
            elif self.board == 'a9x2':
                gold_rams = [
                    '{}/DUT_{}.coretile.ddr_image[{}].craff'.format(
                        gold_checkpoint, self.board, index)
                    for index in range(2)
                ]
                monitored_rams = [
                    '{}/DUT_{}.coretile.ddr_image[{}].craff'.format(
                        monitored_checkpoint, self.board, index)
                    for index in range(2)
                ]
            ram_diffs = ['{}.diff'.format(ram) for ram in monitored_rams]
            diff_content_maps = [
                '{}.content_map'.format(diff) for diff in ram_diffs
            ]
            diffs = 0
            cwd = '{}/simics-workspace'.format(getcwd())
            craff = '{}/bin/craff'.format(cwd)
            for (image_index, gold_ram, monitored_ram, ram_diff,
                 diff_content_map) in zip(range(len(monitored_rams)),
                                          gold_rams, monitored_rams, ram_diffs,
                                          diff_content_maps):
                check_call([
                    craff, '--diff', gold_ram, monitored_ram,
                    '--output={}'.format(ram_diff)
                ],
                           cwd=cwd,
                           stdout=DEVNULL)
                check_call([
                    craff, '--content-map', ram_diff,
                    '--output={}'.format(diff_content_map)
                ],
                           cwd=cwd,
                           stdout=DEVNULL)
                craff_output = check_output([craff, '--info', ram_diff],
                                            cwd=cwd,
                                            universal_newlines=True)
                block_size = int(
                    findall(r'\d+',
                            craff_output.split('\n')[2])[1])
                changed_blocks = parse_content_map(diff_content_map,
                                                   block_size)
                diffs += len(changed_blocks)
                if self.options.extract_blocks:
                    extract_diff_blocks(gold_ram, monitored_ram,
                                        monitored_checkpoint, changed_blocks,
                                        block_size)
                for block in changed_blocks:
                    self.db.result.simics_memory_diff_set.create(
                        checkpoint=checkpoint,
                        image_index=image_index,
                        block=hex(block))
            return diffs

    # def __compare_checkpoints(self, checkpoint, last_checkpoint):
        reg_errors = 0
        mem_errors = 0
        if self.options.compare_all:
            checkpoints = range(checkpoint + 1, last_checkpoint + 1)
            cycles_between = self.db.campaign.cycles_between
        else:
            checkpoints = [last_checkpoint]
            cycles_between = self.db.campaign.cycles_between * \
                (last_checkpoint-checkpoint)
        for checkpoint in checkpoints:
            self.__command('run-cycles {}'.format(cycles_between),
                           time=300 if self.options.compare_all else 600)
            incremental_checkpoint = 'injected-checkpoints/{}/{}/{}'.format(
                self.db.campaign.id, self.db.result.id, checkpoint)
            monitor = self.options.compare_all or \
                checkpoint == self.db.campaign.checkpoints
            if monitor or checkpoint == last_checkpoint:
                self.__command(
                    'write-configuration {}'.format(incremental_checkpoint),
                    time=300)
            if monitor:
                monitored_checkpoint = \
                    self.__merge_checkpoint(incremental_checkpoint)
                gold_incremental_checkpoint = 'gold-checkpoints/{}/{}'.format(
                    self.db.campaign.id, checkpoint)
                gold_checkpoint = '{}_merged'.format(
                    gold_incremental_checkpoint)
                if not exists('simics-workspace/{}'.format(gold_checkpoint)):
                    self.__merge_checkpoint(gold_incremental_checkpoint)
                errors = compare_registers(checkpoint, gold_checkpoint,
                                           monitored_checkpoint)
                if errors > reg_errors:
                    reg_errors = errors
                errors = compare_memory(checkpoint, gold_checkpoint,
                                        monitored_checkpoint)
                if errors > mem_errors:
                    mem_errors = errors
        return reg_errors, mem_errors
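
The simics class above is essentially a prompt-driven client for an interactive CLI: write a command to stdin, read stdout one character at a time until "simics> " appears, and send SIGINT (halt_dut) to drop a running simulation back to that prompt. Below is a stripped-down sketch of the read-until-prompt half, assuming Python 3; the inline child program and its "sim> " prompt are hypothetical stand-ins for the Simics CLI.

import sys
from subprocess import PIPE, Popen

# Toy child process: prints a prompt, echoes each input line, repeats.
child_src = (
    "import sys\n"
    "while True:\n"
    "    sys.stdout.write('sim> ')\n"
    "    sys.stdout.flush()\n"
    "    line = sys.stdin.readline()\n"
    "    if not line:\n"
    "        break\n"
    "    sys.stdout.write('echo: ' + line)\n"
    "    sys.stdout.flush()\n"
)
repl = Popen([sys.executable, "-u", "-c", child_src],
             stdin=PIPE, stdout=PIPE, universal_newlines=True)

def read_until(prompt="sim> "):
    # Accumulate one character at a time until the prompt appears,
    # mirroring the read_until() loop inside simics.__command().
    buff = ""
    while not buff.endswith(prompt):
        char = repl.stdout.read(1)
        if not char:                # pipe closed: the child exited
            break
        buff += char
    return buff

read_until()                        # swallow the initial prompt
repl.stdin.write("hello\n")
repl.stdin.flush()
print(read_until())                 # "echo: hello" plus the next prompt
repl.stdin.close()
repl.wait()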
Exemple #60
0
class FaceswapControl():
    """ Control the underlying Faceswap tasks """
    def __init__(self, wrapper):
        logger.debug("Initializing %s", self.__class__.__name__)
        self.wrapper = wrapper
        self.statusbar = get_config().statusbar
        self.command = None
        self.args = None
        self.process = None
        self.train_stats = {"iterations": 0, "timestamp": None}
        self.consoleregex = {
            "loss":
            re.compile(r"([a-zA-Z_]+):.*?(\d+\.\d+)"),
            "tqdm":
            re.compile(r".*?(?P<pct>\d+%).*?(?P<itm>\d+/\d+)\W\["
                       r"(?P<tme>\d+:\d+<.*),\W(?P<rte>.*)[a-zA-Z/]*\]"),
            "ffmpeg":
            re.compile(r"([a-zA-Z]+)=\s*(-?[\d|N/A]\S+)")
        }
        logger.debug("Initialized %s", self.__class__.__name__)

    def execute_script(self, command, args):
        """ Execute the requested Faceswap Script """
        logger.debug("Executing Faceswap: (command: '%s', args: %s)", command,
                     args)
        self.command = command
        kwargs = {
            "stdout": PIPE,
            "stderr": PIPE,
            "bufsize": 1,
            "universal_newlines": True
        }

        self.process = Popen(args, **kwargs, stdin=PIPE)
        self.thread_stdout()
        self.thread_stderr()
        logger.debug("Executed Faceswap")

    def read_stdout(self):
        """ Read stdout from the subprocess. If training, pass the loss
        values to Queue """
        logger.debug("Opening stdout reader")
        while True:
            try:
                output = self.process.stdout.readline()
            except ValueError as err:
                if str(err).lower().startswith("i/o operation on closed file"):
                    break
                raise
            if output == "" and self.process.poll() is not None:
                break
            if output:
                if ((self.command == "train" and self.capture_loss(output)) or
                    (self.command == "effmpeg" and self.capture_ffmpeg(output))
                        or (self.command not in ("train", "effmpeg")
                            and self.capture_tqdm(output))):
                    continue
                if self.command == "train" and output.strip().endswith(
                        "saved models"):
                    logger.debug("Trigger update preview")
                    self.wrapper.tk_vars["updatepreview"].set(True)
                print(output.strip())
        returncode = self.process.poll()
        message = self.set_final_status(returncode)
        self.wrapper.terminate(message)
        logger.debug("Terminated stdout reader. returncode: %s", returncode)

    def read_stderr(self):
        """ Read stdout from the subprocess. If training, pass the loss
        values to Queue """
        logger.debug("Opening stderr reader")
        while True:
            try:
                output = self.process.stderr.readline()
            except ValueError as err:
                if str(err).lower().startswith("i/o operation on closed file"):
                    break
                raise
            if output == "" and self.process.poll() is not None:
                break
            if output:
                if self.command != "train" and self.capture_tqdm(output):
                    continue
                print(output.strip(), file=sys.stderr)
        logger.debug("Terminated stderr reader")

    def thread_stdout(self):
        """ Put the subprocess stdout so that it can be read without
        blocking """
        logger.debug("Threading stdout")
        thread = Thread(target=self.read_stdout)
        thread.daemon = True
        thread.start()
        logger.debug("Threaded stdout")

    def thread_stderr(self):
        """ Put the subprocess stderr so that it can be read without
        blocking """
        logger.debug("Threading stderr")
        thread = Thread(target=self.read_stderr)
        thread.daemon = True
        thread.start()
        logger.debug("Threaded stderr")

    def capture_loss(self, string):
        """ Capture loss values from stdout """
        logger.trace("Capturing loss")
        if not string.startswith("["):
            logger.trace("Not loss message. Returning False")
            return False

        loss = self.consoleregex["loss"].findall(string)
        if len(loss) < 2:
            logger.trace("Not loss message. Returning False")
            return False

        message = ""
        for item in loss:
            message += "{}: {}  ".format(item[0], item[1])
        if not message:
            logger.trace("Error creating loss message. Returning False")
            return False

        iterations = self.train_stats["iterations"]

        if iterations == 0:
            # Initialize session stats and set initial timestamp
            self.train_stats["timestamp"] = time()

        if not get_config().session.initialized and iterations > 0:
            # Don't initialize session until after the first iteration as state
            # file must exist first
            get_config().session.initialize_session(is_training=True)
            self.wrapper.tk_vars["refreshgraph"].set(True)

        iterations += 1
        if iterations % 100 == 0:
            self.wrapper.tk_vars["refreshgraph"].set(True)
        self.train_stats["iterations"] = iterations

        elapsed = self.calc_elapsed()
        message = "Elapsed: {}  Iteration: {}  {}".format(
            elapsed, self.train_stats["iterations"], message)
        self.statusbar.progress_update(message, 0, False)
        logger.trace("Succesfully captured loss: %s", message)
        return True

    def calc_elapsed(self):
        """ Calculate and format time since training started """
        now = time()
        elapsed_time = now - self.train_stats["timestamp"]
        try:
            hrs = int(elapsed_time // 3600)
            if hrs < 10:
                hrs = "{0:02d}".format(hrs)
            mins = "{0:02d}".format((int(elapsed_time % 3600) // 60))
            secs = "{0:02d}".format((int(elapsed_time % 3600) % 60))
        except ZeroDivisionError:
            hrs = "00"
            mins = "00"
            secs = "00"
        return "{}:{}:{}".format(hrs, mins, secs)

    def capture_tqdm(self, string):
        """ Capture tqdm output for progress bar """
        logger.trace("Capturing tqdm")
        tqdm = self.consoleregex["tqdm"].match(string)
        if not tqdm:
            return False
        tqdm = tqdm.groupdict()
        if any("?" in val for val in tqdm.values()):
            logger.trace("tqdm initializing. Skipping")
            return True
        processtime = "Elapsed: {}  Remaining: {}".format(
            tqdm["tme"].split("<")[0], tqdm["tme"].split("<")[1])
        message = "{}  |  {}  |  {}  |  {}".format(processtime, tqdm["rte"],
                                                   tqdm["itm"], tqdm["pct"])

        current, total = tqdm["itm"].split("/")
        position = int((float(current) / float(total)) * 1000)

        self.statusbar.progress_update(message, position, True)
        logger.trace("Succesfully captured tqdm message: %s", message)
        return True

    def capture_ffmpeg(self, string):
        """ Capture tqdm output for progress bar """
        logger.trace("Capturing ffmpeg")
        ffmpeg = self.consoleregex["ffmpeg"].findall(string)
        if len(ffmpeg) < 7:
            logger.trace("Not ffmpeg message. Returning False")
            return False

        message = ""
        for item in ffmpeg:
            message += "{}: {}  ".format(item[0], item[1])
        if not message:
            logger.trace("Error creating ffmpeg message. Returning False")
            return False

        self.statusbar.progress_update(message, 0, False)
        logger.trace("Succesfully captured ffmpeg message: %s", message)
        return True

    def terminate(self):
        """ Terminate the subprocess """
        logger.debug("Terminating wrapper")
        if self.command == "train":
            logger.debug("Sending Exit Signal")
            print("Sending Exit Signal", flush=True)
            try:
                now = time()
                if os.name == "nt":
                    try:
                        logger.debug("Sending carriage return to process")
                        self.process.communicate(input="\n", timeout=60)
                    except TimeoutExpired:
                        raise ValueError("Timeout reached sending Exit Signal")
                else:
                    logger.debug("Sending SIGINT to process")
                    self.process.send_signal(signal.SIGINT)
                    while True:
                        timeelapsed = time() - now
                        if self.process.poll() is not None:
                            break
                        if timeelapsed > 60:
                            raise ValueError(
                                "Timeout reached sending Exit Signal")
                return
            except ValueError as err:
                logger.error("Error terminating process", exc_info=True)
                print(err)
        else:
            logger.debug("Terminating Process...")
            print("Terminating Process...")
            children = psutil.Process().children(recursive=True)
            for child in children:
                child.terminate()
            _, alive = psutil.wait_procs(children, timeout=10)
            if not alive:
                logger.debug("Terminated")
                print("Terminated")
                return

            logger.debug("Termination timed out. Killing Process...")
            print("Termination timed out. Killing Process...")
            for child in alive:
                child.kill()
            _, alive = psutil.wait_procs(alive, timeout=10)
            if not alive:
                logger.debug("Killed")
                print("Killed")
            else:
                for child in alive:
                    msg = "Process {} survived SIGKILL. Giving up".format(
                        child)
                    logger.debug(msg)
                    print(msg)

    def set_final_status(self, returncode):
        """ Set the status bar output based on subprocess return code 
            and reset training stats """
        logger.debug("Setting final status. returncode: %s", returncode)
        self.train_stats = {"iterations": 0, "timestamp": None}
        if returncode in (0, 3221225786):  # 0xC000013A: Ctrl+C exit on Windows
            status = "Ready"
        elif returncode == -15:  # SIGTERM
            status = "Terminated - {}.py".format(self.command)
        elif returncode == -9:  # SIGKILL
            status = "Killed - {}.py".format(self.command)
        elif returncode == -6:  # SIGABRT
            status = "Aborted - {}.py".format(self.command)
        else:
            status = "Failed - {}.py. Return Code: {}".format(
                self.command, returncode)
        logger.debug("Set final status: %s", status)
        return status
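
The terminate() method above escalates in stages: a Ctrl+C-style exit request, then psutil terminate()/kill() for whatever survives. Here is a condensed sketch of that cascade, assuming Python 3, a POSIX system and the third-party psutil package (the same dependency the class uses); "sleep 300" is a hypothetical placeholder workload.

import signal
from subprocess import Popen, TimeoutExpired

import psutil  # third-party, as used by FaceswapControl above

proc = Popen(["sleep", "300"])          # placeholder workload
proc.send_signal(signal.SIGINT)         # stage 1: polite exit request
try:
    proc.wait(timeout=10)
except TimeoutExpired:
    parent = psutil.Process(proc.pid)
    procs = parent.children(recursive=True) + [parent]
    for p in procs:
        p.terminate()                   # stage 2: SIGTERM
    _, alive = psutil.wait_procs(procs, timeout=10)
    for p in alive:
        p.kill()                        # stage 3: SIGKILL, no appeal
    psutil.wait_procs(alive, timeout=10)
print("final return code:", proc.poll())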