Example #1
    def connect(self, dbname, server='sqlite', create=False,
                user='', password='', host='', port=None, **kws):
        "connect to an existing database"
        creds = dict(user=user, password=password, host=host,
                     port=port, server=server)

        self.dbname = dbname
        if not self.isScanDB(dbname, **creds) and create:
            engine, meta = create_scandb(dbname, create=True, **creds)
            self.engine = engine
            self.metadata = meta
            self.metadata.reflect()

        if self.engine is None:
            raise ValueError("Cannot use '%s' as a Scan Database!" % dbname)

        self.conn   = self.engine.connect()
        self.session = sessionmaker(bind=self.engine, autocommit=True)()

        tabs, classes, mapprops, mapkeys = map_scandb(self.metadata)
        self.tables, self.classes = tabs, classes
        self.mapprops, self.mapkeys = mapprops, mapkeys

        self.status_codes = {}
        self.status_names = {}
        for row in self.getall('status'):
            self.status_codes[row.name] = row.id
            self.status_names[row.id] = row.name
        atexit.register(self.close)
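A side note on this pattern (mine, not from the source): atexit.register keeps a reference to whatever it is given, so registering a bound method such as self.close keeps the whole instance alive until interpreter shutdown. A minimal sketch:

import atexit

class Resource:
    def close(self):
        print("closing")

r = Resource()
atexit.register(r.close)  # atexit holds a reference to r via the bound method
del r                     # the instance is not collected; "closing" still prints at exit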
Example #2
 def __init__(self, sprintExecPath, minPythonControlVersion=2, sprintConfigStr="", sprintControlConfig=None, usePythonSegmentOrder=True):
   """
   :param str sprintExecPath: this executable will be called for the sub proc.
   :param int minPythonControlVersion: will be checked in the subprocess. via Sprint PythonControl
   :param str sprintConfigStr: passed to Sprint as command line args.
     can have "config:" prefix - in that case, looked up in config.
     handled via eval_shell_str(), can thus have lazy content (if it is callable, will be called).
   :param dict[str]|None sprintControlConfig: passed to SprintControl.init().
   """
   assert os.path.exists(sprintExecPath)
   self.sprintExecPath = sprintExecPath
   self.minPythonControlVersion = minPythonControlVersion
   if sprintConfigStr.startswith("config:"):
     from Config import get_global_config
     config = get_global_config()
     assert config
     sprintConfigStr = config.typed_dict[sprintConfigStr[len("config:"):]]
   self.sprintConfig = eval_shell_str(sprintConfigStr)
   self.sprintControlConfig = sprintControlConfig
   self.usePythonSegmentOrder = usePythonSegmentOrder
   self.child_pid = None
   self.parent_pid = os.getpid()
   # There is no generic way to see whether Python is exiting.
   # This is our workaround. We check for it in self.run_inner().
   self.python_exit = False
   atexit.register(self.exit_handler)
   self._cur_seg_name = None
   self._cur_posteriors_shape = None
   self.is_calculating = False
   self.init()
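The python_exit flag above is a common workaround: there is no public API for asking whether the interpreter is shutting down, so the module flips its own flag from an atexit handler and checks it in its worker loop. The bare idea, as a sketch:

import atexit

_python_exit = False

def _mark_python_exit():
    global _python_exit
    _python_exit = True  # long-running loops poll this to notice shutdown

atexit.register(_mark_python_exit)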
Example #3
def listen(queue):
    def low_level_mouse_handler(nCode, wParam, lParam):
        struct = lParam.contents
        # Can't use struct.time because it's usually zero.
        t = time.time()

        if wParam == WM_MOUSEMOVE:
            event = MoveEvent(struct.x, struct.y, t)
        elif wParam == WM_MOUSEWHEEL:
            event = WheelEvent(struct.data / (WHEEL_DELTA * (2<<15)), t)
        elif wParam in buttons_by_wm_code:
            type, button = buttons_by_wm_code.get(wParam, ('?', '?'))
            if wParam >= WM_XBUTTONDOWN:
                button = {0x10000: X, 0x20000: X2}[struct.data]
            event = ButtonEvent(type, button, t)
        else:
            # Unrecognized message: queue nothing (avoids an unbound
            # 'event' NameError for messages outside the cases above).
            event = None

        if event is not None:
            queue.put(event)
        return CallNextHookEx(NULL, nCode, wParam, lParam)

    WH_MOUSE_LL = c_int(14)
    mouse_callback = LowLevelMouseProc(low_level_mouse_handler)
    mouse_hook = SetWindowsHookEx(WH_MOUSE_LL, mouse_callback, NULL, NULL)

    # Register to remove the hook when the interpreter exits. Unfortunately a
    # try/finally block doesn't seem to work here.
    atexit.register(UnhookWindowsHookEx, mouse_hook)

    msg = LPMSG()
    while not GetMessage(msg, NULL, NULL, NULL):
        TranslateMessage(msg)
        DispatchMessage(msg)
Example #4
File: sht1x.py Project: g-vidal/upm
def main():
    # Instantiate a SHT1X sensor using D2 as the clock, and D3 as the
    # data pin.
    sensor = sensorObj.SHT1X(2, 3)

    ## Exit handlers ##
    # This function stops python from printing a stacktrace when you hit control-C
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # This function lets you run code on exit
    def exitHandler():
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # Every 2 seconds, update and print values
    while (True):
        sensor.update()

        print("Temperature:", sensor.getTemperature(), "C")
        print("Humidity:   ", sensor.getHumidity(), "RH")
        print()

        time.sleep(2)
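The SIGINTHandler/exitHandler pair recurs in several examples here. Raising SystemExit from the signal handler turns Ctrl-C into a normal interpreter exit, which is when atexit handlers run; they are skipped if the process is killed by an unhandled signal. Stripped to its core:

import atexit
import signal

def on_exit():
    print("cleanup runs on normal exit and on SystemExit")

def on_sigint(signum, frame):
    raise SystemExit  # convert Ctrl-C into a clean shutdown so atexit fires

atexit.register(on_exit)
signal.signal(signal.SIGINT, on_sigint)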
Example #5
def start_services():
    if bokeh_app.backend['type'] == 'redis' and \
       bokeh_app.backend.get('start_redis', True):
        work_dir = getattr(bokeh_app, 'work_dir', os.getcwd())
        data_file = getattr(bokeh_app, 'data_file', 'redis.db')
        stdout = getattr(bokeh_app, 'stdout', sys.stdout)
        stderr = getattr(bokeh_app, 'stderr', sys.stderr)
        redis_save = getattr(bokeh_app, 'redis_save', True)
        mproc = services.start_redis(pidfilename=os.path.join(work_dir, "bokehpids.json"),
                                     port=bokeh_app.backend.get('redis_port', REDIS_PORT),
                                     data_dir=work_dir,
                                     data_file=data_file,
                                     stdout=stdout,
                                     stderr=stderr,
                                     save=redis_save)
        bokeh_app.redis_proc = mproc

    bokeh_app.publisher.start()
    if not bokeh_app.websocket_params['no_ws_start']:
        bokeh_app.subscriber = websocket.make_app(bokeh_app.url_prefix,
                                                  [bokeh_app.publisher.zmqaddr],
                                                  bokeh_app.websocket_params['ws_port']
        )
        bokeh_app.subscriber.start(thread=True)
    atexit.register(stop_services)
Example #6
  def StartAgentTracing(self, options, _, timeout=None):
    """Starts tracing.

    Args:
        options: Tracing options.

    Raises:
        RuntimeError: If trace already in progress.
    """
    if options.update_map or not path.isfile(options.serial_map):
      battor_device_mapping.GenerateSerialMapFile(options.serial_map,
                                                  options.hub_types)
    self._battor_wrapper = battor_wrapper.BattorWrapper(
        target_platform=options.target,
        android_device=options.device_serial_number,
        battor_path=options.battor_path,
        battor_map_file=options.serial_map)

    dev_utils = device_utils.DeviceUtils(options.device_serial_number)
    self._battery_utils = battery_utils.BatteryUtils(dev_utils)
    self._battery_utils.SetCharging(False)
    atexit.register(_reenable_charging_if_needed, self._battery_utils)
    self._battor_wrapper.StartShell()
    self._battor_wrapper.StartTracing()
    return True
Example #7
def start_pipe_worker(command, env=None, priority='normal'):
    import subprocess, atexit
    from functools import partial
    w = Worker(env or {})
    args = {'stdout':subprocess.PIPE, 'stdin':subprocess.PIPE, 'env':w.env}
    if iswindows:
        import win32process
        priority = {
                'high'   : win32process.HIGH_PRIORITY_CLASS,
                'normal' : win32process.NORMAL_PRIORITY_CLASS,
                'low'    : win32process.IDLE_PRIORITY_CLASS}[priority]
        args['creationflags'] = win32process.CREATE_NO_WINDOW|priority
    else:
        def renice(niceness):
            try:
                os.nice(niceness)
            except Exception:
                pass
        niceness = {'normal' : 0, 'low'    : 10, 'high'   : 20}[priority]
        args['preexec_fn'] = partial(renice, niceness)
        args['close_fds'] = True

    p = subprocess.Popen([w.executable, '--pipe-worker', command], **args)
    atexit.register(w.kill)
    return p
Example #8
def main():
    # Instantiate the Infrared-Thermopile Sensor on I2C on bus 1
    mySensor = TMP006.TMP006(1)

    ## Exit handlers ##
    # This stops python from printing a stacktrace when you hit control-C
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # This lets you run code on exit,
    # including functions from mySensor
    def exitHandler():
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # activate periodic measurements
    mySensor.setActive();

    # Print out temperature value and config-reg in hex every 0.5 seconds
    while(1):
        mySensor.getTemperature(True)
        hex(mySensor.getConfig())

        time.sleep(.5)
Example #9
def reg():
    histfile = os.path.join(os.environ['HOME'], '.pythonhistory')
    try:
        readline.read_history_file(histfile)
    except IOError:
        pass
    atexit.register(readline.write_history_file,histfile)
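Note how the extra argument to atexit.register() is forwarded to the callback at exit time, so no lambda is needed to bind histfile. A small sketch of the same mechanism (the path is just a placeholder):

import atexit

def save_history(path, limit=1000):
    print("would save", limit, "entries to", path)

# positional and keyword arguments are stored and passed through at exit
atexit.register(save_history, "/tmp/histfile", limit=500)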
Example #10
def main(argv):
    global command
    try:
        opts, args = getopt.getopt(argv, "sari:t:", ["sort", "rand", "rev", "item=", "test="])
    except getopt.GetoptError:
        err()

    a = ["sort", "rand", "rev"]
    i = 1
    test = "100"
    for opt, arg in opts:
        if opt in ("-s", "--sort"):
            command = "sort"
        elif opt in ("-a", "--rand"):
            command = "rand"
        elif opt in ("-r", "--rev"):
            command = "rev"
        elif opt in ("-i", "--item"):
            i = int(arg)
        elif opt in ("-t", "--test"):
            test = arg
        else:
            err()
    atexit.register(exit, test)
    signal.signal(signal.SIGTERM, lambda signum, frame: exit())
    run(i, test)
Example #11
def main():
	device_opt = ["ipaddr", "login", "passwd", "no_login", "no_password", \
		       "port", "snmp_version", "snmp"]

	atexit.register(atexit_handler)

	all_opt["switch"]["default"] = 1
	all_opt["power_wait"]["default"] = 2
	all_opt["snmp_version"]["default"] = "1"
	all_opt["community"]["default"] = "private"
	options = check_input(device_opt, process_input(device_opt))

	# Plug indexing starts from zero on ePDU Managed, so we subtract '1' from
	# the user's given plug number.
	# For Switched ePDU, we will add this back again later.
	if "--plug" in options and options["--plug"].isdigit():
		options["--plug"] = str(int(options["--plug"]) - 1)

	docs = {}
	docs["shortdesc"] = "Fence agent for Eaton over SNMP"
	docs["longdesc"] = "fence_eaton_snmp is an I/O Fencing agent \
which can be used with the Eaton network power switch. It logs \
into a device via SNMP and reboots a specified outlet. It supports \
SNMP v1 and v3 with all combinations of authenticity/privacy settings."
	docs["vendorurl"] = "http://powerquality.eaton.com"
	show_docs(options, docs)

	# Operate the fencing device
	result = fence_action(FencingSnmp(options), options, set_power_status, get_power_status, get_outlets_status)

	sys.exit(result)
Example #12
  def forward_observatory_ports(self):
    """Forwards the ports used by the dart observatories to the host machine.
    """
    logcat = subprocess.Popen(self._adb_command(['logcat']),
                              stdout=subprocess.PIPE)
    atexit.register(_exit_if_needed, logcat)

    def _forward_observatories_as_needed():
      while True:
        line = logcat.stdout.readline()
        if not line:
          break
        match = re.search(r'Observatory listening on http://127.0.0.1:(\d+)',
                          line)
        if match:
          device_port = int(match.group(1))
          host_port = self._forward_host_port_to_device(0, device_port)
          print ('Dart observatory available at the host at http://127.0.0.1:%d'
                 % host_port)
          sys.stdout.flush()

    logcat_watch_thread = threading.Thread(
        target=_forward_observatories_as_needed)
    logcat_watch_thread.daemon = True
    logcat_watch_thread.start()
Example #13
    def __init__(self, process):
        threading.Thread.__init__(self)

        self._process = process
        self._exit = False
        self.setDaemon(True)
        atexit.register(self.stop)
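Registering self.stop works here because atexit callbacks run during interpreter shutdown, before daemon threads are torn down, so the callback can flip a flag that the run loop polls. A self-contained sketch of that shape:

import atexit
import threading
import time

class Worker(threading.Thread):
    def __init__(self):
        super().__init__(daemon=True)
        self._stopped = False
        atexit.register(self.stop)

    def stop(self):
        self._stopped = True

    def run(self):
        while not self._stopped:
            time.sleep(0.1)  # placeholder for real work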
Example #14
def main():
   """
   Simple command-line program for listing the virtual machines on a system.
   """
   host = 'svalleru-esx'
   user = '******'
   password = '******'
   port = 443
   try:
      si = None
      try:
         si = SmartConnect(host=host,
                user=user,
                pwd=password,
                port=port)
      except IOError, e:
        pass
      if not si:
         print "Could not connect to the specified host using specified username and password"
         return -1

      atexit.register(Disconnect, si)

      content = si.RetrieveContent()
      datacenter = content.rootFolder.childEntity[0]
      vmFolder = datacenter.vmFolder
      vmList = vmFolder.childEntity
      for vm in vmList:
         PrintVmInfo(vm)
   except Exception, e:
      print "Caught exception: " + str(e)
      return -1
   return 0
Example #15
  def __init__(self, config, bridge_user, bridge_host, dest_host, bridge_port=22, dest_port=22, local_port=2022, strictHostKeyChecking = True, timeout=90):
    self.local_port = local_port
    self.bridge_host = bridge_host
    self.bridge_user = bridge_user
    if not strictHostKeyChecking:
      remote_cmd = 'ssh ' + ssh_no_strict_key_host_checking_params
      cmd = 'ssh ' + ssh_no_strict_key_host_checking_params
    else:
      remote_cmd = 'ssh'
      cmd = 'ssh'
    remote_cmd = remote_cmd + ' -v -L %d:%s:%d %s@%s -A -N -M ' % (local_port, dest_host, dest_port, bridge_user, bridge_host)
    with hide('running', 'output', 'warnings'):
      run('rm -f ~/.ssh-tunnel-from-fabric')

    ssh_port = 22
    if 'port' in config:
      ssh_port = config['port']

    cmd = cmd + ' -vA -p %d %s@%s' % (ssh_port, config['user'], config['host'])
    cmd = cmd + " '" + remote_cmd + "'"

    print(cmd),

    self.p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    start_time = time.time()
    atexit.register(self.p.kill)
    while 'Entering interactive session' not in self.p.stderr.readline():
      if time.time() > start_time + timeout:
        raise Exception('SSH tunnel timed out with command "%s"' % cmd)
Example #16
    def __init__(self):
        self.name = "Default name"
        self.tag = None
        #self.skype = Skype4Py.Skype(Events=self, Transport="dbus")
        self.skype = Skype4Py.Skype(Events=self)

        self.plugin_classlist = []
        self.enabled_plugins = []
        self.disabled_plugins = []

        self.load_settings()
        self.load_plugins()
        plugin_names = [type(x).__name__ for x in self.plugin_classlist]
        print("loaded plugins\n Active plugins: {}".format(" ".join(plugin_names)))
        if not self.skype.Client.IsRunning:
            print("skype not running, starting skype")
            self.skype.Client.Start()
            time.sleep(30)
            print("skype is now running")
        self.skype.FriendlyName = "Beaker"
        self.skype._SetTimeout = 20
        self.skype.Attach()
        if not self.tag:
            self.tag = "@"
        print("I'm ready")
        self.skype.ChangeUserStatus(Skype4Py.cusOnline)
        atexit.register(self.on_exit)
        self.enabled_plugins = self.plugin_classlist
Example #17
def main():
    atexit.register(fabric_cleanup, True)
    parser = OptionParser(usage="%prog RELEASE_DIR DESTINATION")
    (options, args) = parser.parse_args(sys.argv[1:])
    
    comm_obj = _CommObj()
    
    if len(args) != 2:
        parser.print_help()
        sys.exit(-1)
        
    if not os.path.isdir(args[0]):
        print "release directory %s not found" % args[0]
        sys.exit(-1)
    
    destparts = args[1].split(':', 1)
    if len(destparts)==1: # it's a local release test area
        if not os.path.isdir(args[1]):
            _setup_local_release_dir(args[1])
        comm_obj.put = shutil.copy
        comm_obj.put_dir = shutil.copytree
        comm_obj.run = local
        
        _push_release(args[0], args[1], comm_obj)
    else: # assume args[1] is a remote host:destdir
        comm_obj.put = put
        comm_obj.put_dir = put_dir
        comm_obj.run = run
        
        home = destparts[1]

        with settings(host_string=destparts[0]):
            _push_release(args[0], home, comm_obj)
Example #18
 def write(self, text):
     if self._file is None and self._error is None:
         if self._logFilePath is None:
             import os
             prgName = os.path.splitext(os.path.basename(sys.executable))[0]
             prgAppDataPath = os.path.join(os.environ["APPDATA"], prgName)
             self._logFilePath = os.path.join(prgAppDataPath, "Log.txt")
         try:
             if not os.path.exists(os.path.dirname(self._logFilePath)):
                 os.mkdir(os.path.dirname(self._logFilePath))
             self._file = open(self._logFilePath, 'a')
         except Exception, details:
             self._error = details
             import atexit
             import ctypes
             atexit.register(
                 ctypes.windll.user32.MessageBoxA,
                 0,
                 "The logfile '%s' could not be opened:\n %s" % (
                     self._logFilePath,
                     details
                 ),
                 "Error occurred in EventGhost",
                 0
             )
         else:
             import atexit
             atexit.register(self.__DisplayMessage)
Example #19
def includeme(config):
    """Configure marrow.mailer"""
    settings = config.registry.settings
    prefix = settings.get('pyramid_marrowmailer.prefix', 'mail.').rstrip('.')

    # handle boolean (.on) and integer (.int) option suffixes
    mailer_config = dict(filter(lambda d: d[0].startswith(prefix),
                                settings.items()))
    for key, value in dict(mailer_config).items():
        if key.endswith('.on'):
            mailer_config[key[:-3]] = asbool(value)
        if key.endswith('.int'):
            mailer_config[key[:-4]] = int(value)

    # bugfix for https://github.com/marrow/marrow.mailer/issues/45
    manager = '%s.manager.use' % prefix
    if manager not in mailer_config:
        mailer_config[manager] = 'immediate'

    mode = '%s.mode' % prefix
    if mailer_config.get(mode) == 'direct':
        mailer = Mailer(mailer_config, prefix)
    else:
        mailer = TransactionMailer(mailer_config, prefix)

    mailer.start()

    config.registry.registerUtility(mailer, IMarrowMailer)
    config.set_request_property(get_mailer, "mailer", reify=True)

    # shutdown mailer when process stops
    atexit.register(lambda: mailer.stop())
Example #20
File: help.py Project: kivhift/qmk
 def __init__(self):
     self._name = 'help'
     self._help = self.__doc__
     h, self.__filename = tempfile.mkstemp(suffix = '.html',
         prefix = 'qmkhelp')
     os.close(h)
     atexit.register(os.remove, self.__filename)
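atexit.register(os.remove, path) is the usual idiom for temporary files that must outlive the function that created them. If the file can also be cleaned up earlier, Python 3's atexit.unregister() drops the handler again and avoids a FileNotFoundError traceback at shutdown. A sketch:

import atexit
import os
import tempfile

fd, path = tempfile.mkstemp(suffix='.html', prefix='qmkhelp')
os.close(fd)
atexit.register(os.remove, path)

# ... later, if the file is deleted early:
os.remove(path)
atexit.unregister(os.remove)  # removes every registration of os.remove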
Example #21
def run(config):
    import atexit
    import sys

    # Reset sys.modules so that gevent can re-monkeypatch.
    # This is needed because distribute's pkg_resources imports urllib & co
    # before we can properly monkey-patch them. ;(
    modules_to_reset = {'urllib', 'socket', '_ssl', 'ssl', 'select', 'thread',
                        'threading', 'time', 'os', 'subprocess'}
    for k in list(sys.modules.keys()):
        if k.startswith('dissonance.') or k in modules_to_reset:
            del sys.modules[k]

    from gevent.monkey import patch_all

    patch_all()

    from .dissonance import Dissonance
    import logging

    logging.basicConfig(**getattr(config, 'logging', {}))
    j = Dissonance(config)

    try:
        j.run()
        atexit.register(j.stop)
        j.join()

    except KeyboardInterrupt:
        print("Got ^C. Stopping!")
Example #22
def tofile(fiter, fname):
    fh = nopen(fname, "w")
    for line in fiter:
        print >>fh, line.rstrip("\r\n")
    fh.close()
    atexit.register(os.unlink, fname)
    return fname
Example #23
    def register_readline():
        import atexit
        try:
            import readline
            import rlcompleter
        except ImportError:
            return

        # Reading the initialization (config) file may not be enough to set a
        # completion key, so we set one first and then read the file
        if 'libedit' in getattr(readline, '__doc__', ''):
            readline.parse_and_bind('bind ^I rl_complete')
        else:
            readline.parse_and_bind('tab: complete')

        try:
            readline.read_init_file()
        except OSError:
            # An OSError here could have many causes, but the most likely one
            # is that there's no .inputrc file (or .editrc file in the case of
            # Mac OS X + libedit) in the expected location.  In that case, we
            # want to ignore the exception.
            pass

        history = os.path.join(os.path.expanduser('~'), '.python_history')
        try:
            readline.read_history_file(history)
        except IOError:
            pass
        atexit.register(readline.write_history_file, history)
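When several handlers are registered, as in this interpreter-startup hook, they run in last-in, first-out order, so callbacks registered later (which may depend on earlier setup) fire first. For example:

import atexit

atexit.register(print, "runs second")
atexit.register(print, "runs first")  # most recently registered runs first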
Example #24
  def _WaitForProcessIdAndStartGdb(self, process):
    '''
    Waits until we see the process id from the remote device, starts up
    gdbserver on the remote device, and gdb on the local device.
    '''
    # Wait until we see 'PID'
    pid = self._GetProcessId(process)
    assert pid != 0
    # No longer need the logcat process.
    process.kill()
    # Disable python's processing of SIGINT while running gdb. Otherwise
    # control-c doesn't work well in gdb.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    gdbserver_process = subprocess.Popen(self._CreateADBCommand(['shell',
                                                                 'gdbserver',
                                                                 '--attach',
                                                                 ':5039',
                                                                 pid]))
    atexit.register(_ExitIfNeeded, gdbserver_process)

    gdbinit_path = os.path.join(self.temp_gdb_dir, 'gdbinit')
    _CreateGdbInit(self.temp_gdb_dir, gdbinit_path, self.paths.build_dir)

    # Wait a second for gdb to start up on the device. Without this the local
    # gdb starts before the remote side has registered the port.
    # TODO(sky): maybe we should try a couple of times and then give up?
    time.sleep(1)

    local_gdb_process = subprocess.Popen([self._GetLocalGdbPath(),
                                          '-x',
                                          gdbinit_path],
                                         cwd=self.temp_gdb_dir)
    atexit.register(_ExitIfNeeded, local_gdb_process)
    local_gdb_process.wait()
    signal.signal(signal.SIGINT, signal.SIG_DFL)
Example #25
 def _daemonize():
     pid = os.fork() 
     if pid > 0:
         # exit first parent
         sys.exit(0) 
 
     # decouple from parent environment
     os.chdir(WORKDIR) 
     os.setsid() 
     os.umask(0) 
 
     # do second fork     
     pid = os.fork() 
     if pid > 0:
         # exit from second parent
         sys.exit(0) 
 
     # redirect standard file descriptors
     sys.stdout.flush()
     sys.stderr.flush()
     si = open(LOG_FILE, 'r')
     so = open(LOG_FILE, 'a+')
     se = open(LOG_FILE, 'a+', 0)
     os.dup2(si.fileno(), sys.stdin.fileno())
     os.dup2(so.fileno(), sys.stdout.fileno())
     os.dup2(se.fileno(), sys.stderr.fileno())
 
     # write pidfile
     pid = str(os.getpid())
     f = open(PID_FILE,'w')
     f.write("%s\n" % pid)
     f.close()
     atexit.register(lambda: os.remove(PID_FILE))
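One caveat with the lambda above: if the PID file is already gone at shutdown, os.remove raises and atexit prints a traceback. A slightly more defensive variant (a sketch; the path is a placeholder):

import atexit
import os

PID_FILE = '/tmp/daemon.pid'  # placeholder

def _remove_pidfile():
    try:
        os.remove(PID_FILE)
    except OSError:
        pass  # already removed; stay quiet during shutdown

atexit.register(_remove_pidfile)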
Example #26
def main(cli_args=sys.argv[1:]):
    """Command line argument parsing and main script execution."""
    sys.excepthook = functools.partial(_handle_exception, args=None)

    # note: arg parser internally handles --help (and exits afterwards)
    plugins = plugins_disco.PluginsRegistry.find_all()
    parser, tweaked_cli_args = create_parser(plugins, cli_args)
    args = parser.parse_args(tweaked_cli_args)
    config = configuration.NamespaceConfig(args)
    zope.component.provideUtility(config)

    # Setup logging ASAP, otherwise "No handlers could be found for
    # logger ..." TODO: this should be done before plugins discovery
    for directory in config.config_dir, config.work_dir:
        le_util.make_or_verify_dir(
            directory, constants.CONFIG_DIRS_MODE, os.geteuid(),
            "--strict-permissions" in cli_args)
    # TODO: logs might contain sensitive data such as contents of the
    # private key! #525
    le_util.make_or_verify_dir(
        args.logs_dir, 0o700, os.geteuid(), "--strict-permissions" in cli_args)
    setup_logging(args, _cli_log_handler, logfile='letsencrypt.log')

    # do not log `args`, as it contains sensitive data (e.g. revoke --key)!
    logger.debug("Arguments: %r", cli_args)
    logger.debug("Discovered plugins: %r", plugins)

    sys.excepthook = functools.partial(_handle_exception, args=args)

    # Displayer
    if args.text_mode:
        displayer = display_util.FileDisplay(sys.stdout)
    else:
        displayer = display_util.NcursesDisplay()
    zope.component.provideUtility(displayer)

    # Reporter
    report = reporter.Reporter()
    zope.component.provideUtility(report)
    atexit.register(report.atexit_print_messages)

    # TODO: remove developer EULA prompt for the launch
    if not config.eula:
        eula = pkg_resources.resource_string("letsencrypt", "EULA")
        if not zope.component.getUtility(interfaces.IDisplay).yesno(
                eula, "Agree", "Cancel"):
            raise errors.Error("Must agree to TOS")

    if not os.geteuid() == 0:
        logger.warning(
            "Root (sudo) is required to run most of letsencrypt functionality.")
        # check must be done after arg parsing as --help should work
        # w/o root; on the other hand, e.g. "letsencrypt run
        # --authenticator dns" or "letsencrypt plugins" does not
        # require root as well
        #return (
        #    "{0}Root is required to run letsencrypt.  Please use sudo.{0}"
        #    .format(os.linesep))

    return args.func(args, config, plugins)
Example #27
 def init_crash_handler(self):
     """Create a crash handler, typically setting sys.excepthook to it."""
     self.crash_handler = self.crash_handler_class(self)
     sys.excepthook = self.excepthook
     def unset_crashhandler():
         sys.excepthook = sys.__excepthook__
     atexit.register(unset_crashhandler)
Example #28
    def __init__(self, opts, station):
        """Initilize the interface

        opts: a dictionary of command line options
        station: station requested as an argument"""
        cleanDebug()
        self.histfile = None
        self.config = Config()

        self.sirius = Sirius()
        self.player = Player(opts)
        self.options = opts

        atexit.register(self.onExit)

        if opts.list:
            self.list()
            sys.exit(0)

        if opts.setup:
            self.setup()
            sys.exit(0)

        self.notification = toBool(self.config.settings.notifications)

        if station is not None:
            self.play(station)
        else:
            self.repl()
Example #29
def sink_to_temp(content, key='', **opts):
    import atexit
    import hashlib
    import os
    import tempfile
    path = p(tempfile.gettempdir()) / hashlib.sha1(key).hexdigest()
    atexit.register(lambda: os.unlink(str(path)))
    open(str(path), 'w').write(content)
    return p(path)
Example #30
def main():
    # This was tested with the  Electromagnetic Module
    # Instantiate a  Electromagnet on digital pin D2
    myElectromagnet = upmelectromagnet.Electromagnet(2)

    ## Exit handlers ##
    # This stops python from printing a stacktrace when you hit control-C
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # This lets you run code on exit,
    # including functions from myElectromagnet
    def exitHandler():
        print("Exiting")
        myElectromagnet.off()
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    magnetState = False

    # Turn magnet on and off every 5 seconds
    while(1):
        magnetState = not magnetState
        if (magnetState):
            myElectromagnet.on()
        else:
            myElectromagnet.off()
        print("Turning magnet", ("on" if magnetState else "off"))

        time.sleep(5)
Example #31
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import atexit
import re
import yaml
import itertools
import string
import shared_telemetry_utils as utils

from shared_telemetry_utils import ParserError
atexit.register(ParserError.exit_func)

MAX_CATEGORY_NAME_LENGTH = 30
MAX_METHOD_NAME_LENGTH = 20
MAX_OBJECT_NAME_LENGTH = 20
MAX_EXTRA_KEYS_COUNT = 10
MAX_EXTRA_KEY_NAME_LENGTH = 15

IDENTIFIER_PATTERN = r'^[a-zA-Z][a-zA-Z0-9_.]*[a-zA-Z0-9]$'


def nice_type_name(t):
    if issubclass(t, basestring):
        return "string"
    return t.__name__


def convert_to_cpp_identifier(s, sep):
    return string.capwords(s, sep).replace(sep, "")
Example #32
		except OSError, e:
			sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
			sys.exit(1)
		
		# redirect standard file descriptors
		sys.stdout.flush()
		sys.stderr.flush()
		si = file(self.stdin, 'r')
		so = file(self.stdout, 'a+')
		se = file(self.stderr, 'a+', 0)
		os.dup2(si.fileno(), sys.stdin.fileno())
		os.dup2(so.fileno(), sys.stdout.fileno())
		os.dup2(se.fileno(), sys.stderr.fileno())
		
		# write pidfile
		atexit.register(self.delpid)
		pid = str(os.getpid())
		file(self.pidfile, 'w+').write("%s\n" % pid)
		self.run()
	
	def delpid(self):
		os.remove(self.pidfile)
	
	def start(self):
		"""
			Start the daemon
		"""
		
		# Check for a pidfile to see if the daemon already runs
		try:
			pf = file(self.pidfile, 'r')
			pid = int(pf.read().strip())
			pf.close()
		except IOError:
			pid = None
Example #33
        self.expect('type category list',
                    substrs=[
                        'system is enabled', 'gnu-libstdc++ is enabled',
                        'AppKit is enabled'
                    ])

        self.expect("frame variable numbers",
                    substrs=['[0] = 1', '[3] = 1234'])

        self.expect('frame variable string1', substrs=['hello world'])

        self.expect('type category list',
                    substrs=[
                        'system is enabled', 'gnu-libstdc++ is enabled',
                        'AppKit is enabled'
                    ])

        # last check - our cleanup will re-enable everything
        self.runCmd('type category disable *')
        self.expect('type category list',
                    substrs=[
                        'system is not enabled',
                        'gnu-libstdc++ is not enabled', 'AppKit is not enabled'
                    ])


if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
Example #34
    def __init__(self, opt):
        self.opt = opt
        self.batch_size = self.opt.batch_size
        self.seq_per_img = opt.seq_per_img

        # feature related options
        self.use_fc = getattr(opt, 'use_fc', True)
        self.use_att = getattr(opt, 'use_att', True)
        self.use_box = getattr(opt, 'use_box', 0)
        self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
        self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)

        # load the json file which contains additional information about the dataset
        print('DataLoader loading json file: ', opt.input_json)
        self.info = json.load(open(self.opt.input_json))
        self.ix_to_word = self.info['ix_to_word']
        self.vocab_size = len(self.ix_to_word)
        print('vocab size is ', self.vocab_size)

        # open the hdf5 file
        print('DataLoader loading h5 file: ', opt.input_fc_dir,
              opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
        self.h5_label_file = h5py.File(
            self.opt.input_label_h5, 'r', driver='core')

        self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy')
        self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz')
        self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy')

        # load in the sequence data
        seq_size = self.h5_label_file['labels'].shape
        self.seq_length = seq_size[1]
        print('max sequence length in data is', self.seq_length)
        # load the pointers in full to RAM (should be small enough)
        self.label_start_ix = self.h5_label_file['label_start_ix'][:]
        self.label_end_ix = self.h5_label_file['label_end_ix'][:]

        self.num_images = self.label_start_ix.shape[0]
        print('read %d image features' % (self.num_images))

        # separate out indexes for each of the provided splits
        self.split_ix = {'train': [], 'val': [], 'test': []}
        for ix in range(len(self.info['images'])):
            img = self.info['images'][ix]
            if img['split'] == 'train':
                self.split_ix['train'].append(ix)
            elif img['split'] == 'val':
                self.split_ix['val'].append(ix)
            elif img['split'] == 'test':
                self.split_ix['test'].append(ix)
            elif opt.train_only == 0:  # restval
                self.split_ix['train'].append(ix)

        print('assigned %d images to split train' %
              len(self.split_ix['train']))
        print('assigned %d images to split val' % len(self.split_ix['val']))
        print('assigned %d images to split test' % len(self.split_ix['test']))

        self.iterators = {'train': 0, 'val': 0, 'test': 0}

        self._prefetch_process = {}  # The three prefetch process
        for split in self.iterators.keys():
            self._prefetch_process[split] = BlobFetcher(
                split, self, split == 'train')
            # Terminate the child process when the parent exits

        def cleanup():
            print('Terminating BlobFetcher')
            for split in self.iterators.keys():
                del self._prefetch_process[split]
        import atexit
        atexit.register(cleanup)
Example #35
def main():

    module = AnsibleModule(
        argument_spec=dict(
            hostname=dict(type='str', required=True, aliases=['host', 'ip']),
            username=dict(type='str', default='admin', aliases=['user']),
            password=dict(type='str', default='password', no_log=True),
            content=dict(type='str'),
            path=dict(type='path', aliases=['config_file', 'src']),
            protocol=dict(type='str', default='https', choices=['http', 'https']),
            timeout=dict(type='int', default=60),
            validate_certs=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
        mutually_exclusive=[['content', 'path']],
    )

    if not HAS_LXML_ETREE:
        module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)

    if not HAS_XMLJSON_COBRA:
        module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)

    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']

    content = module.params['content']
    path = module.params['path']

    protocol = module.params['protocol']
    timeout = module.params['timeout']

    result = dict(
        failed=False,
        changed=False,
    )

    # Report missing file
    file_exists = False
    if path:
        if os.path.isfile(path):
            file_exists = True
        else:
            module.fail_json(msg='Cannot find/access path:\n%s' % path)

    start = datetime.datetime.utcnow()

    # Perform login first
    url = '%s://%s/nuova' % (protocol, hostname)
    data = '<aaaLogin inName="%s" inPassword="******"/>' % (username, password)
    resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
    if resp is None or auth['status'] != 200:
        result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
        module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
    result.update(imc_response(module, resp.read()))

    # Store cookie for future requests
    try:
        cookie = result['aaaLogin']['attributes']['outCookie']
    except Exception:
        module.fail_json(msg='Could not find cookie in output', **result)

    # If we would not log out properly, we run out of sessions quickly
    atexit.register(logout, module, url, cookie, timeout)

    # Prepare request data
    if content:
        rawdata = content
    elif file_exists:
        with open(path, 'r') as config_object:
            rawdata = config_object.read()

    # Wrap the XML documents in a <root> element
    xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))

    # Handle each XML document separately in the same session
    for xmldoc in list(xmldata):
        if xmldoc.tag is lxml.etree.Comment:
            continue
        # Add cookie to XML
        xmldoc.set('cookie', cookie)
        data = lxml.etree.tostring(xmldoc)

        # Perform actual request
        resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
        if resp is None or info['status'] != 200:
            result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
            module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)

        # Merge results with previous results
        rawoutput = resp.read()
        result = merge(result, imc_response(module, rawoutput, rawinput=data))
        result['response'] = info['msg']
        result['status'] = info['status']

        # Check for any changes
        # NOTE: Unfortunately IMC API always report status as 'modified'
        xmloutput = lxml.etree.fromstring(rawoutput)
        results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
        result['changed'] = ('modified' in results)

    # Report success
    result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
    module.exit_json(**result)
Example #36
        self.clusters = clusters
        sysfs_utils.setUserSpace(clusters=clusters)
        self.avail_freqs = {x: list() for x in clusters}
        self.sel_cluster_freq = {x: 0 for x in clusters}
        for cluster in clusters:
            self.avail_freqs[cluster] = sysfs_utils.getAvailFreqs(cluster)

    def tick(self):
        for cluster in self.clusters:
            self.sel_cluster_freq[cluster] = random.randint(0, \
                 len(self.avail_freqs[cluster])-1 )
            sysfs_utils.setClusterFreq(cluster, \
                 self.avail_freqs[cluster][self.sel_cluster_freq[cluster]])


if __name__ == "__main__":
    random.seed()
    clusters = [0, 4]
    if len(sys.argv) > 1:
        try:
            clusters = [int(x) for x in sys.argv[1].split(",")]
        except ValueError:
            usage()
    print("Starting random frequency governor.")
    atexit.register(sysfs_utils.unsetUserSpace, clusters=clusters)
    gov = RandomGovernor(clusters=clusters)
    while True:
        last_time = time.time()
        gov.tick()
        time.sleep(random.randint(0, REFRESH_PERIOD))
Example #37

def print_total_timer():
    """
    Print the content of the TotalTimer, if it's not empty. This function will automatically get
    called when program exits.
    """
    if len(_TOTAL_TIMER_DATA) == 0:
        return
    for k, v in six.iteritems(_TOTAL_TIMER_DATA):
        logger.info(
            "Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format(
                k, v.sum, v.count, v.average))


atexit.register(print_total_timer)


class IterSpeedCounter(object):
    """ Test how often some code gets reached.

    Example:
        Print the speed of the iteration every 100 times.

        .. code-block:: python

            speed = IterSpeedCounter(100)
            for k in range(1000):
                # do something
                speed()
    """
Example #38
wpath = "/var/www/sensor"
statepath = "/var/www/sensor/state"

#Create the pyinotify variables, only watch for creates
wm = pyinotify.WatchManager()
mask = pyinotify.IN_CREATE
logpath = "/var/log/sensord.log"

#Remove the stop file in case it didn't get cleaned up properly (incorrect shutdown perhaps)
if (os.path.isfile("/var/www/sensor/stop")):
    writeLog("Stop file removed")
    os.remove("/var/www/sensor/stop")

#Registers the cleanExit method as the last thing to run when being terminated. This allows the stop command from start-stop-daemon to cleanly
#shut down the sensord service
atexit.register(cleanExit)

#Adds a watcher to pyinotify
notifier = pyinotify.Notifier(wm, PTmp())
wdd = wm.add_watch(wpath, mask, rec=True)

#Add a gpio event on the IR sensor, when it is a falling signal, it will run my_callback.
#bouncetime will reduce flapping of the sensor
GPIO.add_event_detect(IR_SENSOR,
                      GPIO.FALLING,
                      callback=my_callback,
                      bouncetime=600)
writeState("ready")
writeLog("Listening for events")
#Main loop to watch for file events. The IR sensor is handled by the callback delegate
thread1 = httpThread(1, "Thread-1", 1)
Example #39
#
# This cleanup routine prevents the __del__ method that cleans up the tree of
# the last TestPipEnvironment from firing after shutil has already been
# unloaded.  It also ensures that FastTestPipEnvironment doesn't leave an
# environment hanging around that might confuse the next test run.
#
def _cleanup():
    global env
    del env
    rmtree(download_cache, ignore_errors=True)
    rmtree(fast_test_env_root, ignore_errors=True)
    rmtree(fast_test_env_backup, ignore_errors=True)


atexit.register(_cleanup)


class TestPipResult(object):
    def __init__(self, impl, verbose=False):
        self._impl = impl

        if verbose:
            print(self.stdout)
            if self.stderr:
                print('======= stderr ========')
                print(self.stderr)
                print('=======================')

    def __getattr__(self, attr):
        return getattr(self._impl, attr)
Example #40
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     atexit.register(install_styles)
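Because atexit.register() does not deduplicate, an __init__ like this queues install_styles once per instance, and it will run that many times at exit. If the callback is not idempotent, one workaround (a sketch, not from the source) is to unregister before re-registering:

import atexit

def install_styles():
    print("installing styles")  # stand-in for the real callback

atexit.unregister(install_styles)  # no-op if not yet registered
atexit.register(install_styles)    # queued exactly once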
Example #41
        server.terminate()
        print("Server failed to launch, see tests/logs/live_mock_server.log")
        try:
            print("=" * 40)
            with open("tests/logs/live_mock_server.log") as f:
                for l in f.readlines():
                    print(l.strip())
            print("=" * 40)
        except Exception as e:
            print("EXCEPTION:", e)
        raise ValueError("Failed to start server!  Exit code %s" % server.returncode)
    return server


start_mock_server()
atexit.register(test_cleanup)


@pytest.fixture
def test_name(request):
    # change "test[1]" to "test__1__"
    name = urllib.parse.quote(request.node.name.replace("[", "__").replace("]", "__"))
    return name


@pytest.fixture
def test_dir(test_name):
    orig_dir = os.getcwd()
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    test_dir = os.path.join(root, "tests", "logs", test_name)
    if os.path.exists(test_dir):
Example #42
            conf.session = session_name
            session={"conf":conf}
            
    else:
        session={"conf": conf}

    __builtin__.__dict__["scapy_session"] = session


    if READLINE:
        if conf.histfile:
            try:
                readline.read_history_file(conf.histfile)
            except IOError:
                pass
        atexit.register(scapy_write_history_file,readline)
    
    atexit.register(scapy_delete_temp_files)
    
    IPYTHON=False
    if conf.interactive_shell.lower() == "ipython":
        try:
            import IPython
            IPYTHON=True
        except ImportError, e:
            log_loading.warning("IPython not available. Using standard Python shell instead.")
            IPYTHON=False
        
    if IPYTHON:
        banner = the_banner % (conf.version) + " using IPython %s" % IPython.__version__
        args = ['']  # IPython command line args (will be seen as sys.argv)
Example #43
    def setUp(self):
        TestBase.setUp(self)

    @skipUnlessDarwin
    @swiftTest
    @add_test_categories(["swiftpr"])
    def testSwiftRemoteASTImport(self):
        """This tests that RemoteAST querying the dynamic type of a variable
        doesn't import any modules into a module SwiftASTContext that
        weren't imported by that module in the source code.

        """
        self.build()
        # The Makefile doesn't build a .dSYM, so we need to help with
        # finding the .swiftmodules.
        os.chdir(self.getBuildDir())
        lldbutil.run_to_source_breakpoint(self, "break here",
                                          lldb.SBFileSpec('Library.swift'))
        # FIXME: Reversing the order of these two commands does not work!
        self.expect("expr -d no-dynamic-values -- input",
                    substrs=['(Library.LibraryProtocol) $R0'])
        self.expect("expr -d run-target -- input",
                    substrs=['(a.FromMainModule) $R2'])


if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()
Example #44
import atexit
import os
import sys

import sys_platform
from config import config
import web_control
import module_init
import update
import update_from_github
import download_modules


def exit_handler():
    xlog.info('Stopping all modules before exit!')
    module_init.stop_all()
    web_control.stop()


atexit.register(exit_handler)


def main():
    # change path to launcher
    global __file__
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    if sys.platform == "win32" and config.show_compat_suggest:
        import win_compat_suggest
        win_compat_suggest.main()

    current_version = update_from_github.current_version()
Example #45
            AnacondaThread(name=constants.THREAD_STORAGE,
                           target=reset_storage))

    # Initialize the system clock.
    startup_utils.initialize_system_clock()

    if flags.rescue_mode:
        rescue.start_rescue_mode_ui(anaconda)
    else:
        startup_utils.clean_pstore()

    # add our own additional signal handlers
    signal.signal(signal.SIGUSR1,
                  lambda signum, frame: exception.test_exception_handling())
    signal.signal(signal.SIGUSR2, lambda signum, frame: anaconda.dumpState())
    atexit.register(exitHandler, ksdata.reboot)

    from pyanaconda import exception
    anaconda.mehConfig = exception.initExceptionHandling(anaconda)

    # Start the subscription handling thread if the Subscription DBus module
    # provides enough authentication data.
    # - as kickstart only supports org + key authentication & nothing
    #   else currently talks to the Subscription DBus module,
    #   we only check if organization id & at least one activation
    #   key are available
    from pyanaconda.modules.common.util import is_module_available
    from pyanaconda.modules.common.constants.services import SUBSCRIPTION
    if is_module_available(SUBSCRIPTION):
        from pyanaconda.ui.lib.subscription import org_keys_sufficient, register_and_subscribe
        if org_keys_sufficient():
Example #46
    )
    print("Previous behaviour is restored on process exit")
    print("This tool does what it does by appending lines to /etc/hosts")
    exit(1)


def reset_etc_hosts_file():
    shutil.copy2("hosts", "/etc/hosts")
    print("Reset /etc/hosts to original")
    os.remove("hosts")
    print(
        "Deleted the backup copy of /etc/hosts, which has now been reset to its previous behaviour"
    )


atexit.register(reset_etc_hosts_file)


def copy_etc_hosts_file():
    shutil.copy2("/etc/hosts", "hosts")
    print("Saved current state of /etc/hosts")


copy_etc_hosts_file()


def append_localhost_redirect_to_etc_hosts_file(hostname):
    line_to_add = "127.0.0.1\t" + hostname + "\n"
    with open("/etc/hosts", "a") as hosts_file:
        hosts_file.write(line_to_add)
Example #47
    def run_from_argv(self, prog_name, argv=None, **_kwargs):
        env_options = filter(lambda x: x.startswith(self.ENV_VAR_PREFIX),
                             os.environ)
        for env_var_name in env_options:
            name = env_var_name.replace(self.ENV_VAR_PREFIX, '', 1).lower()
            value = os.environ[env_var_name]
            option = options._options[name]
            if option.multiple:
                value = map(option.type, value.split(','))
            else:
                value = option.type(value)
            setattr(options, name, value)

        argv = list(filter(self.is_flower_option, argv))
        # parse the command line to get --conf option
        parse_command_line([prog_name] + argv)
        try:
            parse_config_file(options.conf, final=False)
            parse_command_line([prog_name] + argv)
        except IOError:
            if options.conf != DEFAULT_CONFIG_FILE:
                raise

        settings['debug'] = options.debug
        if options.cookie_secret:
            settings['cookie_secret'] = options.cookie_secret

        if options.url_prefix:
            logger.error('url_prefix option is not supported anymore')

        if options.debug and options.logging == 'info':
            options.logging = 'debug'
            enable_pretty_logging()
        else:
            logging.getLogger("tornado.access").addHandler(NullHandler())
            logging.getLogger("tornado.access").propagate = False

        if options.auth:
            settings['oauth'] = {
                'key':
                options.oauth2_key or os.environ.get('FLOWER_OAUTH2_KEY'),
                'secret':
                options.oauth2_secret
                or os.environ.get('FLOWER_OAUTH2_SECRET'),
                'redirect_uri':
                options.oauth2_redirect_uri
                or os.environ.get('FLOWER_AUTH2_REDIRECT_URI'),
            }

        if options.certfile and options.keyfile:
            settings['ssl_options'] = dict(certfile=abs_path(options.certfile),
                                           keyfile=abs_path(options.keyfile))
            if options.ca_certs:
                settings['ssl_options']['ca_certs'] = abs_path(
                    options.ca_certs)

        # Monkey-patch to support Celery 2.5.5
        self.app.connection = self.app.broker_connection

        self.app.loader.import_default_modules()
        flower = Flower(capp=self.app, options=options, **settings)
        atexit.register(flower.stop)

        def sigterm_handler(signal, frame):
            logger.info('SIGTERM detected, shutting down')
            sys.exit(0)

        signal.signal(signal.SIGTERM, sigterm_handler)

        self.print_banner('ssl_options' in settings)

        try:
            flower.start()
        except (KeyboardInterrupt, SystemExit):
            pass
Example #48
 def mktemp(self):
     if self.tmp:
         return
     self.tmp = tempfile.mkdtemp(prefix='emcflt-', suffix='.d')
     atexit.register(lambda: shutil.rmtree(self.tmp))
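A hedged variant of this cleanup: register() forwards keyword arguments, so the lambda can be dropped, and ignore_errors=True keeps shutdown quiet if the directory was already removed:

import atexit
import shutil
import tempfile

tmp = tempfile.mkdtemp(prefix='emcflt-', suffix='.d')
atexit.register(shutil.rmtree, tmp, ignore_errors=True)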
Example #49
def daemonize(pidfile, *, stdin='/dev/null',
              stdout='/dev/null',
              stderr='/dev/null'):
    """The code below is adapted by:
    https://github.com/dabeaz/python-cookbook/blob/master/
    src/12/launching_a_daemon_process_on_unix/daemon.py
    It uses Unix double-fork magic based on Stevens's book
    "Advanced Programming in the UNIX Environment".
    Creates a daemon that is diassociated with the terminal
    and has no root privileges. Once double-forking is successful
    it writes its pid to a designated pidfile. The pidfile
    is later used to kill the daemon.
    """

    # If pidfile exists, there is a server program that is currently running
    if os.path.exists(pidfile):
        raise RuntimeError('Already running')

    # First fork (detaches from parent)
    try:
        if os.fork() > 0:
            # Parent exit
            raise SystemExit(0)
    except OSError as e:
        raise RuntimeError(f'fork #1 failed: {e}')

    # Decouple from parent environment
    os.chdir('/tmp')
    os.umask(0)
    os.setsid()
    dropPrivileges()

    logger.info("fork#1 successfull")
    # Second fork (relinquish session leadership)
    try:
        if os.fork() > 0:
            raise SystemExit(0)
    except OSError as e:
        raise RuntimeError(f'fork #2 failed: {e}')

    # Flush I/O buffers
    sys.stdout.flush()
    sys.stderr.flush()

    # Replace file descriptors for stdin, stdout, and stderr
    with open(stdin, 'rb', 0) as f:
        os.dup2(f.fileno(), sys.stdin.fileno())
    with open(stdout, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stdout.fileno())
    with open(stderr, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stderr.fileno())

    # PID of the double-forked daemon
    fork2DaemonPID = os.getpid()

    # Write the PID file
    with open(pidfile, 'w') as f:
        print(fork2DaemonPID, file=f)

    logger.info(f"fork#2 successful pid[{fork2DaemonPID}]")

    # Arrange to have the PID file removed on exit/signal
    atexit.register(lambda: os.remove(pidfile))
    atexit.register(lambda: removePidProcess())

    # Signal handler for termination (required)
    def sigterm_handler(signo, frame):
        raise SystemExit(1)

    signal.signal(signal.SIGTERM, sigterm_handler)
Example #50
def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False,
               hashed_key=False, **h5dcreate_kwargs):
    """HDF5 cache decorator.

    Parameters
    ----------
    filepath : string, optional
        Path to HDF5 file. If None a temporary file name will be used.
    parent : string, optional
        Path to group within HDF5 file to use as parent. If None the root
        group will be used.
    group : string, optional
        Path to group within HDF5 file, relative to parent, to use as
        container for cached data. If None the name of the wrapped function
        will be used.
    names : sequence of strings, optional
        Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
        etc.
    typed : bool, optional
        If True, arguments of different types will be cached separately.
        For example, f(3.0) and f(3) will be treated as distinct calls with
        distinct results.
    hashed_key : bool, optional
        If False (default) the key will not be hashed, which makes for
        readable cache group names. If True the key will be hashed, however
        note that on Python >= 3.3 the hash value will not be the same between
        sessions unless the environment variable PYTHONHASHSEED has been set
        to the same value.

    Returns
    -------
    decorator : function

    Examples
    --------

    Without any arguments, will cache using a temporary HDF5 file::

        >>> import allel
        >>> @allel.util.hdf5_cache()
        ... def foo(n):
        ...     print('executing foo')
        ...     return np.arange(n)
        ...
        >>> foo(3)
        executing foo
        array([0, 1, 2])
        >>> foo(3)
        array([0, 1, 2])
        >>> foo.cache_filepath # doctest: +SKIP
        '/tmp/tmp_jwtwgjz'

    Supports multiple return values, including scalars, e.g.::

        >>> @allel.util.hdf5_cache()
        ... def bar(n):
        ...     print('executing bar')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> bar(3)
        executing bar
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> bar(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    Names can also be specified for the datasets, e.g.::

        >>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
        ... def baz(n):
        ...     print('executing baz')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> baz(3)
        executing baz
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> baz(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    """

    # initialise HDF5 file path
    if filepath is None:
        import tempfile
        filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5')
        atexit.register(os.remove, filepath)

    # initialise defaults for dataset creation
    h5dcreate_kwargs.setdefault('chunks', True)

    def decorator(user_function):

        # setup the name for the cache container group
        if group is None:
            container = user_function.__name__
        else:
            container = group

        def wrapper(*args, **kwargs):

            # load from cache or not
            no_cache = kwargs.pop('no_cache', False)

            # compute a key from the function arguments
            key = _make_key(args, kwargs, typed)
            if hashed_key:
                key = str(hash(key))
            else:
                key = str(key).replace('/', '__slash__')

            return _hdf5_cache_act(filepath, parent, container, key, names,
                                   no_cache, user_function, args, kwargs,
                                   h5dcreate_kwargs)

        wrapper.cache_filepath = filepath
        return update_wrapper(wrapper, user_function)

    return decorator
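
# Usage note (not in the original docstring): the wrapper also accepts a
# `no_cache` keyword, popped before the cache key is computed, which
# forces re-execution instead of reading the cached datasets. A sketch,
# assuming numpy and the decorator's helpers are importable:
if __name__ == '__main__':
    import numpy as np

    @hdf5_cache()
    def qux(n):
        return np.arange(n)

    qux(3)                 # computed, then cached in the HDF5 file
    qux(3)                 # loaded from the cache
    qux(3, no_cache=True)  # recomputed, bypassing the cache lookup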

Example #51
0
def start_events_device(log_dir, timeout=5):
    EVENTS_PROCESSES['MainDevice'] = EventsDevice()
    EVENTS_PROCESSES['MainDevice'].start(log_dir)
    EVENTS_PROCESSES['MainDevice'].ready_event.wait(timeout=timeout)

    EVENTS_PROCESSES['EVENTS_FILE_LOGGER'] = EventsFileLogger()
    EVENTS_PROCESSES['EVENTS_GRAFANA_ANNOTATOR'] = GrafanaAnnotator()

    EVENTS_PROCESSES['EVENTS_FILE_LOGGER'].start(log_dir)
    EVENTS_PROCESSES['EVENTS_GRAFANA_ANNOTATOR'].start()

    # default filters: match both the misspelled and the corrected forms
    # of the rate-limit backtrace message
    EVENTS_PROCESSES['default_filter'] = []
    EVENTS_PROCESSES['default_filter'] += [DbEventsFilter(type='BACKTRACE', line='Rate-limit: supressed')]
    EVENTS_PROCESSES['default_filter'] += [DbEventsFilter(type='BACKTRACE', line='Rate-limit: suppressed')]


def stop_events_device():
    processes = ['EVENTS_FILE_LOGGER', 'EVENTS_GRAFANA_ANNOTATOR', 'MainDevice']
    for proc_name in processes:
        if proc_name in EVENTS_PROCESSES:
            EVENTS_PROCESSES[proc_name].terminate()
    for proc_name in processes:
        if proc_name in EVENTS_PROCESSES:
            EVENTS_PROCESSES[proc_name].join(timeout=60)


atexit.register(stop_events_device)
Example #52
0
def setup_automation_execution(pid_file):
    """Setup procedures for transfer.py."""
    atexit.register(manage_automation_execution, pid_file)
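
# `manage_automation_execution` is defined elsewhere in transfer.py; a
# plausible minimal version (hypothetical, assuming it only has to clean
# up the PID file at exit) could be:
def manage_automation_execution(pid_file):
    import os
    # Remove the PID file if it still exists at interpreter exit
    if os.path.exists(pid_file):
        os.remove(pid_file)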
Example #53
0
import logging
import atexit

import keras
from sklearn.base import BaseEstimator
import tensorflow
from tensorflow.python.client.timeline import Timeline
from tensorflow.python.client import device_lib

available_gpus = len([
    x.name for x in device_lib.list_local_devices() if x.device_type == 'GPU'
])

if available_gpus:
    from keras.layers import CuDNNLSTM, CuDNNGRU

logger = logging.getLogger(__name__)

# clearing the Keras session at exit prevents a random GC exception
atexit.register(keras.backend.clear_session)


class Base(BaseEstimator):
    def __init__(
        self,
        model=None,
        embed_size=10,
        sequence_embedding='flatten',
        sequence_embed_size=None,
        hidden_width=1024,
        hidden_layers=4,
        layer_shrink=0.5,
        dropout=0,
        batch_size=32,
        learning_rate=0.001,
Example #54
0
    def go(self):
        """
        Launch the program main loop.
        """
        atexit.register(self.unbind_hotkeys)
        gtk.main()
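
    # Note: registering the cleanup before entering the blocking GTK main
    # loop ensures the hotkeys are unbound however the loop terminates.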
Example #55
0
    """
    Any additional configuration (register callback) for airflow.utils.action_loggers
    module
    :return: None
    """
    pass


try:
    from airflow_local_settings import *
    log.info("Loaded airflow_local_settings.")
except ImportError:
    pass

configure_logging()
configure_vars()
configure_adapters()
# The webservers import this file from models.py with the default settings.
configure_orm()
configure_action_logging()

# Ensure we close DB connections at scheduler and gunicorn worker terminations
atexit.register(dispose_orm)

# Const stuff

KILOBYTE = 1024
MEGABYTE = KILOBYTE * KILOBYTE
WEB_COLORS = {'LIGHTBLUE': '#4d9de0',
              'LIGHTORANGE': '#FF9933'}
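
# For context, a dispose hook like `dispose_orm` typically just tears down
# the SQLAlchemy session registry and engine. A minimal sketch with
# hypothetical globals (not airflow's actual implementation):
import atexit
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

_engine = create_engine("sqlite://")                    # hypothetical engine
_Session = scoped_session(sessionmaker(bind=_engine))   # hypothetical registry

def _dispose_orm():
    global _engine, _Session
    if _Session is not None:
        _Session.remove()      # return connections held by the registry
        _Session = None
    if _engine is not None:
        _engine.dispose()      # close the connection pool
        _engine = None

atexit.register(_dispose_orm)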
Example #56
0
def run_build(session, engine):

    ot.print_center("============ Beginning build ============")

    # Take in the 'run' argument from the command line
    parser = argparse.ArgumentParser(
        description="Resuspend a plate of DNA on an Opentrons OT-1 robot.")
    parser.add_argument(
        '-r',
        '--run',
        required=False,
        action="store_true",
        help="Send commands to the robot and print command output.")
    args = parser.parse_args()

    # Verify that the correct robot is being used
    if args.run:
        ot.check_robot()

    # Choose which build plan to build
    build_options = []
    builds = [
        build
        for build in session.query(Build).filter(Build.status == 'planning')
    ]
    if len(builds) == 0:
        sys.exit(
            'No plans available, run `create_build_plan.py` to generate them')

    for num, build in enumerate(builds):
        print('{} - {}'.format(num, build.build_name))
        build_options.append(build)
    ans = ot.request_info("Enter desired plan number: ", type='int')
    target_build = build_options[ans]

    # Use that build name to create a dataframe with the information from the plan
    query = "SELECT parts.part_id,builds.build_name,part_wells.address as destination,fragments.fragment_name,frag_plates.plate_name,frag_plates.plate_id,frag_wells.address as source,frag_wells.volume FROM parts \
            INNER JOIN wells AS part_wells ON parts.id = part_wells.part_id\
            INNER JOIN plates AS part_plates ON part_wells.plate_id = part_plates.id\
            INNER JOIN builds ON part_plates.build_id = builds.id\
            INNER JOIN part_frag ON parts.id = part_frag.part_id\
            INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
            INNER JOIN wells AS frag_wells ON fragments.id = frag_wells.fragment_id\
            INNER JOIN plates AS frag_plates ON frag_wells.plate_id = frag_plates.id\
            WHERE builds.build_name = '{}'".format(target_build.build_name)

    build_plan = pd.read_sql_query(query, con=engine)
    print(build_plan)
    frags = build_plan.groupby('part_id').agg(len)
    if len(frags) == len(
        [frag for frag in frags.fragment_name.tolist() if frag == 2]):
        print('Build only contains 2 fragment assemblies')
        num_frags = 2
        rxn_vol = 0.6
    else:
        print('Using MM for single fragment')
        rxn_vol = 0.8
        num_frags = 1

    unique_plates = build_plan.plate_id.unique().tolist()

    # Give each row a rank based on the order of the plates to sort on later
    plate_dict = dict([[y, x] for x, y in enumerate(unique_plates)])
    build_plan['plate_rank'] = build_plan.plate_id.apply(
        lambda x: plate_dict[x])
    build_plan = build_plan.sort_values('plate_rank')

    # Currently available spots on the OT-one deck
    SOURCE_SLOTS = ['D2', 'D3', 'B2']

    ## Generate a list of unique plates that are needed
    plate_index = [(y, x) for x, y in enumerate(unique_plates)]
    plate_index = dict(plate_index)

    ## Group the plates so that they can be swapped in batches
    ot.print_center("...Grouping plates...")
    group_plates = [
        unique_plates[n:n + len(SOURCE_SLOTS)]
        for n in range(0, len(unique_plates), len(SOURCE_SLOTS))
    ]
    for num, group in enumerate(group_plates):
        print("Group{}: {}".format(num + 1, group))

    ot.print_center("...Checking if plates need to be resuspended...")

    query_resuspend = "SELECT plates.plate_id,plates.resuspended FROM plates\
                        WHERE plates.resuspended = 'not_resuspended'\
                            AND plates.plate_id IN ({})".format(
        ot.list_to_string(unique_plates))

    resuspended = pd.read_sql_query(query_resuspend, con=engine)
    if len(resuspended) == 0:
        print('All plates are resuspended')
    else:
        for i, plate in resuspended.iterrows():
            ans = ot.request_info(
                'Plate {} is not resuspended, would you like to resuspend it? y/n: '
                .format(plate['plate_id']),
                type='string')
            if ans == 'y':
                resuspend.resuspension(
                    session, engine,
                    session.query(Plate).filter(
                        Plate.plate_id == plate['plate_id']).one())

    query = "SELECT parts.part_id,builds.build_name,part_wells.address as destination,fragments.fragment_name,frag_plates.plate_name,frag_plates.plate_id,frag_wells.address as source,frag_wells.volume FROM parts \
            INNER JOIN wells AS part_wells ON parts.id = part_wells.part_id\
            INNER JOIN plates AS part_plates ON part_wells.plate_id = part_plates.id\
            INNER JOIN builds ON part_plates.build_id = builds.id\
            INNER JOIN part_frag ON parts.id = part_frag.part_id\
            INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
            INNER JOIN wells AS frag_wells ON fragments.id = frag_wells.fragment_id\
            INNER JOIN plates AS frag_plates ON frag_wells.plate_id = frag_plates.id\
            WHERE builds.build_name = '{}'".format(target_build.build_name)

    build_plan = pd.read_sql_query(query, con=engine)
    build_plan['plate_rank'] = build_plan.plate_id.apply(
        lambda x: plate_dict[x])
    build_plan = build_plan.sort_values('plate_rank')

    input("Press enter to continue")

    ## =============================================
    ## SETUP THE OT-1 DECK
    ## =============================================

    # Specify the locations of each object on the deck
    locations = {
        "tiprack-200": "A3",
        "tiprack-10": "E1",
        "tiprack-10s1": "E3",
        "tiprack-10s2": "E2",
        "trash": "D1",
        "PCR-strip-tall": "C3",
        "DEST_PLATE": "C2",
        "Tube_rack": "B1"
    }
    # Sets the first group of plates
    used_plates = []
    plate_counter = 0
    current_group = group_plates[plate_counter]
    source_plates = ot.change_plates(locations, current_group, SOURCE_SLOTS)

    ## =============================================
    ## SETUP THE MASTER MIX
    ## =============================================

    vol = int(
        ot.request_info('Enter desired reaction volume (i.e. 5,10,20): ',
                        type='int'))

    # Set the proportion of master mix to fragment to 4:1
    master_volume = rxn_vol * vol
    frag_vol = 0.2 * vol

    ot.print_center('...Calculating master mix volumes...')

    # Set a multiplier to account for pipetting error and evaporation
    extra_master = 1.3

    unique_frag = build_plan[['part_id', 'fragment_name',
                              'destination']].drop_duplicates()

    frag_df = unique_frag.groupby('destination').agg(len).part_id
    frag_df = frag_df.reset_index()
    frag_df = frag_df.rename(columns={'part_id': 'frag_num'})
    frag_dict = dict(
        zip(frag_df.destination.tolist(), frag_df.frag_num.tolist()))

    build_plan['frag_num'] = build_plan.destination.apply(
        lambda x: frag_dict[x])

    unique_df = build_plan[['part_id', 'destination',
                            'frag_num']].drop_duplicates()

    total_rxns = unique_df.frag_num.sum()

    need_extra = unique_df[unique_df.frag_num > 1]

    num_wells = len(build_plan.part_id.unique().tolist())
    num_rows = num_wells // 8
    master_reactions = math.ceil((total_rxns) * extra_master)
    print("Total rxns: {}".format(total_rxns, master_reactions))

    # Generate the dataframe to present the master mix composition
    master_mix = ot.make_gg_rxns(master_reactions, master_volume)
    print("Use the table below to create the master mix")
    print()
    print(master_mix)
    print()
    print("Place the master mix in the 'A1' position of the tube rack")
    print("Also place a tube of with 1.2 mL of water in the 'B1' position ")
    input("Press enter to continue")

    ## =============================================
    ## INITIALIZE THE OT-1
    ## =============================================
    # Determine whether to simulate or run the protocol

    if args.run:
        port = os.environ["ROBOT_DEV"]
        print("Connecting robot to port {}".format(port))
        robot.connect(port)
    else:
        print("Simulating protcol run")
        robot.connect()

    # Declare components on the deck
    p200_tipracks = [
        containers.load('tiprack-200ul', locations["tiprack-200"]),
    ]
    p10_tipracks = [
        containers.load('tiprack-10ul', locations["tiprack-10"]),
    ]
    p10s_tipracks = [
        containers.load('tiprack-10ul', locations["tiprack-10s1"]),
        containers.load('tiprack-10ul', locations["tiprack-10s2"])
    ]
    trash = containers.load('point', locations["trash"],
                            'holywastedplasticbatman')
    centrifuge_tube = containers.load('tube-rack-2ml', locations["Tube_rack"])
    master = containers.load('PCR-strip-tall', locations["PCR-strip-tall"])
    dest_plate = containers.load('96-PCR-tall', locations["DEST_PLATE"])

    p10, p10s, p200 = ot.initialize_pipettes(p10_tipracks, p10s_tipracks,
                                             p200_tipracks, trash)

    # Update database status
    def exit_handler():
        print('Choose one of the following options:')
        print('1-Save successful assembly\n2-Restore plan\n3-Abandon plan')
        ans = ot.request_info('Select what to do: ',
                              type='int',
                              select_from=[1, 2, 3])
        if ans == 1:
            ot.print_center('...Assembly is complete...')
        elif ans == 2:
            target_build.status = 'planning'
            ot.print_center('...Restoring the build plan...')
            for part in session.query(Part).filter(
                    Part.part_id.in_(build_plan.part_id.unique().tolist())):
                part.change_status('planning')
        elif ans == 3:
            target_build.status = 'abandoned'
            ot.print_center('...Unstaging all parts in build plan...')
            for part in session.query(Part).filter(
                    Part.part_id.in_(build_plan.part_id.unique().tolist())):
                part.change_status('received')
        session.commit()

    target_build.status = 'building'
    session.commit()

    atexit.register(exit_handler)

    ## =============================================
    ## OT-1 PROTOCOL
    ## =============================================

    # Start timer
    start = datetime.now()
    print("Starting run at: ", start)

    # Home the robot to start
    robot.home()

    # Aliquot the master mix into the PCR tube strip
    vol_per_tube = round((num_rows * master_volume * extra_master), 2)
    print("Aliquoting MM into PCR tubes")
    print("{}ul into each tube".format(vol_per_tube))
    p200.pick_up_tip()
    for well in range(8):
        print("Transferring {}ul to well {}".format(vol_per_tube, well))
        p200.transfer(vol_per_tube,
                      centrifuge_tube['A1'].bottom(),
                      master.wells(well).bottom(),
                      mix_before=(3, 50),
                      new_tip='never')
    p200.drop_tip()

    # Aliquot the master mix into all of the desired wells
    p10.pick_up_tip()
    for row in range(num_rows):
        print("Transferring {}ul of master mix to row {}".format(
            master_volume,
            int(row) + 1))
        p10.transfer(master_volume,
                     master['A1'].bottom(),
                     dest_plate.rows(row).bottom(),
                     mix_before=(1, 8),
                     blow_out=True,
                     new_tip='never')
    p10.drop_tip()

    # Aliquot master mix into the last row if not a complete row
    if num_wells % 8 > 0:
        p10s.pick_up_tip()
        print("need single channel for {}".format(num_wells % 8))
        for missing in range(num_wells % 8):
            current_well = (8 * num_rows) + (missing)
            print("Transferring {}ul of extra MM to {}".format(
                master_volume, current_well))
            p10s.transfer(master_volume,
                          centrifuge_tube['A1'].bottom(),
                          dest_plate.wells(current_well).bottom(),
                          blow_out=True,
                          mix_before=(1, 8),
                          new_tip='never')
        p10s.drop_tip()

    # Aliquot extra master mix into wells with multiple fragments
    if len(need_extra) != 0:
        p10s.pick_up_tip()
        for i, transfer in need_extra.iterrows():
            extra_volume = (int(transfer['frag_num']) - 1) * master_volume
            current_well = transfer['destination']
            print("Transferring {}ul of extra MM to {}".format(
                extra_volume, current_well))
            p10s.transfer(extra_volume,
                          centrifuge_tube['A1'].bottom(),
                          dest_plate.wells(current_well).bottom(),
                          blow_out=True,
                          mix_before=(1, 8),
                          new_tip='never')
        p10s.drop_tip()
    else:
        print('No extra MM needs to be aliquoted')

    ## Add the fragments from the source plates to the destination plate
    ## ============================================

    # Sets the volume of water to dilute with, if needed
    dil_vol = 5

    build_plan = build_plan.sort_values('plate_rank')
    for i, row in build_plan.iterrows():
        start_well = row['source']
        dest_well = row['destination']
        gene = row['part_id']
        plate = row['plate_id']
        volume = row['volume']

        if plate not in current_group:
            plate_counter += 1
            current_group = group_plates[plate_counter]
            source_plates = ot.change_plates(locations, current_group,
                                             SOURCE_SLOTS)

        p10s.pick_up_tip()

        # Only dilutes wells that have low starting volume
        if volume < 30:
            print("Diluting sample in plate {} well {} with {}uL of water".
                  format(plate, start_well, dil_vol))
            p10s.transfer(dil_vol,
                          centrifuge_tube['B1'].bottom(),
                          source_plates[plate].wells(start_well).bottom(),
                          new_tip='never')

        print("Transferring {} of {} from plate {} well {} to well {}".format(
            frag_vol, gene, plate, start_well, dest_well))
        p10s.mix(3, 8, source_plates[plate].wells(start_well).bottom())

        # Checks the calibration to make sure that it can aspirate correctly
        p10s.aspirate(frag_vol,
                      source_plates[plate].wells(start_well).bottom())
        # if plate not in used_plates:
        #     ot.change_height(p10s,source_plates[plate],source_plates[plate].wells(start_well))
        p10s.dispense(frag_vol, dest_plate.wells(dest_well).bottom())
        used_plates.append(plate)

        p10s.drop_tip()

    robot.home()

    ot.print_center('...Updating part status...')

    to_build = [well.parts for well in target_build.plates[0].wells]
    for part in to_build:
        part.change_status('building')
    session.commit()

    return
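
# Note: exit_handler is registered unconditionally above, so the prompt
# also fires after a clean run. A minimal sketch of how the prompt could
# be suppressed on success (Python 3; names below are stand-ins):
import atexit

def _exit_prompt():
    print("prompting for build status...")  # stand-in for the real prompt

atexit.register(_exit_prompt)
# ... protocol runs to completion ...
atexit.unregister(_exit_prompt)  # drop the handler on a clean finish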
Example #57
0
from .filtering import blockmedian
from .gridding import surface
from .sampling import grdtrack
from .mathops import makecpt
from .modules import config, info, grdinfo, which
from . import datasets


# Get the version number through versioneer
__version__ = _get_versions()["version"]
__commit__ = _get_versions()["full-revisionid"]

# Start our global modern mode session
_begin()
# Tell Python to run _end when shutting down
_atexit.register(_end)


def print_clib_info():
    """
    Print information about the GMT shared library that we can find.

    Includes the GMT version, default values for parameters, the path to the
    ``libgmt`` shared library, and GMT directories.
    """
    from .clib import Session

    lines = ["Loaded libgmt:"]
    with Session() as ses:
        for key in sorted(ses.info):
            lines.append("  {}: {}".format(key, ses.info[key]))
Example #58
0
import sys
import threading
import time
import atexit
from unittest import mock  # the original likely used the `mock` package

sys.modules["RPi.GPIO"] = mock.Mock()
sys.modules["RPi"] = mock.Mock()
sys.path.insert(0, "../examples/")

import motephat

_running = True


def watch():
    while _running:
        print(motephat.pixels)
        time.sleep(0.5)


def stop():
    global _running
    _running = False


_t_watch = threading.Thread(target=watch)
_t_watch.start()
atexit.register(stop)
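# Safety net: stop() is registered so the watcher loop's flag is cleared
# at shutdown; the flag is also cleared explicitly below once bilgetank
# has run, so the non-daemon thread ends without blocking exit.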

import bilgetank

_running = False
Example #59
0
def do_run_server(error_cb,
                  opts,
                  mode,
                  xpra_file,
                  extra_args,
                  desktop_display=None,
                  progress_cb=None):
    assert mode in (
        "start",
        "start-desktop",
        "upgrade",
        "upgrade-desktop",
        "shadow",
        "proxy",
    )

    def _progress(i, msg):
        if progress_cb:
            progress_cb(i, msg)

    progress = _progress

    progress(10, "initializing environment")
    try:
        cwd = os.getcwd()
    except OSError:
        cwd = os.path.expanduser("~")
        warn("current working directory does not exist, using '%s'\n" % cwd)
    validate_encryption(opts)
    if opts.encoding == "help" or "help" in opts.encodings:
        return show_encoding_help(opts)

    #remove anything pointing to dbus from the current env
    #(so we only detect a dbus instance started by pam,
    # and override everything else)
    for k in tuple(os.environ.keys()):
        if k.startswith("DBUS_"):
            del os.environ[k]

    use_display = parse_bool("use-display", opts.use_display)
    starting = mode == "start"
    starting_desktop = mode == "start-desktop"
    upgrading = mode == "upgrade"
    upgrading_desktop = mode == "upgrade-desktop"
    shadowing = mode == "shadow"
    proxying = mode == "proxy"

    if not proxying and POSIX and not OSX:
        #we don't support wayland servers,
        #so make sure GDK will use the X11 backend:
        from xpra.os_util import saved_env
        saved_env["GDK_BACKEND"] = "x11"
        os.environ["GDK_BACKEND"] = "x11"

    has_child_arg = (opts.start_child or opts.start_child_on_connect
                     or opts.start_child_after_connect
                     or opts.start_child_on_last_client_exit)
    if proxying or upgrading or upgrading_desktop:
        #when proxying or upgrading, don't exec any plain start commands:
        opts.start = opts.start_child = []
    elif opts.exit_with_children:
        assert has_child_arg, "exit-with-children was specified but start-child* is missing!"
    elif opts.start_child:
        warn("Warning: the 'start-child' option is used,")
        warn(" but 'exit-with-children' is not enabled,")
        warn(" use 'start' instead")

    if opts.bind_rfb and (proxying or starting):
        get_util_logger().warn(
            "Warning: bind-rfb sockets cannot be used with '%s' mode" % mode)
        opts.bind_rfb = []

    if not shadowing and not starting_desktop:
        opts.rfb_upgrade = 0

    if upgrading or upgrading_desktop or shadowing:
        #there should already be one running
        #so change None ('auto') to False
        if opts.pulseaudio is None:
            opts.pulseaudio = False

    #get the display name:
    if shadowing and not extra_args:
        if WIN32 or OSX:
            #just a virtual name for the only display available:
            display_name = "Main"
        else:
            from xpra.scripts.main import guess_X11_display
            dotxpra = DotXpra(opts.socket_dir, opts.socket_dirs)
            display_name = guess_X11_display(dotxpra, desktop_display)
    elif (upgrading or upgrading_desktop) and not extra_args:
        display_name = guess_xpra_display(opts.socket_dir, opts.socket_dirs)
    else:
        if len(extra_args) > 1:
            error_cb(
                "too many extra arguments (%i): only expected a display number"
                % len(extra_args))
        if len(extra_args) == 1:
            display_name = extra_args[0]
            if not shadowing and not upgrading and not use_display:
                display_name_check(display_name)
        else:
            if proxying:
                #find a free display number:
                dotxpra = DotXpra(opts.socket_dir, opts.socket_dirs)
                all_displays = dotxpra.sockets()
                #ie: [("LIVE", ":100"), ("LIVE", ":200"), ...]
                displays = [v[1] for v in all_displays]
                display_name = None
                for x in range(1000, 20000):
                    v = ":%s" % x
                    if v not in displays:
                        display_name = v
                        break
                if not display_name:
                    error_cb(
                        "you must specify a free virtual display name to use with the proxy server"
                    )
            elif use_display:
                #only use automatic guess for xpra displays and not X11 displays:
                display_name = guess_xpra_display(opts.socket_dir,
                                                  opts.socket_dirs)
            else:
                # We will try to find one automatically
                # Use the temporary magic value 'S' as marker:
                display_name = 'S' + str(os.getpid())

    if not (shadowing or proxying or upgrading or upgrading_desktop) and \
    opts.exit_with_children and not has_child_arg:
        error_cb(
            "--exit-with-children specified without any children to spawn; exiting immediately"
        )

    atexit.register(run_cleanups)

    # Generate the script text now, because os.getcwd() will
    # change if/when we daemonize:
    from xpra.server.server_util import (
        xpra_runner_shell_script,
        write_runner_shell_scripts,
        find_log_dir,
        create_input_devices,
        source_env,
    )
    script = None
    if POSIX and getuid() != 0:
        script = xpra_runner_shell_script(xpra_file, cwd, opts.socket_dir)

    uid = int(opts.uid)
    gid = int(opts.gid)
    username = get_username_for_uid(uid)
    home = get_home_for_uid(uid)
    ROOT = POSIX and getuid() == 0

    protected_fds = []
    protected_env = {}
    stdout = sys.stdout
    stderr = sys.stderr
    # Daemonize:
    if POSIX and opts.daemon:
        #daemonize will chdir to "/", so try to use an absolute path:
        if opts.password_file:
            opts.password_file = tuple(
                os.path.abspath(x) for x in opts.password_file)
        from xpra.server.server_util import daemonize
        daemonize()
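        # atexit handlers registered earlier (run_cleanups) are inherited
        # across the fork; daemonize() typically has the intermediate
        # parents exit via os._exit(), so those handlers fire only in the
        # surviving daemon process.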

    displayfd = 0
    if POSIX and opts.displayfd:
        try:
            displayfd = int(opts.displayfd)
            if displayfd > 0:
                protected_fds.append(displayfd)
        except ValueError as e:
            stderr.write("Error: invalid displayfd '%s':\n" % opts.displayfd)
            stderr.write(" %s\n" % e)
            del e

    clobber = int(upgrading or upgrading_desktop) * CLOBBER_UPGRADE | int(
        use_display or 0) * CLOBBER_USE_DISPLAY
    start_vfb = not (shadowing or proxying or clobber)
    xauth_data = None
    if start_vfb:
        xauth_data = get_hex_uuid()

    # if pam is present, try to create a new session:
    pam = None
    PAM_OPEN = POSIX and envbool("XPRA_PAM_OPEN", ROOT and uid != 0)
    if PAM_OPEN:
        try:
            from xpra.server.pam import pam_session  #@UnresolvedImport
        except ImportError as e:
            stderr.write("Error: failed to import pam module\n")
            stderr.write(" %s" % e)
            del e
            PAM_OPEN = False
    if PAM_OPEN:
        fdc = FDChangeCaptureContext()
        with fdc:
            pam = pam_session(username)
            env = {
                #"XDG_SEAT"               : "seat1",
                #"XDG_VTNR"               : "0",
                "XDG_SESSION_TYPE": "x11",
                #"XDG_SESSION_CLASS"      : "user",
                "XDG_SESSION_DESKTOP": "xpra",
            }
            #maybe we should just bail out instead?
            if pam.start():
                pam.set_env(env)
                items = {}
                if display_name.startswith(":"):
                    items["XDISPLAY"] = display_name
                if xauth_data:
                    items["XAUTHDATA"] = xauth_data
                pam.set_items(items)
                if pam.open():
                    #we can't close it, because we're not going to be root any more,
                    #but since we're the process leader for the session,
                    #terminating will also close the session
                    #add_cleanup(pam.close)
                    protected_env = pam.get_envlist()
                    os.environ.update(protected_env)
        #closing the pam fd causes the session to be closed,
        #and we don't want that!
        protected_fds += fdc.get_new_fds()

    #get XDG_RUNTIME_DIR from env options,
    #which may not have updated os.environ yet when running as root with "--uid="
    xrd = os.path.abspath(parse_env(opts.env).get("XDG_RUNTIME_DIR", ""))
    if ROOT and (uid > 0 or gid > 0):
        #we're going to chown the directory if we create it,
        #ensure this cannot be abused, only use "safe" paths:
        if not any(x for x in ("/run/user/%i" % uid, "/tmp", "/var/tmp")
                   if xrd.startswith(x)):
            xrd = ""
        #these paths could cause problems if we were to create and chown them:
        if xrd.startswith("/tmp/.X11-unix") or xrd.startswith(
                "/tmp/.XIM-unix"):
            xrd = ""
    if not xrd:
        xrd = os.environ.get("XDG_RUNTIME_DIR")
    xrd = create_runtime_dir(xrd, uid, gid)
    if xrd:
        #this may override the value we get from pam
        #with the value supplied by the user:
        protected_env["XDG_RUNTIME_DIR"] = xrd

    if script:
        # Write out a shell-script so that we can start our proxy in a clean
        # environment:
        write_runner_shell_scripts(script)

    import datetime
    extra_expand = {
        "TIMESTAMP": datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    }
    log_to_file = opts.daemon or os.environ.get("XPRA_LOG_TO_FILE", "") == "1"
    if start_vfb or log_to_file:
        #we will probably need a log dir
        #either for the vfb, or for our own log file
        log_dir = opts.log_dir or ""
        if not log_dir or log_dir.lower() == "auto":
            log_dir = find_log_dir(username, uid=uid, gid=gid)
            if not log_dir:
                raise InitException(
                    "cannot find or create a logging directory")
        #expose the log-dir as "XPRA_LOG_DIR",
        #this is used by Xdummy for the Xorg log file
        if "XPRA_LOG_DIR" not in os.environ:
            os.environ["XPRA_LOG_DIR"] = log_dir

    if log_to_file:
        from xpra.server.server_util import select_log_file, open_log_file, redirect_std_to_log
        log_filename0 = osexpand(
            select_log_file(log_dir, opts.log_file, display_name), username,
            uid, gid, extra_expand)
        if os.path.exists(log_filename0) and not display_name.startswith("S"):
            #don't overwrite the log file just yet,
            #as we may still fail to start
            log_filename0 += ".new"
        logfd = open_log_file(log_filename0)
        if POSIX and ROOT and (uid > 0 or gid > 0):
            try:
                os.fchown(logfd, uid, gid)
            except OSError as e:
                noerr(stderr.write,
                      "failed to chown the log file '%s'\n" % log_filename0)
                noerr(stderr.flush)
        stdout, stderr = redirect_std_to_log(logfd, *protected_fds)
        noerr(
            stderr.write, "Entering daemon mode; " +
            "any further errors will be reported to:\n" +
            ("  %s\n" % log_filename0))
        noerr(stderr.flush)
        os.environ["XPRA_SERVER_LOG"] = log_filename0
    else:
        #server log does not exist:
        os.environ.pop("XPRA_SERVER_LOG", None)

    #warn early about this:
    if (starting or starting_desktop
        ) and desktop_display and opts.notifications and not opts.dbus_launch:
        print_DE_warnings()

    if start_vfb and opts.xvfb.find("Xephyr") >= 0 and opts.sync_xvfb <= 0:
        warn("Warning: using Xephyr as vfb")
        warn(" you should also enable the sync-xvfb option")
        warn(" to keep the Xephyr window updated")

    progress(10, "creating sockets")
    from xpra.net.socket_util import get_network_logger, setup_local_sockets, create_sockets
    sockets = create_sockets(opts, error_cb)

    sanitize_env()
    os.environ.update(source_env(opts.source))
    if POSIX:
        if xrd:
            os.environ["XDG_RUNTIME_DIR"] = xrd
        if not OSX:
            os.environ["XDG_SESSION_TYPE"] = "x11"
        if not starting_desktop:
            os.environ["XDG_CURRENT_DESKTOP"] = opts.wm_name
        configure_imsettings_env(opts.input_method)
    if display_name[0] != 'S':
        os.environ["DISPLAY"] = display_name
        if POSIX:
            os.environ["CKCON_X11_DISPLAY"] = display_name
    elif not start_vfb or opts.xvfb.find("Xephyr") < 0:
        os.environ.pop("DISPLAY", None)
    os.environ.update(protected_env)
    from xpra.log import Logger
    log = Logger("server")
    log("env=%s", os.environ)

    UINPUT_UUID_LEN = 12
    UINPUT_UUID_MIN_LEN = 12
    UINPUT_UUID_MAX_LEN = 32
    # Start the Xvfb server first to get the display_name if needed
    odisplay_name = display_name
    xvfb = None
    xvfb_pid = None
    uinput_uuid = None
    if start_vfb and use_display is None:
        #use-display='auto' so we have to figure out
        #if we have to start the vfb or not:
        if not display_name:
            use_display = False
        else:
            progress(20, "connecting to the display")
            start_vfb = verify_display(
                None, display_name, log_errors=False, timeout=1) != 0
    if start_vfb:
        progress(20, "starting a virtual display")
        assert not proxying and xauth_data
        pixel_depth = validate_pixel_depth(opts.pixel_depth, starting_desktop)
        from xpra.x11.vfb_util import start_Xvfb, check_xvfb_process, parse_resolution
        from xpra.server.server_util import has_uinput
        uinput_uuid = None
        if has_uinput() and opts.input_devices.lower() in (
                "uinput", "auto") and not shadowing:
            from xpra.os_util import get_rand_chars
            uinput_uuid = get_rand_chars(UINPUT_UUID_LEN)
        vfb_geom = ""
        try:
            vfb_geom = parse_resolution(opts.resize_display)
        except Exception:
            pass
        xvfb, display_name, cleanups = start_Xvfb(opts.xvfb, vfb_geom,
                                                  pixel_depth, display_name,
                                                  cwd, uid, gid, username,
                                                  xauth_data, uinput_uuid)
        for f in cleanups:
            add_cleanup(f)
        xvfb_pid = xvfb.pid
        #always update as we may now have the "real" display name:
        os.environ["DISPLAY"] = display_name
        os.environ["CKCON_X11_DISPLAY"] = display_name
        os.environ.update(protected_env)
        if display_name != odisplay_name and pam:
            pam.set_items({"XDISPLAY": display_name})

        def check_xvfb(timeout=0):
            return check_xvfb_process(xvfb, timeout=timeout, command=opts.xvfb)
    else:
        if POSIX and clobber:
            #if we're meant to be using a private XAUTHORITY file,
            #make sure to point to it:
            from xpra.x11.vfb_util import get_xauthority_path
            xauthority = get_xauthority_path(display_name, username, uid, gid)
            if os.path.exists(xauthority):
                log("found XAUTHORITY=%s", xauthority)
                os.environ["XAUTHORITY"] = xauthority

        def check_xvfb(timeout=0):
            return True

    if POSIX and not OSX and displayfd > 0:
        from xpra.platform.displayfd import write_displayfd
        try:
            display_no = display_name[1:]
            #ensure it is a string containing the number:
            display_no = str(int(display_no))
            log("writing display_no='%s' to displayfd=%i", display_no,
                displayfd)
            assert write_displayfd(displayfd, display_no), "timeout"
        except Exception as e:
            log.error("write_displayfd failed", exc_info=True)
            log.error("Error: failed to write '%s' to fd=%s", display_name,
                      displayfd)
            log.error(" %s", str(e) or type(e))
            del e

    if not check_xvfb(1):
        noerr(stderr.write, "vfb failed to start, exiting\n")
        return EXIT_VFB_ERROR

    if WIN32 and os.environ.get("XPRA_LOG_FILENAME"):
        os.environ["XPRA_SERVER_LOG"] = os.environ["XPRA_LOG_FILENAME"]
    if opts.daemon:
        log_filename1 = osexpand(
            select_log_file(log_dir, opts.log_file, display_name), username,
            uid, gid, extra_expand)
        if log_filename0 != log_filename1:
            # we now have the correct log filename, so use it:
            try:
                os.rename(log_filename0, log_filename1)
            except (OSError, IOError):
                pass
            else:
                os.environ["XPRA_SERVER_LOG"] = log_filename1
            if odisplay_name != display_name:
                #this may be used by scripts, let's try not to change it:
                noerr(stderr.write, "Actual display used: %s\n" % display_name)
            noerr(stderr.write,
                  "Actual log file name is now: %s\n" % log_filename1)
            noerr(stderr.flush)
        noerr(stdout.close)
        noerr(stderr.close)
    #we should not be using stdout or stderr from this point:
    del stdout
    del stderr

    if not check_xvfb():
        #stdout/stderr were closed above, report via the log instead:
        log.error("vfb failed to start, exiting")
        return EXIT_VFB_ERROR

    #create devices for vfb if needed:
    devices = {}
    if not start_vfb and not proxying and not shadowing and envbool(
            "XPRA_UINPUT", True):
        #try to find the existing uinput uuid:
        #use a subprocess to avoid polluting our current process
        #with X11 connections before we get a chance to change uid
        prop = "_XPRA_UINPUT_ID"
        cmd = ["xprop", "-display", display_name, "-root", prop]
        log("looking for '%s' on display '%s' with XAUTHORITY='%s'", prop,
            display_name, os.environ.get("XAUTHORITY"))
        try:
            code, out, err = get_status_output(cmd)
        except Exception as e:
            log("failed to get existing uinput id: %s", e)
            del e
        else:
            log("Popen(%s)=%s", cmd, (code, out, err))
            if code == 0 and out.find("=") > 0:
                uinput_uuid = out.split("=", 1)[1]
                log("raw uinput uuid=%s", uinput_uuid)
                uinput_uuid = strtobytes(uinput_uuid.strip('\n\r"\\ '))
                if uinput_uuid:
                    if len(uinput_uuid) > UINPUT_UUID_MAX_LEN or len(
                            uinput_uuid) < UINPUT_UUID_MIN_LEN:
                        log.warn("Warning: ignoring invalid uinput id:")
                        log.warn(" '%s'", uinput_uuid)
                        uinput_uuid = None
                    else:
                        log.info("retrieved existing uinput id: %s",
                                 bytestostr(uinput_uuid))
    if uinput_uuid:
        devices = create_input_devices(uinput_uuid, uid)

    if ROOT and (uid != 0 or gid != 0):
        log("root: switching to uid=%i, gid=%i", uid, gid)
        setuidgid(uid, gid)
        os.environ.update({
            "HOME": home,
            "USER": username,
            "LOGNAME": username,
        })
        shell = get_shell_for_uid(uid)
        if shell:
            os.environ["SHELL"] = shell
        #now we've changed uid, it is safe to honour all the env updates:
        configure_env(opts.env)
        os.environ.update(protected_env)

    if opts.chdir:
        log("chdir(%s)", opts.chdir)
        os.chdir(opts.chdir)

    dbus_pid, dbus_env = 0, {}
    if not shadowing and POSIX and not OSX and not clobber:
        no_gtk()
        assert starting or starting_desktop or proxying
        try:
            from xpra.server.dbus.dbus_start import start_dbus
        except ImportError as e:
            log("dbus components are not installed: %s", e)
        else:
            dbus_pid, dbus_env = start_dbus(opts.dbus_launch)
            if dbus_env:
                os.environ.update(dbus_env)

    if not proxying:
        if POSIX and not OSX:
            no_gtk()
            if starting or starting_desktop or shadowing:
                r = verify_display(xvfb, display_name, shadowing)
                if r:
                    return r
        #on win32, this ensures that we get the correct screen size to shadow:
        from xpra.platform.gui import init as gui_init
        log("gui_init()")
        gui_init()

    progress(50, "creating local sockets")
    #setup unix domain socket:
    netlog = get_network_logger()
    local_sockets = setup_local_sockets(opts.bind, opts.socket_dir,
                                        opts.socket_dirs, display_name,
                                        clobber, opts.mmap_group,
                                        opts.socket_permissions, username, uid,
                                        gid)
    netlog("setting up local sockets: %s", local_sockets)
    sockets.update(local_sockets)
    if POSIX and (starting or upgrading or starting_desktop
                  or upgrading_desktop):
        #all unix domain sockets:
        ud_paths = [
            sockpath for stype, _, sockpath, _ in local_sockets
            if stype == "unix-domain"
        ]
        if ud_paths:
            #choose one so our xdg-open override script can use to talk back to us:
            if opts.forward_xdg_open:
                for x in ("/usr/libexec/xpra", "/usr/lib/xpra"):
                    xdg_override = os.path.join(x, "xdg-open")
                    if os.path.exists(xdg_override):
                        os.environ["PATH"] = x + os.pathsep + os.environ.get(
                            "PATH", "")
                        os.environ["XPRA_SERVER_SOCKET"] = ud_paths[0]
                        break
        else:
            log.warn("Warning: no local server sockets,")
            if opts.forward_xdg_open:
                log.warn(" forward-xdg-open cannot be enabled")
            log.warn(" non-embedded ssh connections will not be available")

    set_server_features(opts)

    if not proxying and POSIX and not OSX:
        if not check_xvfb():
            return 1
        from xpra.x11.gtk_x11.gdk_display_source import init_gdk_display_source
        if os.environ.get("NO_AT_BRIDGE") is None:
            os.environ["NO_AT_BRIDGE"] = "1"
        init_gdk_display_source()
        #(now we can access the X11 server)
        if uinput_uuid:
            save_uinput_id(uinput_uuid)

    progress(60, "initializing server")
    if shadowing:
        app = make_shadow_server()
    elif proxying:
        app = make_proxy_server()
    else:
        if starting or upgrading:
            app = make_server(clobber)
        else:
            assert starting_desktop or upgrading_desktop
            app = make_desktop_server(clobber)
        app.init_virtual_devices(devices)

    try:
        app.exec_cwd = opts.chdir or cwd
        app.display_name = display_name
        app.init(opts)
        progress(70, "initializing sockets")
        app.init_sockets(sockets)
        app.init_dbus(dbus_pid, dbus_env)
        if not shadowing and not proxying:
            app.init_display_pid(xvfb_pid)
        app.original_desktop_display = desktop_display
        del opts
        if not app.server_ready():
            return 1
        progress(80, "finalizing")
        app.server_init()
        app.setup()
        app.init_when_ready(_when_ready)
    except InitException as e:
        log.error("xpra server initialization error:")
        log.error(" %s", e)
        app.cleanup()
        return 1
    except Exception as e:
        log.error("Error: cannot start the %s server",
                  app.session_type,
                  exc_info=True)
        log.error(str(e))
        log.info("")
        if upgrading or upgrading_desktop:
            #something abnormal occurred,
            #don't kill the vfb on exit:
            from xpra.server import EXITING_CODE
            app._upgrading = EXITING_CODE
        app.cleanup()
        return 1

    try:
        progress(100, "running")
        log("running %s", app.run)
        r = app.run()
        log("%s()=%s", app.run, r)
    except KeyboardInterrupt:
        log.info("stopping on KeyboardInterrupt")
        app.cleanup()
        return EXIT_OK
    except Exception:
        log.error("server error", exc_info=True)
        app.cleanup()
        return -128
    else:
        if r > 0:
            r = 0
    return r
Example #60
0
import asyncio
import atexit
import math
import os
import signal

PORT = 8888

server = os.fork()
if server == 0:
    loop = asyncio.get_event_loop()
    coro = asyncio.start_server(lambda *_: None, port=PORT)
    loop.run_until_complete(coro)
    loop.run_forever()
else:
    atexit.register(os.kill, server, signal.SIGTERM)
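    # The forked child runs a no-op asyncio server for the benchmarks
    # below; the parent registers os.kill(server, SIGTERM) with atexit so
    # the child is terminated automatically when this process exits.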


async def write_joined_bytearray(writer, chunks):
    body = bytearray(chunks[0])
    for c in chunks[1:]:
        body += c
    writer.write(body)


async def write_joined_list(writer, chunks):
    body = b"".join(chunks)
    writer.write(body)


async def write_separately(writer, chunks):