def close_process(self, process):
    """Forcefully terminate *process* if it has not exited yet.

    On win32 the whole process tree is killed through taskkill; on other
    platforms a plain kill signal is sent.
    """
    if process.poll() is not None:
        # Already exited; nothing to do.
        return
    log.debug('kill process ' + str(process.pid))
    if platform == 'win32':
        sub.call(['taskkill', '/F', '/T', '/PID', str(process.pid)])
    else:
        process.kill()
def modify_loginserver_blacklist(add_or_remove, player):
    """Add or remove *player*'s IP address on the firewall blacklist.

    :param add_or_remove: either 'add' or 'remove'
    :param player: object whose ``ip`` is a 4-tuple of ints
    :raises RuntimeError: when add_or_remove is not a recognised action
    """
    if add_or_remove != 'add' and add_or_remove != 'remove':
        raise RuntimeError('Invalid argument provided')
    ipstring = '%d.%d.%d.%d' % player.ip
    # ipstring is %d-formatted and add_or_remove is validated above, so the
    # shell command cannot be influenced by arbitrary input.
    command = '..\\scripts\\modifyfirewall.py blacklist %s %s' % (add_or_remove, ipstring)
    sp.call(command, shell=True)
def player_worker(args): while True: # Fetch the most recent item in the queue. # Blocks the current greenlet until either # it gets a new item or timeout lapses try: # XXX: Raises queue.Empty exception after # timeout (thus the try block) pl_item = q.get(timeout=10) #print "QUEUE ITEM PICKED: %s" % pl_item # XXX: Not your usual subprocess. We're using # the gevent_subprocess module. Blocks only # this greenlet until the process (playlist # item) ends. command = ["vlc", pl_item, "--play-and-exit", "--quiet"] print command if args.fullscreen: command.append("--fullscreen") print "cool" subprocess.call(command) print "wat" except queue.Empty: # Resume loop continue
def cmd_connect(node, cmd_name, node_info): """Connect to node.""" # FUTURE: call function to check for custom connection-info conn_info = "Defaults" conf_mess = ("\r{0}{1} TO{2} {3} using {5}{4}{2} - Confirm [y/N]: ".format( C_STAT[cmd_name.upper()], cmd_name.upper(), C_NORM, node_info, conn_info, C_HEAD2)) cmd_result = None if input_yn(conf_mess): exec_mess = ("\r{0}CONNECTING TO{1} {2} using {4}{3}{1}: ".format( C_STAT[cmd_name.upper()], C_NORM, node_info, conn_info, C_HEAD2)) ui_erase_ln() ui_print(exec_mess) (ssh_user, ssh_key) = ssh_get_info(node) if ssh_user: ssh_cmd = "ssh {0}{1}@{2}".format(ssh_key, ssh_user, node.public_ips) else: ssh_cmd = "ssh {0}{1}".format(ssh_key, node.public_ips) print("\n") ui_print("\033[?25h") # cursor on subprocess.call(ssh_cmd, shell=True) ui_print("\033[?25l") # cursor off print() cmd_result = True else: ui_print_suffix("Command Aborted") sleep(0.75) return cmd_result
def cleanup(self): print "cleaning up ServoActor..." try: self.run_heartbeat = False call(["killall", self.driver_name]) print "killed successfully..." except Exception, e: print "error killing driver program..."
def cleanup(self): print "cleaning up ServoActor..." try: self.run_heartbeat = False call(['killall', self.driver_name]) print "killed successfully..." except Exception, e: print "error killing driver program..."
def close_process(self, p):
    """Kill p.process if still running, then mark *p* as stopped.

    NOTE(review): in the collapsed source the final assignment's nesting is
    ambiguous; it is placed at function level here (status set regardless)
    -- confirm against the original file.
    """
    proc = p.process
    if proc and proc.poll() is None:
        log.debug('kill process ' + str(proc.pid))
        if platform == 'win32':
            # Kill the whole process tree on Windows.
            sub.call(['taskkill', '/F', '/T', '/PID', str(proc.pid)])
        else:
            proc.kill()
    p.status = 1
def remove_rules_by_name(self, name):
    """Delete every Windows firewall rule whose name matches *name*."""
    netsh = 'c:\\windows\\system32\\Netsh.exe'
    args = [netsh, 'advfirewall', 'firewall', 'delete', 'rule', 'name=%s' % name]
    # The return code is deliberately ignored: deletion is expected to
    # fail when no left-over rules from a previous run exist.
    sp.call(args, stdout=sp.DEVNULL)
def removeallrules():
    """Remove all firewall rules named "TAserverfirewall" via netsh."""
    print('Removing any previous TAserverfirewall rules')
    args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'delete', 'rule',
        'name="TAserverfirewall"',
    ]
    # Exit status is not checked: netsh fails harmlessly when there are
    # no left-over rules from a previous run.
    sp.call(args, stdout=sp.DEVNULL)
def delete_chain(target):
    """Remove every jump to chain *target*, then flush and delete it."""
    for table in ('filter', 'nat'):
        rules = dump_table(table)
        for chain, chain_rules in rules.items():
            # Walk the chain backwards so earlier rule indices stay valid
            # while rules are being deleted.
            for offset, rule in enumerate(reversed(chain_rules)):
                position = len(chain_rules) - offset
                if rule['target'] == target:
                    delete_rule(table, chain, str(position))
        subprocess.call(shlex.split('iptables -t %s --flush %s' % (table, target)))
        subprocess.call(shlex.split('iptables -t %s -X %s' % (table, target)))
def cleanup(self):
    """Cleanup the sandbox.

    To be called at the end of the execution, regardless of whether the
    sandbox should be deleted or not.
    """
    # Ask isolate itself to tear the box down.
    cmd = [self.box_exec]
    if self.cgroup:
        cmd.append("--cg")
    cmd.append("--box-id=%d" % self.box_id)
    cmd.append("--cleanup")
    subprocess.call(cmd)
def do_stop(self):
    """Kill the managed process tree via taskkill; return a status message."""
    if not self.is_ok():
        return self.name + ' has already stopped'
    self.log.info('stop ' + self.name)
    self.stop = True
    # /F forces termination, /T takes the whole child tree with it.
    call(['taskkill', '/F', '/T', '/PID', str(self.process.pid)])
    self.process = None
    return self.name + ' stopped'
def delete_chain(target):
    """Drop all jumps to *target* from the filter/nat tables, then delete it."""
    for table in ('filter', 'nat'):
        table_rules = dump_table(table)
        for chain, chain_rules in table_rules.items():
            # Delete from the bottom up so indices of earlier rules hold.
            for index in range(len(chain_rules), 0, -1):
                if chain_rules[index - 1]['target'] == target:
                    delete_rule(table, chain, str(index))
        subprocess.call(
            shlex.split('iptables -t %s --flush %s' % (table, target)))
        subprocess.call(shlex.split('iptables -t %s -X %s' % (table, target)))
def insert_rules(rules):
    """Insert iptables rules, creating custom chains on first use.

    Rules are processed in reverse so that inserting each at the top
    preserves the order of *rules*.
    """
    builtin_chains = ('OUTPUT', 'INPUT', 'FORWARD', 'PREROUTING', 'POSTROUTING')
    created_chains = set()
    for signature, rule_args in reversed(rules):
        table, chain, _ = rule_args
        if chain not in builtin_chains and chain not in created_chains:
            subprocess.call(shlex.split('iptables -t %s -N %s' % (table, chain)))
            created_chains.add(chain)
        if contains_rule(table, chain, signature):
            LOGGER.info('skip insert rule: -t %s -I %s %s' % rule_args)
        else:
            insert_rule(signature.get('optional'), *rule_args)
def do_stop(self):
    """Stop the child process tree, recording the new state and stop time."""
    if not self.is_ok():
        return self.name + ' has already stopped'
    self.log.info('stop ' + self.name)
    self.stop = STOPPED
    self.stop_time = datetime.datetime.now()
    # Force-kill the whole process tree on Windows.
    call(['taskkill', '/F', '/T', '/PID', str(self.process.pid)])
    self.process = None
    return self.name + ' stopped'
def cleanup(self):
    """Cleanup the sandbox by asking isolate to tear the box down."""
    logger.debug("Cleaning up sandbox in %s.", self.path)
    box_cmd = [self.box_exec]
    if self.cgroup:
        box_cmd.append("--cg")
    box_cmd.append("--box-id=%d" % self.box_id)
    subprocess.call(box_cmd + ["--cleanup"])
    # Remember that cleanup already ran for this sandbox.
    self._has_cleanedup = True
def do_stop(self):
    """Acknowledge the stop request, then taskkill each known task pid."""
    self.send_success('stop ok')
    DEBUG('------------task stop------------')
    # Only tasks that are actually tracked in TASK are stoppable.
    requested = (int(tid) for tid in self.task_ids.split(','))
    stoppable = [tid for tid in requested if tid in TASK]
    for tid in stoppable:
        pid = TASK.pop(tid)
        try:
            INFO('%s stop a new task,task_id:%s,pid:%s' % (self.MODULE_NAME, tid, pid))
            call(['taskkill', '/F', '/T', '/PID', str(pid)])
        except Exception:
            ERROR('%s taskkill a task failed,task_id:%s,pid:%s' % (self.MODULE_NAME, tid, pid))
def delete(self):
    """Delete the directory where the sandbox operated."""
    logger.debug("Deleting sandbox in %s" % self.path)
    # Let isolate tear down the box before removing our scratch space.
    cleanup_cmd = [self.box_exec]
    if self.cgroup:
        cleanup_cmd.append("--cg")
    cleanup_cmd += ["--box-id=%d" % self.box_id, "--cleanup"]
    subprocess.call(cleanup_cmd)
    # Now the working directory itself can go.
    rmtree(self.outer_temp_dir)
def cleanup(self):
    """Cleanup the sandbox.

    To be called at the end of the execution, regardless of whether the
    sandbox should be deleted or not.
    """
    cg_flag = ["--cg"] if self.cgroup else []
    # Delegate the actual teardown to isolate.
    subprocess.call(
        [self.box_exec] + cg_flag + ["--box-id=%d" % self.box_id, "--cleanup"])
def delete(self):
    """Delete the directory where the sandbox operated."""
    logger.debug("Deleting sandbox in %s.", self.path)
    cg_flag = ["--cg"] if self.cgroup else []
    # Tell isolate to cleanup the sandbox first.
    subprocess.call(
        [self.box_exec] + cg_flag + ["--box-id=%d" % self.box_id, "--cleanup"])
    # Then drop the working directory.
    rmtree(self.outer_temp_dir)
def cleanup(self):
    """Cleanup the sandbox.

    To be called at the end of the execution, regardless of whether
    the sandbox should be deleted or not.
    """
    cmd = ([self.box_exec]
           + (["--cg"] if self.cgroup else [])
           + ["--box-id=%d" % self.box_id]
           + ["--cleanup"])
    # Use subprocess.DEVNULL when dropping Python 2.
    # Fix: the devnull handle was previously opened and never closed,
    # leaking a file descriptor per cleanup; close it via the context
    # manager once the child has exited.
    with io.open(os.devnull, "r+b") as devnull:
        subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)
def insert_rules(rules):
    """Insert each iptables rule, creating non-builtin chains on demand."""
    created_chains = set()
    # Reversed iteration: inserting each rule at the top keeps the
    # original ordering of *rules* in the final chain.
    for signature, rule_args in reversed(rules):
        table, chain, _ = rule_args
        is_builtin = chain in (
            'OUTPUT', 'INPUT', 'FORWARD', 'PREROUTING', 'POSTROUTING')
        if not is_builtin and chain not in created_chains:
            subprocess.call(
                shlex.split('iptables -t %s -N %s' % (table, chain)))
            created_chains.add(chain)
        if contains_rule(table, chain, signature):
            LOGGER.info('skip insert rule: -t %s -I %s %s' % rule_args)
        else:
            insert_rule(signature.get('optional'), *rule_args)
def prepare_tasks(self, tender_id, start_date):
    """Write systemd user .service/.timer units that launch this auction.

    :param tender_id: tender identifier, used in unit descriptions
    :param start_date: planned auction start (timezone-aware datetime)

    Exits the process via sys.exit() when the adjusted start time would
    already be past the planned start. When ``self.activate`` is set,
    reloads systemd and activates the timer unit.
    """
    # The service unit re-runs the current script with the 'run' subcommand.
    cmd = deepcopy(sys.argv)
    cmd[0] = os.path.abspath(cmd[0])
    cmd[1] = 'run'
    home_dir = os.path.expanduser('~')
    with open(os.path.join(home_dir, SYSTEMD_RELATIVE_PATH.format(self.auction_doc_id, 'service')), 'w') as service_file:
        template = get_template('systemd.service')
        logger.info(
            "Write configuration to {}".format(service_file.name),
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_WRITE_SERVICE_CONFIG})
        service_file.write(
            template.render(cmd=' '.join(cmd),
                            description='Auction ' + tender_id,
                            id='auction_' + self.auction_doc_id + '.service'),
        )
    # Fire the timer 15 minutes ahead of the planned start.
    start_time = (start_date - timedelta(minutes=15)).astimezone(tzlocal())
    extra_start_time = datetime.now(tzlocal()) + timedelta(seconds=15)
    if extra_start_time > start_time:
        # The planned lead time is already gone; push the timer to now+15s.
        logger.warning(
            'Planned auction\'s starts date in the past',
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_START_DATE_IN_PAST}
        )
        start_time = extra_start_time
        if start_time > start_date:
            # Even the fallback start is after the auction start: give up.
            logger.error(
                'We not have a time to start auction',
                extra={"JOURNAL_REQUEST_ID": self.request_id,
                       "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_NO_TIME}
            )
            sys.exit()
    with open(os.path.join(home_dir, SYSTEMD_RELATIVE_PATH.format(self.auction_doc_id, 'timer')), 'w') as timer_file:
        template = get_template('systemd.timer')
        logger.info(
            "Write configuration to {}".format(timer_file.name),
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_WRITE_TIMER_CONFIG}
        )
        timer_file.write(template.render(
            timestamp=start_time.strftime("%Y-%m-%d %H:%M:%S"),
            description='Auction ' + tender_id)
        )
    if self.activate:
        logger.info(
            "Reload Systemd",
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_RELOAD}
        )
        response = call(['/usr/bin/systemctl', '--user', 'daemon-reload'])
        logger.info(
            "Systemctl return code: {}".format(response),
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_SYSTEMCTL_RESPONSE}
        )
        self.activate_systemd_unit()
def do_stop(self):
    """Handle a stop request: ack it, then taskkill every tracked task."""
    self.send_success('stop ok')
    DEBUG('------------task stop------------')
    stoppable = []
    for raw_id in self.task_ids.split(','):
        # Keep only ids currently tracked in the global TASK map.
        if int(raw_id) in TASK:
            stoppable.append(int(raw_id))
    for task_id in stoppable:
        pid = TASK.pop(task_id)
        try:
            INFO('%s stop a new task,task_id:%s,pid:%s' %
                 (self.MODULE_NAME, task_id, pid))
            call(['taskkill', '/F', '/T', '/PID', str(pid)])
        except Exception:
            ERROR('%s taskkill a task failed,task_id:%s,pid:%s' %
                  (self.MODULE_NAME, task_id, pid))
def cleanup():
    """Stop, disable and remove systemd auction units scheduled before today.

    Scans the user's systemd directory for ``auction_*.timer`` files, and
    for every timer whose timestamp is older than today's midnight: stops
    and disables the unit, then removes both the .timer and its matching
    .service file. Finally reloads the systemd user daemon once.
    """
    # Midnight of today, local time: anything scheduled before this is stale.
    today_datestamp = datetime.now()
    today_datestamp = today_datestamp.replace(
        today_datestamp.year, today_datestamp.month, today_datestamp.day,
        0, 0, 0
    )
    systemd_files_dir = os.path.join(os.path.expanduser('~'), SYSTEMD_DIRECORY)
    for filename in os.listdir(systemd_files_dir):
        if filename.startswith('auction_') and filename.endswith('.timer'):
            # Strip the 'auction_' prefix and '.timer' suffix.
            tender_id = filename[8:-6]
            full_filename = os.path.join(systemd_files_dir, filename)
            with open(full_filename) as timer_file:
                r = TIMER_STAMP.search(timer_file.read())
            if r:
                # TIMER_STAMP groups are the date/time components of OnCalendar.
                datetime_args = [int(term) for term in r.groups()]
                if datetime(*datetime_args) < today_datestamp:
                    code = call(['/usr/bin/systemctl', '--user', 'stop', filename])
                    logger.info(
                        "systemctl stop {} - return code: {}".format(filename, code),
                        extra={'JOURNAL_TENDER_ID': tender_id,
                               'MESSAGE_ID': AUCTION_WORKER_SYSTEMD_UNITS_SYSTEMCTL_STOP_AUCTION_TIMER}
                    )
                    code = call(['/usr/bin/systemctl', '--user', 'disable', filename, '--no-reload'])
                    logger.info(
                        "systemctl disable {} --no-reload - return code: {}".format(filename, code),
                        extra={'JOURNAL_TENDER_ID': tender_id,
                               'MESSAGE_ID': AUCTION_WORKER_SYSTEMD_UNITS_SYSTEMCTL_DISABLE_AUCTION_TIMER}
                    )
                    logger.info(
                        'Remove systemd file: {}'.format(full_filename),
                        extra={'JOURNAL_TENDER_ID': tender_id,
                               'MESSAGE_ID': AUCTION_WORKER_CLEANUP_REMOVE_SYSTEMD_AUCTION_TIMER}
                    )
                    os.remove(full_filename)
                    # The matching .service file lives next to the .timer.
                    full_filename = full_filename[:-5] + 'service'
                    logger.info(
                        'Remove systemd file: {}'.format(full_filename),
                        extra={'JOURNAL_TENDER_ID': tender_id,
                               'MESSAGE_ID': AUCTION_WORKER_CLEANUP_REMOVE_SYSTEMD_AUCTION_SERVICE}
                    )
                    os.remove(full_filename)
    # One daemon-reload after all removals.
    code = call(['/usr/bin/systemctl', '--user', 'daemon-reload'])
    logger.info(
        "systemctl --user daemon-reload - return code: {}".format(code),
        extra={"MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_DAEMON_RELOAD}
    )
def call(args):
    """Run *args*; when USE_SU is set, run them through an 'su' shell."""
    if not USE_SU:
        return subprocess.call(args)
    proc = subprocess.Popen('su', stdin=subprocess.PIPE)
    # Feed the command and an explicit 'exit' so the shell terminates.
    proc.stdin.write(' '.join(args))
    proc.stdin.write('\nexit\n')
    proc.communicate()
    return proc.poll()
def hook(self, name, *args):
    """
    Executes a given hook. All additional arguments are passed to the
    hook as script arguments.

    :param name: Hook name (like session.pre-up)
    """
    script = self.hooks.get(name)
    if not script:
        # No hook registered under this name.
        return

    # Execute the registered hook
    str_args = [str(x) for x in args]
    logger.debug("Executing hook '%s' via script '%s %s'." % (name, script, str(str_args)))

    try:
        gevent_subprocess.call([script] + str_args)
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate; script failures stay best-effort.
        logger.warning("Failed to execute hook '%s'!" % script)
        logger.warning(traceback.format_exc())
def initialize_isolate(self):
    """Initialize isolate's box.

    :raises SandboxInterfaceException: when the init command fails.
    """
    init_cmd = [self.box_exec]
    if self.cgroup:
        init_cmd.append("--cg")
    init_cmd += ["--box-id=%d" % self.box_id, "--init"]
    ret = subprocess.call(init_cmd)
    if ret != 0:
        raise SandboxInterfaceException(
            "Failed to initialize sandbox with command: %s "
            "(error %d)" % (pretty_print_cmdline(init_cmd), ret))
def start_driver(self):
    """Launch the 'swave2' driver process and start the heartbeat greenlet.

    Loops until the driver has been started successfully.
    """
    while True:
        try:
            try:
                # Best-effort removal of a stale pigpio pid file.
                call(['rm', '/var/run/pigpio.pid'])
            except Exception, e:
                pass
            self.cleanup()
            self.driver_name = 'swave2'
            swave_path = os.path.join(self.file_dir, self.driver_name)
            print "swave executable: ", swave_path
            self.swave = Popen([swave_path])
            sleep(1)  # change this line with "as soon as started"
            self.send_signal(self.HB)
            self.run_heartbeat = True
            gevent.spawn(self.send_heartbeat)
            break
        # NOTE(review): the body of this outer handler is truncated in the
        # source as provided -- restore it from the original file.
        except:
def call(args):
    """Run *args*; under USE_SU, exec them inside an 'su' shell instead."""
    if not USE_SU:
        return subprocess.call(args)
    proc = subprocess.Popen('su', stdin=subprocess.PIPE)
    # Let callers terminate the exec'd command, not just the su wrapper.
    proc.terminate = functools.partial(sudo_kill, proc.pid)
    # 'exec' replaces the shell so the pid tracked above is meaningful.
    proc.stdin.write('exec ' + ' '.join(args) + '\n')
    proc.communicate()
    return proc.poll()
def start_driver(self):
    """Start the 'swave2' driver binary and spawn the heartbeat sender.

    Retries in a loop until the driver comes up.
    """
    while True:
        try:
            try:
                # Remove a stale pigpio pid file; ignore if absent.
                call(["rm", "/var/run/pigpio.pid"])
            except Exception, e:
                pass
            self.cleanup()
            self.driver_name = "swave2"
            swave_path = os.path.join(self.file_dir, self.driver_name)
            print "swave executable: ", swave_path
            self.swave = Popen([swave_path])
            sleep(1)  # change this line with "as soon as started"
            self.send_signal(self.HB)
            self.run_heartbeat = True
            gevent.spawn(self.send_heartbeat)
            break
        # NOTE(review): the body of this outer handler is truncated in the
        # source as provided -- restore it from the original file.
        except:
def call(args):
    """Execute *args*, wrapping them in an 'su' shell when USE_SU is set."""
    if USE_SU:
        proc = subprocess.Popen("su", stdin=subprocess.PIPE)
        # Expose a terminate() that kills the exec'd command via sudo.
        proc.terminate = functools.partial(sudo_kill, proc.pid)
        command_line = "exec " + " ".join(args) + "\n"
        proc.stdin.write(command_line)
        proc.communicate()
        return proc.poll()
    return subprocess.call(args)
def startfile(path):
    """Open *path* with the platform's default handler.

    Uses ``os.startfile`` where available (Windows); otherwise falls back
    to ``open`` on macOS or ``xdg-open`` elsewhere. Returns False when the
    fallback launcher could not be started.
    """
    try:
        return os.startfile(path)
    except AttributeError:
        # os.startfile only exists on Windows.
        if sys.platform == "darwin":
            tool = "open"
        else:
            tool = "xdg-open"
        try:
            return subprocess.call([tool, path])
        except OSError:
            # Fix: narrowed from a bare `except:` -- only launcher startup
            # failures (missing/non-executable tool) report False;
            # KeyboardInterrupt and the like now propagate.
            return False
def activate_systemd_unit(self):
    """Restart and enable this auction's systemd user timer unit."""
    logger.info(
        "Start timer",
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_START_TIMER}
    )
    timer_file = 'auction_' + '.'.join([self.auction_doc_id, 'timer'])
    systemctl = ['/usr/bin/systemctl', '--user']
    response = call(systemctl + ['reload-or-restart', timer_file])
    logger.info(
        "Systemctl 'reload-or-restart' return code: {}".format(response),
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_SYSTEMCTL_RELOAD_OR_RESTART}
    )
    # Enable the timer so it survives reboots / daemon reloads.
    response = call(systemctl + ['enable', timer_file])
    logger.info(
        "Systemctl 'enable' return code: {}".format(response),
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS_SYSTEMCTL_ENABLE}
    )
def run_hard_kill(self):
    """Hard-kill processes on a random fraction of the running nodes via ssh.

    Picks random nodes until ``shutdown_fraction`` of them have been
    processed, runs ``killall -9`` over ssh for both $USER and root, and
    records per-node success/failure results in ``self.results``.
    """
    self.killed_nodes = []
    failed_nodes = []
    kill_results = []
    kill_failed_results = []
    # Number of nodes to take down this round.
    shutdown_count = math.floor(len(self.running_nodes)*self.shutdown_fraction)
    while len(self.killed_nodes) + len(failed_nodes) < shutdown_count:
        address = random.choice(self.running_nodes)
        node_name = self.get_node_name(address)
        # NOTE(review): the login template below appears redacted in this
        # copy of the source ('******') -- confirm against the original.
        ssh_login = '******'.format(
            slice=self.slices[self.extra['slice']],
            node=node_name.split(':')[0])
        ssh_command = "sudo killall -u {user} -9"
        results = set()
        # killall exit codes 0/1 and ssh's 255 are treated as acceptable.
        successful_result_values = [0, 1, 255]
        for user in ['$USER', 'root']:
            command = ssh_command.format(user=user)
            l.info('Running command (' + command + ') on ' + ssh_login)
            try:
                result = subprocess.call(['ssh', '-o', 'UserKnownHostsFile=/dev/null', '-o', 'StrictHostKeyChecking=no', ssh_login, command])
            except:
                l.exception('Command failed')
                result = -1
            if result not in successful_result_values:
                results.add(result)
        if len(results) == 0:
            self.killed_nodes.append(address)
            kill_results.append(TextTestResult(node_name, 'All processes terminated.'))
        else:
            failed_nodes.append(address)
            kill_failed_results.append(TextTestResult(node_name, 'Processes not successfully terminated, error code(s) ' + (', '.join(map(str, results))) + '.'))
        # Processed either way; don't pick this node again.
        self.running_nodes.remove(address)
    if len(kill_failed_results) > 0:
        self.results.extend(kill_failed_results)
    else:
        self.results.append(TextTestResult(
            'Node kill',
            'All nodes properly killed.'
        ))
    if len(kill_results):
        self.results.append(ClusteredTestResult(
            map(self.get_node_name, self.killed_nodes),
            kill_results
        ))
    else:
        self.results.append(TextTestResult(
            'Node kill',
            'No nodes were killed.'
        ))
def _execute(self, command):
    """Execute the given command in the sandbox.

    command (list): executable filename and arguments of the command.

    return (bool): True if the sandbox didn't report errors (caused by
    the sandbox itself), False otherwise
    """
    self.exec_num += 1
    self.log = None
    args = [self.box_exec] + self.build_box_options() + ["--"] + command
    joined_args = " ".join(args)
    logger.debug("Executing program in sandbox with command: %s" % joined_args)
    # Keep an audit trail of every command executed in this sandbox.
    with open(self.relative_path(self.cmd_file), 'a') as commands:
        commands.write("%s\n" % (joined_args,))
    return self.translate_box_exitcode(subprocess.call(args))
def event_producer(fd, server):
    """Watch inotify events on *fd*; restart the server when files change."""
    while True:
        restart = False
        events = inotify.get_events(fd, None)
        for event in events:
            ignore = False
            # Skip files git ignores and anything under .git/.
            if call(['git', 'check-ignore', event.name]) == 0 or fnmatch.fnmatch(event.name, '.git/*'):
                ignore = True
                # NOTE(review): this `break` abandons the rest of the event
                # batch as soon as one ignored file is seen -- `continue`
                # may have been intended; confirm against the original.
                break
            if not ignore:
                print "File changed:", event.name, event.get_mask_description()
                restart = True
        if restart:
            print 'Restarting %s ...\n' % sys.argv
            server.stop(timeout=1)
            server.close()
            print 'Waiting...'
            time.sleep(3)
            print 'Restart.'
            # Replace the current process with a fresh copy of ourselves.
            os.execvp(sys.argv[0], sys.argv)
def call1(cmd):
    """Run *cmd* as a single-word command line and wait for it to finish."""
    argv = [cmd]
    subprocess.call(argv)
def __init__(self, multithreaded, file_cacher, name=None, temp_dir=None):
    """Initialization.

    For arguments documentation, see SandboxBase.__init__.
    """
    SandboxBase.__init__(self, multithreaded, file_cacher, name, temp_dir)
    # Isolate only accepts ids between 0 and 99. We assign the
    # range [(shard+1)*10, (shard+2)*10) to each Worker and keep
    # the range [0, 10) for other uses (command-line scripts like
    # cmsMake or direct console users of isolate). Inside each
    # range ids are assigned sequentially, with a wrap-around.
    # FIXME This is the only use of FileCacher.service, and it's an
    # improper use! Avoid it!
    if file_cacher is not None and file_cacher.service is not None:
        box_id = ((file_cacher.service.shard + 1) * 10 +
                  (IsolateSandbox.next_id % 10)) % 100
    else:
        box_id = IsolateSandbox.next_id % 10
    IsolateSandbox.next_id += 1
    # We create a directory "tmp" inside the outer temporary directory,
    # because the sandbox will bind-mount the inner one. The sandbox also
    # runs code as a different user, and so we need to ensure that they can
    # read and write to the directory. But we don't want everybody on the
    # system to, which is why the outer directory exists with no read
    # permissions.
    self.inner_temp_dir = "/tmp"
    self.outer_temp_dir = tempfile.mkdtemp(
        dir=self.temp_dir,
        prefix="cms-%s-" % (self.name))
    # Don't use os.path.join here, because the absoluteness of /tmp will
    # bite you.
    self.path = self.outer_temp_dir + self.inner_temp_dir
    os.mkdir(self.path)
    self.allow_writing_all()
    self.exec_name = 'isolate'
    self.box_exec = self.detect_box_executable()
    self.info_basename = "run.log"  # Used for -M
    self.log = None
    self.exec_num = -1
    logger.debug("Sandbox in `%s' created, using box `%s'.",
                 self.path, self.box_exec)
    # Default parameters for isolate (the comment after each line names
    # the corresponding isolate command-line option).
    self.box_id = box_id  # -b
    self.cgroup = config.use_cgroups  # --cg
    self.chdir = self.inner_temp_dir  # -c
    self.dirs = []  # -d
    self.dirs += [(self.inner_temp_dir, self.path, "rw")]
    self.preserve_env = False  # -e
    self.inherit_env = []  # -E
    self.set_env = {}  # -E
    self.fsize = None  # -f
    self.stdin_file = None  # -i
    self.stack_space = None  # -k
    self.address_space = None  # -m
    self.stdout_file = None  # -o
    self.stderr_file = None  # -r
    self.timeout = None  # -t
    self.verbosity = 0  # -v
    self.wallclock_timeout = None  # -w
    self.extra_timeout = None  # -x
    # Set common environment variables.
    # Specifically needed by Python, that searches the home for
    # packages.
    self.set_env["HOME"] = "./"
    # Needed on Ubuntu by PHP (and more, ) that
    # have in /usr/bin only a symlink to one out of many
    # alternatives.
    if os.path.isdir("/etc/alternatives"):
        self.add_mapped_directories(["/etc/alternatives"])
    # Tell isolate to get the sandbox ready. We do our best to
    # cleanup after ourselves, but we might have missed something
    # if the worker was interrupted in the middle of an execution.
    self.cleanup()
    init_cmd = [self.box_exec] + (["--cg"] if self.cgroup else []) \
        + ["--box-id=%d" % self.box_id] + ["--init"]
    ret = subprocess.call(init_cmd)
    if ret != 0:
        raise SandboxInterfaceException(
            "Failed to initialize sandbox with command: %s "
            "(error %d)" % (pretty_print_cmdline(init_cmd), ret))
def makeInstance(self):
    """Create the game instance via the OPSCRIPT helper.

    Returns True when the instance already exists or the helper script
    reports success (exit code 0).
    """
    if self.exists():
        return True
    scrpt = os.path.join(self.installdir, Gameinstace.OPSCRIPT)
    return call([scrpt, u'instance', self.datadir, self.rundir]) == 0
def __init__(self, file_cacher=None, temp_dir=None):
    """Initialization.

    For arguments documentation, see SandboxBase.__init__.
    """
    SandboxBase.__init__(self, file_cacher, temp_dir)
    # Get our shard number, to use as a unique identifier for the sandbox
    # on this machine.
    if file_cacher is not None and file_cacher.service is not None:
        # We add 1 to avoid conflicting with console users of the
        # sandbox who use the default box id of 0.
        box_id = file_cacher.service._my_coord.shard + 1
    else:
        box_id = 0
    # We create a directory "tmp" inside the outer temporary directory,
    # because the sandbox will bind-mount the inner one. The sandbox also
    # runs code as a different user, and so we need to ensure that they can
    # read and write to the directory. But we don't want everybody on the
    # system to, which is why the outer directory exists with no read
    # permissions.
    self.inner_temp_dir = "/tmp"
    if temp_dir is None:
        temp_dir = config.temp_dir
    self.outer_temp_dir = tempfile.mkdtemp(dir=temp_dir)
    # Don't use os.path.join here, because the absoluteness of /tmp will
    # bite you.
    self.path = self.outer_temp_dir + self.inner_temp_dir
    os.mkdir(self.path)
    # World-writable so the sandboxed (different) user can use it.
    os.chmod(self.path, 0777)
    self.exec_name = 'isolate'
    self.box_exec = self.detect_box_executable()
    self.info_basename = "run.log"  # Used for -M
    # Audit log of the commands executed inside this sandbox.
    self.cmd_file = "commands.log"
    self.log = None
    self.exec_num = -1
    logger.debug("Sandbox in `%s' created, using box `%s'."
                 % (self.path, self.box_exec))
    # Default parameters for isolate (comments name the matching option).
    self.box_id = box_id  # -b
    self.cgroup = config.use_cgroups  # --cg
    self.chdir = self.inner_temp_dir  # -c
    self.dirs = []  # -d
    self.dirs += [(self.inner_temp_dir, self.path, "rw")]
    self.preserve_env = False  # -e
    self.inherit_env = []  # -E
    self.set_env = {}  # -E
    self.stdin_file = None  # -i
    self.stack_space = None  # -k
    self.address_space = None  # -m
    self.stdout_file = None  # -o
    self.max_processes = 1  # -p
    self.stderr_file = None  # -r
    self.timeout = None  # -t
    self.verbosity = 0  # -v
    self.wallclock_timeout = None  # -w
    self.extra_timeout = None  # -x
    # Tell isolate to get the sandbox ready.
    box_cmd = [self.box_exec] + (["--cg"] if self.cgroup else []) \
        + ["-b", str(self.box_id)]
    ret = subprocess.call(box_cmd + ["--init"])
    if ret != 0:
        raise SandboxInterfaceException(
            "Failed to initialize sandbox (error %d)" % ret)
def removeInstance(self):
    """Stop the running instance (if any) and remove it via OPSCRIPT.

    Returns True when there was nothing to remove or the helper script
    succeeded (exit code 0).
    """
    if not self.exists():
        return True
    self.stop()
    scrpt = os.path.join(self.installdir, Gameinstace.OPSCRIPT)
    return call([scrpt, u'remove', self.datadir, self.rundir]) == 0
def __init__(self, file_cacher=None, temp_dir=None):
    """Initialization.

    For arguments documentation, see SandboxBase.__init__.
    """
    SandboxBase.__init__(self, file_cacher)
    # Get our shard number, to use as a unique identifier for the
    # sandbox on this machine. FIXME This is the only use of
    # FileCacher.service, and it's an improper use! Avoid it!
    if file_cacher is not None and file_cacher.service is not None:
        # We add 1 to avoid conflicting with console users of the
        # sandbox who use the default box id of 0.
        box_id = file_cacher.service.shard + 1
    else:
        box_id = 0
    # We create a directory "tmp" inside the outer temporary directory,
    # because the sandbox will bind-mount the inner one. The sandbox also
    # runs code as a different user, and so we need to ensure that they can
    # read and write to the directory. But we don't want everybody on the
    # system to, which is why the outer directory exists with no read
    # permissions.
    self.inner_temp_dir = "/tmp"
    if temp_dir is None:
        temp_dir = config.temp_dir
    self.outer_temp_dir = tempfile.mkdtemp(dir=temp_dir)
    # Don't use os.path.join here, because the absoluteness of /tmp will
    # bite you.
    self.path = self.outer_temp_dir + self.inner_temp_dir
    os.mkdir(self.path)
    # World-writable so the sandboxed (different) user can use it.
    os.chmod(self.path, 0777)
    self.exec_name = 'isolate'
    self.box_exec = self.detect_box_executable()
    self.info_basename = "run.log"  # Used for -M
    # Audit log of the commands executed inside this sandbox.
    self.cmd_file = "commands.log"
    self.log = None
    self.exec_num = -1
    logger.debug("Sandbox in `%s' created, using box `%s'."
                 % (self.path, self.box_exec))
    # Default parameters for isolate (comments name the matching option).
    self.box_id = box_id  # -b
    self.cgroup = config.use_cgroups  # --cg
    self.chdir = self.inner_temp_dir  # -c
    self.dirs = []  # -d
    self.dirs += [(self.inner_temp_dir, self.path, "rw")]
    self.preserve_env = False  # -e
    self.inherit_env = []  # -E
    self.set_env = {}  # -E
    self.stdin_file = None  # -i
    self.stack_space = None  # -k
    self.address_space = None  # -m
    self.stdout_file = None  # -o
    self.max_processes = 1  # -p
    self.stderr_file = None  # -r
    self.timeout = None  # -t
    self.verbosity = 0  # -v
    self.wallclock_timeout = None  # -w
    self.extra_timeout = None  # -x
    # Tell isolate to get the sandbox ready.
    box_cmd = [self.box_exec] + (["--cg"] if self.cgroup else []) \
        + ["-b", str(self.box_id)]
    ret = subprocess.call(box_cmd + ["--init"])
    if ret != 0:
        raise SandboxInterfaceException(
            "Failed to initialize sandbox (error %d)" % ret)
def _complete(self, pathname):
    """Register a finished package file at *pathname* in the database.

    Skips repo databases, symlinks and files still held open. Extracts
    package metadata via the pkginfo helper, optionally renames the file
    into the per-arch repo layout, then inserts or updates its row in the
    ``packages`` table under a per-(name, arch) lock.
    """
    # Repo database tarballs (and their .lck variants) are not packages.
    if pathname.rstrip('.lck').endswith('.db.tar.gz'):
        return
    if os.path.islink(pathname):
        return
    # fuser exit code 0 means some process still has the file open --
    # the upload is still in progress, so postpone handling it.
    if not subprocess.call((self._command_fuser, '-s', pathname),
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE):
        logging.info('Uploading ' + pathname)
        return
    partial = False
    args = (sys.executable, self._command_pkginfo, pathname)
    if self._verify:
        args += ('-v',)
    info_p = subprocess.Popen(args, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    out, err = info_p.communicate()
    if info_p.returncode:
        # Exit code 2 from pkginfo marks a partial (incomplete) package;
        # anything else means the file is unusable.
        if info_p.returncode == 2:
            partial = True
        else:
            logging.info('Ignoring, ' + err.strip())
            return
    info = ujson.loads(out)
    name = info[u'pkgname']
    version = info[u'pkgver']
    arch = info[u'arch']
    packager = info.get(u'packager')
    # The uploader is whoever owns the file on disk.
    uploader = pwd.getpwuid(os.stat(pathname).st_uid)[0]
    mtime = datetime.utcfromtimestamp(os.path.getmtime(pathname))
    if self._auto_rename and not partial:
        # Move the file into the canonical <repo>/<arch>/ layout.
        dest_dir = os.path.join(self._repo_dir, arch)
        if not os.path.isdir(dest_dir):
            os.mkdir(dest_dir)
        dest_path = os.path.join(dest_dir, '%s-%s-%s.pkg.tar.%s' % (
            name, version, arch, pathname.rsplit('.', 1)[-1]))
        if pathname != dest_path:
            # Remember the rename so the watcher ignores its own event.
            self._ignored_move_events.add((pathname, dest_path))
            os.rename(pathname, dest_path)
            pathname = dest_path
    # Serialize concurrent handling of the same (name, arch) package.
    with self._same_pkg_locks[(name, arch)], self._pool.cursor() as cur:
        owner = OwnerFinderInAll(cur)(packager, uploader)
        cur.execute(
            'SELECT id, latest, enabled FROM packages '
            'WHERE name=%s AND arch=%s AND version=%s',
            (name, arch, version))
        result = cur.fetchone()
        # fields/values stay index-aligned; both feed the SQL below.
        fields = (
            'description', 'url', 'pkg_group', 'license', 'packager',
            'base_name', 'build_date', 'size', 'depends', 'uploader',
            'owner', 'opt_depends', 'enabled', 'file_path', 'last_update')
        values = (
            info.get(u'pkgdesc'), info.get(u'url'), info.get(u'group'),
            info.get(u'license'), packager, info.get(u'pkgbase', name),
            int(info.get(u'builddate', time.time())), info.get(u'size'),
            to_list(info.get(u'depend', [])), uploader, owner,
            to_list(info.get(u'optdepend', [])), not partial, pathname,
            mtime)
        if not result:
            logging.info('Adding new file %s(%s)', name, arch)
            cur.execute(
                'INSERT INTO packages (name, arch, version, %s) '
                'VALUES (%%s, %%s, %%s, %s) RETURNING id' % (
                    ', '.join(fields), ', '.join(['%s'] * len(values))),
                (name, arch, version) + values)
            pid, = cur.fetchone()
            logging.debug('Inserted with id %s', pid)
            if not partial:
                self._checkLatest(cur, name, arch, pathname, pid, version)
        else:
            pid, latest, enabled = result
            logging.info('Updating file #%s %s arch:%s', pid, name, arch)
            # A partial re-upload of the latest version demotes it.
            if latest and partial:
                fields += ('latest',)
                values += (False,)
            cur.execute(
                'UPDATE packages SET %s WHERE id=%%s' % (
                    ', '.join([x + '=%s' for x in fields]),),
                values + (pid,))
            if latest and partial:
                self._removeLatest(cur, name, arch)
            if not enabled and not partial:
                self._checkLatest(cur, name, arch, pathname, pid, version)
def prepare_tasks(self, tenderID, startDate):
    """Write systemd user units for this auction, then reload and start them.

    :param tenderID: tender identifier used in unit descriptions
    :param startDate: planned auction start (timezone-aware datetime)

    Exits via sys.exit() when no usable start time remains. Unlike the
    newer variant, activation (daemon-reload, reload-or-restart, enable)
    is performed unconditionally at the end.
    """
    # The service unit re-runs the current script with the 'run' subcommand.
    cmd = deepcopy(sys.argv)
    cmd[0] = os.path.abspath(cmd[0])
    cmd[1] = 'run'
    home_dir = os.path.expanduser('~')
    with open(os.path.join(home_dir, SYSTEMD_RELATIVE_PATH.format(self.auction_doc_id, 'service')), 'w') as service_file:
        template = get_template('systemd.service')
        logger.info(
            "Write configuration to {}".format(service_file.name),
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS})
        service_file.write(
            template.render(cmd=' '.join(cmd),
                            description='Auction ' + tenderID,
                            id='auction_' + self.auction_doc_id + '.service'),
        )
    # Fire the timer 15 minutes before the planned start.
    start_time = (startDate - timedelta(minutes=15)).astimezone(tzlocal())
    extra_start_time = datetime.now(tzlocal()) + timedelta(seconds=15)
    if extra_start_time > start_time:
        # Lead time already gone; fall back to now+15s.
        logger.warning(
            'Planned auction\'s starts date in the past',
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
        )
        start_time = extra_start_time
        if start_time > startDate:
            logger.error(
                'We not have a time to start auction',
                extra={"JOURNAL_REQUEST_ID": self.request_id,
                       "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
            )
            sys.exit()
    with open(os.path.join(home_dir, SYSTEMD_RELATIVE_PATH.format(self.auction_doc_id, 'timer')), 'w') as timer_file:
        template = get_template('systemd.timer')
        logger.info(
            "Write configuration to {}".format(timer_file.name),
            extra={"JOURNAL_REQUEST_ID": self.request_id,
                   "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
        )
        timer_file.write(template.render(
            timestamp=start_time.strftime("%Y-%m-%d %H:%M:%S"),
            description='Auction ' + tenderID)
        )
    logger.info(
        "Reload Systemd",
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
    )
    response = call(['/usr/bin/systemctl', '--user', 'daemon-reload'])
    logger.info(
        "Systemctl return code: {}".format(response),
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
    )
    logger.info(
        "Start timer",
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
    )
    timer_file = 'auction_' + '.'.join([self.auction_doc_id, 'timer'])
    response = call(['/usr/bin/systemctl', '--user', 'reload-or-restart', timer_file])
    logger.info(
        "Systemctl 'reload-or-restart' return code: {}".format(response),
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
    )
    response = call(['/usr/bin/systemctl', '--user', 'enable', timer_file])
    logger.info(
        "Systemctl 'enable' return code: {}".format(response),
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_SYSTEMD_UNITS}
    )
def __init__(self, multithreaded, file_cacher, temp_dir=None):
    """Initialization.

    For arguments documentation, see SandboxBase.__init__.
    """
    SandboxBase.__init__(self, multithreaded, file_cacher, temp_dir)
    # Isolate only accepts ids between 0 and 99. We assign the
    # range [(shard+1)*10, (shard+2)*10) to each Worker and keep
    # the range [0, 10) for other uses (command-line scripts like
    # cmsMake or direct console users of isolate). Inside each
    # range ids are assigned sequentially, with a wrap-around.
    # FIXME This is the only use of FileCacher.service, and it's an
    # improper use! Avoid it!
    if file_cacher is not None and file_cacher.service is not None:
        box_id = ((file_cacher.service.shard + 1) * 10 +
                  (IsolateSandbox.next_id % 10)) % 100
    else:
        box_id = IsolateSandbox.next_id % 10
    IsolateSandbox.next_id += 1
    # We create a directory "tmp" inside the outer temporary directory,
    # because the sandbox will bind-mount the inner one. The sandbox also
    # runs code as a different user, and so we need to ensure that they can
    # read and write to the directory. But we don't want everybody on the
    # system to, which is why the outer directory exists with no read
    # permissions.
    self.inner_temp_dir = "/tmp"
    self.outer_temp_dir = tempfile.mkdtemp(dir=self.temp_dir)
    # Don't use os.path.join here, because the absoluteness of /tmp will
    # bite you.
    self.path = self.outer_temp_dir + self.inner_temp_dir
    os.mkdir(self.path)
    self.allow_writing_all()
    self.exec_name = 'isolate'
    self.box_exec = self.detect_box_executable()
    self.info_basename = "run.log"  # Used for -M
    self.log = None
    self.exec_num = -1
    logger.debug("Sandbox in `%s' created, using box `%s'.",
                 self.path, self.box_exec)
    # Default parameters for isolate (comments name the matching option).
    self.box_id = box_id  # -b
    self.cgroup = config.use_cgroups  # --cg
    self.chdir = self.inner_temp_dir  # -c
    self.dirs = []  # -d
    self.dirs += [(self.inner_temp_dir, self.path, "rw")]
    self.preserve_env = False  # -e
    self.inherit_env = []  # -E
    self.set_env = {}  # -E
    self.fsize = None  # -f
    self.stdin_file = None  # -i
    self.stack_space = None  # -k
    self.address_space = None  # -m
    self.stdout_file = None  # -o
    self.stderr_file = None  # -r
    self.timeout = None  # -t
    self.verbosity = 0  # -v
    self.wallclock_timeout = None  # -w
    self.extra_timeout = None  # -x
    # Set common environment variables.
    # Specifically needed by Python, that searches the home for
    # packages.
    self.set_env["HOME"] = "./"
    # Needed on Ubuntu by PHP (and more, ) that
    # have in /usr/bin only a symlink to one out of many
    # alternatives.
    if os.path.isdir("/etc/alternatives"):
        self.add_mapped_directories(["/etc/alternatives"])
    # Tell isolate to get the sandbox ready.
    box_cmd = [self.box_exec] + (["--cg"] if self.cgroup else []) \
        + ["--box-id=%d" % self.box_id] + ["--init"]
    ret = subprocess.call(box_cmd)
    if ret != 0:
        raise SandboxInterfaceException(
            "Failed to initialize sandbox with command: %s "
            "(error %d)" % (pretty_print_cmdline(box_cmd), ret))
    # Cleanup has not run yet for this freshly-initialized sandbox.
    self._has_cleanedup = False
def _play_with_ffplay(seg):
    """Play audio segment *seg* by exporting it to a temp WAV for ffplay."""
    with NamedTemporaryFile("w+b", suffix=".wav") as tmp:
        seg.export(tmp.name, "wav")
        # -autoexit makes ffplay return as soon as playback finishes.
        playback_cmd = [PLAYER, "-nodisp", "-autoexit", "-hide_banner", tmp.name]
        subprocess.call(playback_cmd)