def main(setpoint, ip=None):
    """Start the gamepad reader and fuzzy-velocity workers, then dispatch
    robot orders until the 'r2' command is received.

    setpoint: target value handed to the fuzzy velocity controller.
    ip: optional override for the client library's IP address.
    """
    aa = cliente_lib()
    if ip is not None:
        aa.ip = ip
    cliente = aa.cliente_inicio()
    c = gamepad()
    try:
        pipe, msg = c.gamepad_init()
    except Exception:
        # gamepad unavailable: release the client and abort
        # NOTE(review): 'Cliente_apaga' vs 'cliente_apaga' (below) differ in
        # case — confirm both methods exist on cliente_lib.
        aa.Cliente_apaga(cliente)
        exit(1)
    q = Queue()    # joystick orders
    q1 = Queue()   # robot speed
    # seed the queues so the workers have data at startup
    q.put(1)
    q1.put(-1)
    # launch worker processes
    p = Process(target=palanca, args=(c, pipe, msg, q,))
    p1 = Process(target=fuzzy_velocidad, args=(q1, setpoint, aa, cliente,))
    p.start()
    p1.start()
    while 1:
        # BUG FIX: the original called p1.run() and p.run() on every
        # iteration, which re-executed both worker targets synchronously in
        # this parent process. The workers are already running after start().
        a = robot_ordenes(q, q1, aa, cliente)
        if a == "r2":
            break
    p1.terminate()
    p.terminate()
    aa.cliente_apaga(cliente)
def _execute(self, func_arn, func_details, event, context=None, version=None):
    """Run a Lambda handler in a forked subprocess and return its result.

    The child's result (or exception) is passed back through a
    multiprocessing Queue; captured stdout/stderr is stored to CloudWatch.
    """
    lambda_cwd = func_details.cwd
    environment = self._prepare_environment(func_details)
    # execute the Lambda function in a forked sub-process, sync result via queue
    queue = Queue()
    lambda_function = func_details.function(version)

    def do_execute():
        # now we're executing in the child process, safe to change CWD and ENV
        if lambda_cwd:
            os.chdir(lambda_cwd)
        if environment:
            os.environ.update(environment)
        try:
            queue.put((lambda_function(event, context), None))
        except Exception as e:
            # forward errors to the parent instead of leaving queue.get() hanging
            queue.put((None, e))

    process = Process(target=do_execute)
    with CaptureOutput() as c:
        # BUG FIX: the original called process.run(), which executed the
        # target in the *parent* process (clobbering its CWD and ENV).
        # start() actually forks the child that the comment above assumes.
        process.start()
        result, error = queue.get()
        process.join()
    if error is not None:
        raise error
    # Make sure to keep the log line below, to ensure the log stream gets created
    log_output = 'START: Lambda %s started via "local" executor ...' % func_arn
    # TODO: Interweaving stdout/stderr currently not supported
    for stream in (c.stdout(), c.stderr()):
        if stream:
            log_output += ('\n' if log_output else '') + stream
    # store logs to CloudWatch
    _store_logs(func_details, log_output)
    return result
def run_lambda(func, event, context, func_arn, suppress_output=False):
    """Execute a Lambda handler either in Docker or in a forked subprocess.

    Returns the handler's result, or an error_response(...) on failure.
    When suppress_output is set, stdout/stderr are redirected for the call.
    """
    if suppress_output:
        stdout_ = sys.stdout
        stderr_ = sys.stderr
        stream = StringIO()
        sys.stdout = stream
        sys.stderr = stream
    lambda_cwd = lambda_arn_to_cwd.get(func_arn)
    result = None
    try:
        runtime = lambda_arn_to_runtime.get(func_arn)
        handler = lambda_arn_to_handler.get(func_arn)
        if use_docker():
            if config.LAMBDA_REMOTE_DOCKER:
                # remote Docker daemon: copy the code into the container
                cmd = ('CONTAINER_ID="$(docker create'
                       ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                       ' -e HOSTNAME="$HOSTNAME"'
                       ' "lambci/lambda:%s" "%s"'
                       ')";'
                       'docker cp "%s/." "$CONTAINER_ID:/var/task";'
                       'docker start -a "$CONTAINER_ID";') % (runtime, handler, lambda_cwd)
            else:
                # local Docker daemon: mount the code directory directly
                cmd = ('docker run'
                       ' -v "%s":/var/task'
                       ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                       ' -e HOSTNAME="$HOSTNAME"'
                       ' "lambci/lambda:%s" "%s"') % (lambda_cwd, runtime, handler)
            print(cmd)
            result = run(cmd, env_vars={
                'AWS_LAMBDA_EVENT_BODY': json.dumps(event).replace("'", "\\'"),
                'HOSTNAME': DOCKER_BRIDGE_IP,
            })
        else:
            # execute the Lambda function in a forked sub-process, sync result via queue
            queue = Queue()

            def do_execute():
                # now we're executing in the child process, safe to change CWD
                if lambda_cwd:
                    os.chdir(lambda_cwd)
                try:
                    queue.put((func(event, context), None))
                except Exception as e:
                    # forward child errors to the parent
                    queue.put((None, e))

            process = Process(target=do_execute)
            # BUG FIX: the original used process.run(), which executed the
            # handler in this process (and chdir'd the parent). start() forks
            # the child the inner comment assumes.
            process.start()
            result, error = queue.get()
            process.join()
            if error is not None:
                raise error
    except Exception as e:
        return error_response("Error executing Lambda function: %s %s" % (e, traceback.format_exc()))
    finally:
        if suppress_output:
            sys.stdout = stdout_
            sys.stderr = stderr_
    return result
def run(self):
    """Execute the process target, reporting the outcome over the child
    end of the exception pipe: ``None`` on success, or a tuple of
    ``(exception, formatted_traceback)`` on failure."""
    try:
        Process.run(self)
    except Exception as exc:
        self._cconn.send((exc, traceback.format_exc()))
    else:
        self._cconn.send(None)
def _execute(self, func_arn, func_details, event, context=None, version=None):
    """Run a Lambda handler in a forked subprocess.

    Returns a (result, log_output) tuple; log_output holds the captured
    stdout/stderr of the child.
    """
    lambda_cwd = func_details.cwd
    environment = func_details.envvars.copy()
    # execute the Lambda function in a forked sub-process, sync result via queue
    queue = Queue()
    lambda_function = func_details.function(version)

    def do_execute():
        # now we're executing in the child process, safe to change CWD and ENV
        if lambda_cwd:
            os.chdir(lambda_cwd)
        if environment:
            os.environ.update(environment)
        try:
            queue.put((lambda_function(event, context), None))
        except Exception as e:
            # forward errors to the parent instead of leaving queue.get() hanging
            queue.put((None, e))

    process = Process(target=do_execute)
    with CaptureOutput() as c:
        # BUG FIX: run() executed the handler in the parent process (mutating
        # its CWD/ENV); start() forks the child the comment above assumes.
        process.start()
        result, error = queue.get()
        process.join()
    if error is not None:
        raise error
    # TODO: Interweaving stdout/stderr currently not supported
    log_output = ''
    for stream in (c.stdout(), c.stderr()):
        if stream:
            log_output += ('\n' if log_output else '') + stream
    return result, log_output
def _init_from_buildout(self, buildout_build):
    """Initialise this build's directory by copying the buildout build's tree
    in a background process.

    Returns the worker's pid, or MAGIC_PID_RUN_NEXT_JOB when the source
    directory does not exist.
    """
    self.ensure_one()
    if os.path.isdir(self._path()):
        shutil.rmtree(self._path(), ignore_errors=True)
    if not os.path.isdir(buildout_build._path()):
        self._log(
            'buildout',
            'Buildout directory %s does not exist' % buildout_build._path())
        return MAGIC_PID_RUN_NEXT_JOB
    self._log('buildout',
              'Copying buildout from %s' % buildout_build._path())

    def do_copy(buildout_path, own_path):
        shutil.copytree(buildout_path, own_path, symlinks=True)
        # we don't care about the logs created during the build of the
        # buildout
        shutil.rmtree(os.path.join(own_path, 'logs'), ignore_errors=True)
        os.makedirs(os.path.join(own_path, 'logs'))

    process = Process(
        target=do_copy,
        args=(buildout_build._path(), self._path()),
    )
    # BUG FIX: run() executed the copy synchronously in this process and left
    # process.pid as None (so the caller received no pid). start() forks the
    # worker and assigns a real pid.
    process.start()
    return process.pid
class PySvc(win32serviceutil.ServiceFramework):
    """Windows service wrapper that runs the CloudStack WDS agent Flask app
    in a child process."""
    _svc_name_ = "CloudStack_WDS_Agent"
    _svc_display_name_ = "CloudStack WDS Agent"
    _svc_description_ = "WDS Agent for CloudStack"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)

    # core logic of the service
    def SvcDoRun(self):
        self.process = Process(target=self.main)
        self.process.start()
        # BUG FIX: the original also called self.process.run(), which executed
        # main() a *second* time inside the service process itself. join()
        # keeps the service alive until the worker exits (or SvcStop
        # terminates it).
        self.process.join()

    # called when we're being shut down
    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.process.terminate()
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    def main(self):
        # rotating log file next to the Flask instance path
        handler = RotatingFileHandler(app.instance_path + '\\agent.log',
                                      maxBytes=10000, backupCount=1)
        handler.setLevel(logging.INFO)
        app.logger.addHandler(handler)
        load_configuration()
        app.run(threaded=True, port=8250, host='0.0.0.0', ssl_context='adhoc')
def run(self):
    # Log wall-clock timestamps around the configured sleep, then delegate to
    # the normal Process.run() so the real target (if any) executes.
    # NOTE: `print` statements here are Python 2 syntax.
    start_time = time.time()
    print '%s start sleep time : %s' % (self.name, start_time)
    time.sleep(self.sleep_time)
    end_time = time.time()
    print '%s end sleep time : %s' % (self.name, end_time)
    Process.run(self)
class Service(win32serviceutil.ServiceFramework):
    # Windows service wrapper around the Flask KYC web application.
    _svc_name_ = "Flask_KYC_WebApp_Service"
    _svc_display_name_ = "Flask_KYC_WebApp_Service"
    _svc_description_ = "Flask KYC WebApp Service"

    def __init__(self, *args):
        super().__init__(*args)

    def SvcStop(self):
        # NOTE(review): tempflag is a local that is never read — confirm intent.
        tempflag = False
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.process.terminate()
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    def SvcDoRun(self):
        self.process = Process(target=self.main)
        self.process.start()
        # NOTE(review): calling run() after start() executes main() a second
        # time inside the service process itself — join() was probably
        # intended; confirm.
        self.process.run()

    def main(self):
        application.run(host='0.0.0.0')  #, port=5000)
        '''while tempflag:
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
    """Run a Lambda handler in a forked subprocess.

    Returns a (result, log_output) tuple; log capture is not implemented
    for local execution yet, so log_output is empty.
    """
    lambda_cwd = func_details.cwd
    environment = func_details.envvars.copy()
    # execute the Lambda function in a forked sub-process, sync result via queue
    queue = Queue()
    lambda_function = func_details.function(version)

    def do_execute():
        # now we're executing in the child process, safe to change CWD and ENV
        if lambda_cwd:
            os.chdir(lambda_cwd)
        if environment:
            os.environ.update(environment)
        try:
            queue.put((lambda_function(event, context), None))
        except Exception as e:
            # forward errors to the parent instead of leaving queue.get() hanging
            queue.put((None, e))

    process = Process(target=do_execute)
    # BUG FIX: run() executed the handler in the parent process (mutating its
    # CWD/ENV); start() forks the child the inner comment assumes.
    process.start()
    result, error = queue.get()
    process.join()
    if error is not None:
        raise error
    # TODO capture log output during local execution?
    log_output = ''
    return result, log_output
def run(self) -> None:
    """Run the process target with stdout silenced, then report the outcome
    over the exception pipe: ``None`` on success, ``(exception, traceback)``
    on failure."""
    try:
        # discard everything the target prints
        sys.stdout = open(os.devnull, "w")
        Process.run(self)
    except Exception as exc:
        self._cconn.send((exc, traceback.format_exc()))
    else:
        self._cconn.send(None)
def run(self):
    """Run the target; forward success (``None``) or failure
    (``(exception, traceback)``) through the send pipe. Failures are
    re-raised after being reported."""
    try:
        Process.run(self)
    except Exception as err:  # pylint: disable=broad-except
        self._send_pipe.send((err, traceback.format_exc()))
        raise err
    else:
        self._send_pipe.send(None)
def main():
    """Run the soccer routine in a child process, shutting it down cleanly
    on Ctrl-C."""
    # BUG FIX: the original wrote `robot = Process(target=soccer).start()`,
    # which binds robot to None (start() returns None) and then called
    # robot.run() on it. Keep the Process object, start it, and wait.
    robot = Process(target=soccer)
    try:
        robot.start()
        robot.join()
    except KeyboardInterrupt:
        robot.join(0.1)
        if robot.is_alive():
            robot.terminate()
def eval_(*args, **kwargs):
    # Evaluate _child in a subprocess and fetch its result from the shared
    # queue `q` (module-level). In debug mode the target runs synchronously
    # in this process (run() instead of start()) so breakpoints and
    # tracebacks surface directly — the run() call is intentional here.
    p = Process(target=_child, args=args, kwargs=kwargs)
    if debug:
        p.run()
    else:
        p.start()
        p.join()
    # NOTE(review): assumes _child puts exactly one item on q within 5s.
    return q.get(timeout=5)
class Report(models.Model):
    # One nmap scan of a Target; the resulting report file ends up in `data`.
    target = models.ForeignKey("Target", on_delete=models.CASCADE, db_index=True, verbose_name="Target")
    name = models.CharField(max_length=100, verbose_name="Name", null=True)
    started_at = models.DateTimeField(auto_now_add=True, verbose_name="Addition time")
    finish_date = models.DateTimeField(auto_now=True, verbose_name="Finish time", null=True)
    notes = models.CharField(max_length=256, verbose_name="Notes", blank=True)
    status = models.BooleanField(default=False, verbose_name="Is ready")
    data = models.FileField(storage=storage, verbose_name="Data", null=True)

    def __str__(self):
        if self.name:
            return f"{self.name}_{self.target}_Scan#{self.target.number_of_scans}"
        return f"{self.target}_Scan#{self.target.number_of_scans}"

    def start_scan(self):
        # NOTE(review): run() executes parallel_scan synchronously in this
        # process; start() was probably intended for a background scan, but
        # the child would then save the model from another process — confirm.
        self.scan_process = Process(target=self.parallel_scan)
        self.scan_process.run()

    def parallel_scan(self):
        # Run nmap against the target, attach the report file, mark finished.
        time_started = datetime.datetime.now()
        started_time = f"{time_started.year}-{time_started.month}-{time_started.day}-{time_started.hour}-{time_started.minute}-{time_started.second}"
        file_name = f"{self}_{started_time}.txt"
        file_name_path = BASE_DIR / file_name
        command = f"nmap -oN \"{file_name_path}\" -T4 -v {self.target.ip}"
        print(command)
        stdout_file = (BASE_DIR / "stdout_stream_file.txt").resolve()
        with open(stdout_file, "w") as f:
            # scan_process = subprocess.run(shlex.split(command), stdout=f, stderr=f)
            scan_process = subprocess.run(shlex.split(command))
        with open(file_name, 'r') as file_itself:
            self.data = File(file_itself, file_name)
            print("scan finished")
            print("is finished set to true")
            self.status = True
            print("set finished datetime")
            # persist only the fields this scan changed
            super().save(update_fields=['status', 'data'])
            print("Data saved")

    def save(self, *args, **kwargs):
        # Bump the target's scan counter, save, then launch the scan.
        self.target.number_of_scans += 1
        self.target.save_for_report(update_fields=['number_of_scans'])
        time_started = datetime.datetime.now()
        # NOTE(review): date_str and filename are computed but never used here.
        date_str = f"{time_started.year}-{time_started.month}-{time_started.day}-{time_started.hour}-{time_started.minute}-{time_started.second}"
        filename = f"{self}_{date_str}.txt"
        super().save(*args, **kwargs)
        print("scan supposed to start now")
        self.start_scan()

    class Meta:
        verbose_name = "Report"
        verbose_name_plural = "Reports"
        ordering = ['-finish_date']
def process():
    """Launch the API server in a background process and return its handle."""
    api_server_process = Process(target=api_server.app.run)
    # BUG FIX: run() would execute app.run() in *this* process and block
    # forever; start() launches the server in the background as intended.
    api_server_process.start()
    time.sleep(0.2)  # give the server a moment to come up
    return api_server_process


# if __name__ == '__main__':
#     process()
class buildThread(object):
    # Wraps a worker target in a multiprocessing.Process, wiring up three
    # queues: raw output (_q), completion signal (_finq) and a ran-once
    # marker (_ranonceq).

    def __init__(self, target, clicrud, **kwargs):
        self._kwargs = kwargs
        self._kwargs['setup'] = clicrud  # target receives clicrud as 'setup'
        self._q = Queue()
        self._finq = Queue()
        self._ranonceq = Queue()
        self._target = target
        self._clicrud = clicrud
        self._ranonce = False

    def __str__(self):
        return str(self._kwargs)

    def output(self):
        # blocking read of the worker's raw output dict
        return self._q.get()

    def prettyOutput(self):
        # Format the {command: output} dict for display.
        # NOTE: iteritems() is Python 2-only.
        _output = self._q.get()
        _return = ""
        for k, v in _output.iteritems():
            _return += "\n\nCOMMAND: " + k + "--------------------\r\n\n"
            _return += v + "\n"
        return _return

    @property
    def finq(self):
        # completion signal; blocks up to 10 minutes
        return self._finq.get(timeout=600)

    def start(self):
        # launch the worker in a child process
        self._t = Process(target=self._target, args=(self._q, self._finq, self._ranonceq), kwargs=self._kwargs,)
        self._t.start()

    def stop(self):
        self._t.terminate()

    def join(self):
        self._t.join()

    def run(self):
        # NOTE(review): Process.run() executes the target synchronously in
        # the *calling* process, unlike start() — confirm callers expect that.
        self._t.run()

    @property
    def ranonce(self):
        # blocks up to 30 minutes waiting for the ran-once marker
        return self._ranonceq.get(timeout=1800)

    def getPID(self):
        return self._t.pid

    def test(self):
        # directly invoke the process bootstrap (internal API; debugging only)
        return self._t._bootstrap()
class buildThread(object):
    # Process-backed worker wrapper: exposes output (_q), completion (_finq)
    # and ran-once (_ranonceq) queues around a target callable.

    def __init__(self, target, clicrud, **kwargs):
        self._kwargs = kwargs
        self._kwargs['setup'] = clicrud  # target receives clicrud as 'setup'
        self._q = Queue()
        self._finq = Queue()
        self._ranonceq = Queue()
        self._target = target
        self._clicrud = clicrud
        self._ranonce = False

    def __str__(self):
        return str(self._kwargs)

    def output(self):
        # blocking read of the worker's raw output dict
        return self._q.get()

    def prettyOutput(self):
        # Format the {command: output} dict for display.
        # NOTE: iteritems() is Python 2-only.
        _output = self._q.get()
        _return = ""
        for k, v in _output.iteritems():
            _return += "\n\nCOMMAND: " + k + "--------------------\r\n\n"
            _return += v + "\n"
        return _return

    @property
    def finq(self):
        # completion signal; blocks up to 10 minutes
        return self._finq.get(timeout=600)

    def start(self):
        # launch the worker in a child process
        self._t = Process(
            target=self._target,
            args=(self._q, self._finq, self._ranonceq),
            kwargs=self._kwargs,
        )
        self._t.start()

    def stop(self):
        self._t.terminate()

    def join(self):
        self._t.join()

    def run(self):
        # NOTE(review): Process.run() executes the target synchronously in
        # the *calling* process, unlike start() — confirm callers expect that.
        self._t.run()

    @property
    def ranonce(self):
        # blocks up to 30 minutes waiting for the ran-once marker
        return self._ranonceq.get(timeout=1800)

    def getPID(self):
        return self._t.pid

    def test(self):
        # directly invoke the process bootstrap (internal API; debugging only)
        return self._t._bootstrap()
def processSetup(self):
    """Start one readWave worker per active channel and wait for them."""
    # Debug variant:
    # for num in range(1, 9):
    #     process = Process(target=self.Power, args=(num, ))
    #     process.start()
    #
    # BUG FIX: the original called process.run() (executing readWave
    # synchronously in this process) and then terminate() on a process that
    # was never started, which fails. Start one worker per channel, then
    # wait for all of them to finish.
    workers = []
    for num in range(1, self.numUseChannel):
        process = Process(target=self.readWave, args=(self.streams[num], ))
        process.start()
        workers.append(process)
    for process in workers:
        process.join()
def log_and_tag(message):
    """Tag the message with its original payload and an id, then hand it to
    the logging worker; returns the (mutated) message."""
    # tag original data / log original data
    message["original"] = message["payload"]  # may be redundant: also first in the chain
    # TODO: should generate a new ID based on the table it is logged into
    message["id"] = 1
    logger = Process(target=log, args=(message, ))
    # BUG FIX: run() executed log() synchronously in this process, defeating
    # the purpose of wrapping it in a Process; start() logs in a child.
    logger.start()
    return message
def run(self):
    """
    Override. Send any exceptions raised in subprocess to main process.
    """
    outcome = None
    try:
        Process.run(self)
    except Exception as exc:
        # capture the failure for the parent: (exception, traceback text)
        outcome = (exc, traceback.format_exc())
    self._cconn.send(outcome)
def do_GET(self):
    """Serve the error page; flush the journal through the messaging worker
    when the newest entry is stale or the journal has grown too long."""
    self.do_HEAD()
    if ((time.time() - journal[0].getDate() > timeout)
            or (len(journal) > maxLogLength)):
        # BUG FIX: the original passed `target=messaging()` — calling
        # messaging() immediately and handing its *return value* to Process —
        # and then used run() instead of start().
        MessagingProcess = Process(target=messaging)
        MessagingProcess.start()
        print("")
    self.wfile.write(
        "<!-- Noticed by Nikita Lindmann, https://ramiil.in/ https://github.com/ramiil-kun/noticed/ -->"
        .encode("utf-8"))
    self.wfile.write(errorPage.encode("utf-8"))
def _execute(self, func_arn, func_details, event, context=None, version=None):
    """Run a Lambda handler in a forked subprocess, capture its output,
    store START/END/REPORT log lines to CloudWatch, and return an
    InvocationResult."""
    lambda_cwd = func_details.cwd
    environment = self._prepare_environment(func_details)
    # execute the Lambda function in a forked sub-process, sync result via queue
    queue = Queue()
    lambda_function = func_details.function(version)

    def do_execute():
        # now we're executing in the child process, safe to change CWD and ENV
        path_before = sys.path
        try:
            if lambda_cwd:
                os.chdir(lambda_cwd)
                sys.path = [lambda_cwd] + sys.path
            if environment:
                os.environ.update(environment)
            try:
                queue.put((lambda_function(event, context), None))
            except Exception as e:
                # forward errors to the parent instead of hanging queue.get()
                queue.put((None, e))
        finally:
            sys.path = path_before

    process = Process(target=do_execute)
    start_time = now(millis=True)
    with CaptureOutput() as c:
        # BUG FIX: run() executed the handler in the *parent* process,
        # mutating its CWD/ENV/sys.path; start() forks the child the comment
        # above assumes.
        process.start()
        result, error = queue.get()
        process.join()
    end_time = now(millis=True)
    if error is not None:
        raise error
    # Make sure to keep the log line below, to ensure the log stream gets created
    request_id = long_uid()
    log_output = 'START %s: Lambda %s started via "local" executor ...' % (
        request_id, func_arn)
    # TODO: Interweaving stdout/stderr currently not supported
    for stream in (c.stdout(), c.stderr()):
        if stream:
            log_output += ('\n' if log_output else '') + stream
    log_output += '\nEND RequestId: %s' % request_id
    log_output += '\nREPORT RequestId: %s Duration: %s ms' % (
        request_id, int((end_time - start_time) * 1000))
    # store logs to CloudWatch
    _store_logs(func_details, log_output)
    result = result.result if isinstance(result, InvocationResult) else result
    invocation_result = InvocationResult(result, log_output=log_output)
    return invocation_result
def run(self):
    """Launch the enabled scheduler components, each in its own process."""
    print('代理池调度器开始运行')
    if GETTER_ENABLED:
        getter_process = Process(target=self.schedule_getter)
        getter_process.start()
    if TESTER_ENABLED:
        tester_process = Process(target=self.schedule_tester)
        tester_process.start()
    if API_ENABLED:
        api_process = Process(target=self.schedule_api)
        # BUG FIX: run() executed the API scheduler in *this* process instead
        # of a child, unlike the getter/tester above; start it like the
        # others and block on it to keep the scheduler alive.
        api_process.start()
        api_process.join()
def run(self):
    """Run the process target, logging start/end banners and, on failure,
    the traceback."""
    log = self.Logger.info
    pid_tag = str(self.ProcessID)
    log('Process ' + pid_tag + ' started**************************************')
    try:
        Process.run(self)
    except Exception:
        log(traceback.format_exc().strip())
        log('Process ' + pid_tag + ' ended with errors****************************')
    else:
        log('Process ' + pid_tag + ' ended without errors*************************')
def run(self):
    """Run the target; report the outcome over the child connection.

    Sends ``None`` on success, or ``(exception, traceback)`` on failure —
    logging botocore errors with their request parameters — and then
    re-raises the failure."""
    try:
        Process.run(self)
    except Exception as exc:
        if isinstance(exc, BotoCoreError):
            logger.error(
                'Got botocore error with next params: %s', exc.kwargs
            )
        self._cconn.send((exc, traceback.format_exc()))
        raise
    else:
        self._cconn.send(None)
def run(self):
    # Give the worker a recognizable process title, then execute the target —
    # under cProfile when the DOWNCAST_PROFILE_OUT env var requests profiles.
    name = self.name
    if name is not None:
        setproctitle('downcast:%s' % (name, ))
    # Invoke the target function, with profiling if enabled
    pf = os.environ.get('DOWNCAST_PROFILE_OUT', None)
    if pf is not None and name is not None:
        # one profile file per named process: <prefix>.<name>
        pf = '%s.%s' % (pf, name)
        cProfile.runctx('Process.run(self)', globals(), locals(), pf)
    else:
        Process.run(self)
def run(self):
    '''
    Starts the execution of the process.

    Any exception raised by the target is forwarded through the send pipe
    as ``(exception, traceback)`` — retrievable via
    ``ExceptionSafeProcess.exception`` — and then re-raised; on success
    ``None`` is sent instead.
    '''
    outcome = None
    try:
        Process.run(self)
    except Exception as exc:  # pylint: disable=broad-except
        outcome = (exc, traceback.format_exc())
    self._send_pipe.send(outcome)
    if outcome is not None:
        raise outcome[0]
def schedue_work():
    """Start the enabled crawler/grader schedulers and the API server, each
    in its own child process."""
    if CRAWL_MODE:
        crawler_process = Process(target=schedule_crawl)
        crawler_process.start()
    if TEST_MODE:
        grade_process = Process(target=scheduce_grand)
        grade_process.start()
    if API_MODE:
        app_process = Process(target=schedule_server)
        # BUG FIX: run() executed schedule_server in *this* process instead
        # of spawning a child like the other components; start it and block
        # on it to keep the scheduler alive.
        app_process.start()
        app_process.join()
def run(self):
    # Run the agent's target; on failure, record the error on the instance,
    # report it to the shared log queue (if configured), and re-raise.
    try:
        Process.run(self)
    except Exception as self.err:
        # NOTE(review): binding the caught exception to an attribute in the
        # `except ... as` clause is Python 2-only syntax.
        if hasattr(self, 'config'):
            logQ = self.config['logQ']
            #self.logQ.put((source, header, level, comments + " " + payload))
            logQ.put((__name__, self.name, logging.WARNING, "%s agent crashed. Id: %s. Reason: %s " % (self.name, self.id, self.err) + getErrorInfo()))
        self.exitmessage = str(self.err)
        raise
def start_label(): q_json = Queue() #队列,用于存储json数据,自动带锁 q_idx = Queue() #队列,用于存储图片 dw = Draw.draw() gui = Gui.GUI() p1 = Process(target=dw.start_draw, args=(q_json, q_idx)) # 创建两个子进程 p2 = Process(target=gui.start_gui, args=(q_json, q_idx)) p1.daemon = True #设置守护进程 p2.daemon = True p1.start() p2.run() #在linux上p2.start()可以执行,但是在window有问题,不知道为啥,所以改成了p2.run()就可以了 p1.join() #阻塞父进程,让父进程等待p1进程结束父进程才结束,因为父进程在产生子进程后会异步执行 p2.terminate() #若p1进程结束就杀死p2进程
def __init__(self):
    """Accept TCP connections on port 8080 and handle each client in its
    own worker process."""
    with socket.socket() as tcp_socket:
        # allow quick restarts on the same port
        tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # bind and listen
        tcp_socket.bind(('', 8080))
        tcp_socket.listen()
        # wait for clients
        while True:
            self.client_socket, self.client_addr = tcp_socket.accept()
            t = Process(target=self.handle)
            t.daemon = True
            # BUG FIX: run() handled the client synchronously in this process
            # (and made daemon=True meaningless); start() spawns the worker.
            t.start()
def Test_np_multiprocess():
    """Run plusplus_r and plusplus_s over the same array in parallel."""
    k = np.arange(0, 100000, 1)
    print(k)
    poo2a = Process(target=plusplus_r, args=(k, ))
    poo2b = Process(target=plusplus_s, args=(k, ))
    # BUG FIX: run() executed both targets sequentially in this process;
    # start both children and then wait for them, so they actually run in
    # parallel.
    poo2a.start()
    poo2b.start()
    poo2a.join()
    poo2b.join()
class Handler(object):
    """Handle a single message.
    """

    def __init__(self, envelope, q, qlock, timeout=Rx.timeout):
        self.q = q              # outgoing envelope queue
        self.qlock = qlock      # lock guarding q
        self.envelope = envelope
        self.timeout = timeout
        self.p = None           # worker process, created by start()

    def start(self):
        # Spawn (at most once) a worker process for this envelope.
        if self.p is None:
            name = '%s/%s' % (__package__, self.envelope.uuid)
            self.p = Process(target=self.handle, args=(self.envelope.message,), name=name)
            # NOTE(review): run() executes handle() synchronously in *this*
            # process; start() was probably intended — confirm. Also note
            # handle() takes no extra argument despite args=(...,) here.
            self.p.run()
        return self.p

    def handle(self):
        # Dispatch on the message type.
        m = self.envelope.message
        assert isinstance(m, DrC)
        if isinstance(m, run.Run):
            self.run(m.task)
        # NOTE(review): this raise is unconditional — it fires even after a
        # successful dispatch above; a `return` after self.run() may be
        # missing. The %s placeholders are also never interpolated.
        raise ValueError('Unknown message type: %s (%s)', self.envelope.type, m.__class__.__name__)

    def post(self, message):
        # Reply on the same channel, referencing the originating envelope.
        envelope = Envelope(dict(channel=self.envelope.channel, refs=[self.envelope.uuid], data=message))
        with self.qlock:
            self.q.put(envelope)

    def run(self, task):
        # Run a task under its file lock, posting status transitions.
        assert isinstance(task, Task)
        self.post(run.Status(uuid=task.uuid, status=Status.started))
        try:
            with flock(self.subsidiary_lock(task.lock), seconds=Rx.timeout):
                return task.run()
        except Timeout as e:
            self.post(run.Status(uuid=task.uuid, status=Status.failed, message=str(e)))
        # NOTE(review): reached only on Timeout (success returns above) —
        # posting Status.success after a failure looks unintended; confirm.
        self.post(run.Status(uuid=task.uuid, status=Status.success))
def run(self):
    """Main worker loop (placeholder)."""
    while True:
        # Main code goes here ...
        # Note that logger level needs to be set to logging.DEBUG before
        # this shows up in the logs
        logger.debug("Starting main loop")


if __name__ == '__main__':
    logger.debug('Starting Main')
    info('main line')
    p = Process(target=pcontrol.pControl, args=(org, place, brokerIP, clientId))
    p.start()
    # o = Process(target=airsensor.airSensor, args=(org, place, brokerIP, clientId, cfgfile))
    # o.start()
    while True:
        if not p.is_alive():
            logger.warning('pControl is DEAD - Restarting-it')
            p.terminate()
            # BUG FIX: a terminated Process object cannot be restarted, and
            # run() would have executed pControl inline in this process.
            # Create a fresh Process and start it instead.
            p = Process(target=pcontrol.pControl,
                        args=(org, place, brokerIP, clientId))
            p.start()
            time.sleep(0.1)
            logger.warning("New PID: " + str(p.pid))
        # block until the worker dies, then loop around and restart it
        p.join()
def test_timer():
    # Exercise timechecker._nonerunning / timer single-instance pid-file
    # locking.
    func = printfirst
    params = 'Jane'
    publisher = Publisher()
    fname = 'update.pid'
    args = [func, params, '11:11:30', '00:03:00', fname, publisher]
    # None started so should be True
    assert(timechecker._nonerunning(fname))
    p1 = Process(target=timechecker.timer, args=args)
    p1.daemon = True
    p1.start()
    time.sleep(0.1)
    # One already started so should be False
    assert(not timechecker._nonerunning(fname))
    p1.terminate()
    os.remove(fname)
    p2 = Process(target=timechecker.timer, args=args)
    p2.daemon = True
    p2.start()
    time.sleep(0.1)
    pfail = Process(target=timechecker.timer, args=args)
    pfail.daemon = True
    # One already started so should raise OSError
    # run() is intentional here: it executes timer() in *this* process so the
    # OSError propagates to pytest.raises; start() would raise it in a child.
    with pytest.raises(OSError):
        pfail.run()
    # NOTE(review): pfail was never start()ed, so terminate() on it looks
    # suspect — confirm it doesn't fail here.
    pfail.terminate()
    p2.terminate()
    os.remove(fname)
def run(self):
    """Delegate to the base Process.run(), executing the configured target."""
    return Process.run(self)
ap.start() suppliers.append(ap) if conf.getboolean('suppliers','mail'): print "[+] Starting mail supplier!" imap = Imap() mp = Process(target=imap.run, args=(mail_queue,)) mp.daemon = True mp.start() suppliers.append(mp) if conf.getboolean('suppliers','dir'): print "[+] Starting Directory monitor!" monitor = Process(Monitor, args=(conf.get('settings','dirmonitor'),dist_queue,)) monitor.daemon = True monitor.run() suppliers.append(monitor) # Starting workers #Portable Executables for i in range(4): p = PEWorker(pe_queue, dist_queue) p.daemon = True p.start() workers.append(p) #Office for i in range(nr_processes): p = OfficeWorker(doc_queue, dist_queue) p.daemon = True p.start()
        self.lock = Lock()

    def Enqueue(self, msg):
        # put a message on the queue under the lock
        with self.lock:
            self.val.put(msg)

    def Dequeue(self):
        # blocking read of the next message, under the lock
        with self.lock:
            return self.val.get()

    def Empty(self):
        return self.val.empty()

    def Count(self):
        return self.val.qsize()


if __name__ == '__main__':
    queue = Q()
    #startIS()
    p1 = Process(target = startIRC, args = (queue,))
    p2 = Process(target = startIS, args = (queue,))
    # NOTE(review): run() executes each target synchronously in this process
    # (p1's target only runs after p2's returns); start() was probably
    # intended so both run in parallel alongside reactor.run() — confirm.
    p2.run()
    p1.run()
    reactor.run()
        print "here"
        Test.flag = True

    def run(self):
        #time.sleep(10)
        Test.x.append(self.i)
        try:
            try:
                # NOTE(review): x and y are undefined here, so this always
                # raises NameError — confirm that is the intended behavior.
                x/y
            except:
                print "xxxxxx"
                sys.exit(1)
        except:
            # sys.exit raises SystemExit, which this outer handler catches
            print "fffff"
        print "gggg"


class Process(object):
    # NOTE(review): this local class shadows multiprocessing.Process.
    @staticmethod
    def run():
        # spawn five Test workers and wait for all of them
        record = []
        for i in range(5):
            worker = Test(i)
            process = multiprocessing.Process(target=worker.run)
            process.start()
            record.append(process)
        print Test.x
        for process in record:
            process.join()


if __name__ == "__main__":
    Process.run()
if __name__ == "__main__": GOL = GameOfLife(50, 30,False, None) ##ToffMunstur til ad byrja med GOL.openPattern('Default.rle') GUI = GameOfLifeGUI(GOL) p, g = None, None #Thad er ekki haegt ad hafa thetta i thradum/processum, thvi badir thurfa ad keyra gtk.main_iteration til ad nota vidmotid/teikna g = Process(target = gtk.main_iteration, args = (False,)) w = GUI.Waiter() s = GUI.Simulator() u = GUI.Updater() gtk.gdk.threads_init() gtk.gdk.threads_enter() while(GUI.Running): while gtk.events_pending(): g.run() if(GUI.Play): if not w.is_alive(): w.run(GUI) if not s.is_alive(): s.run(GUI) if not u.is_alive(): u.run(GUI) gtk.gdk.threads_leave()
# Start process
# logger.warning("Cache prefix: %s", cachefile)
try:
    logger.debug('Starting pControl')
    p = Process(target=pcontrol.pControl,
                args=(org, place, brokerIP, clientId, cachefile))
    p.start()
    # BUG FIX: Process.pid is an attribute, not a method — p.pid() raised
    # TypeError (the later log line already used p.pid correctly).
    logger.warning('Starting pControl, PID: %s', str(p.pid))
    # o = Process(target=airsensor.airSensor, args=(org, place, brokerIP, clientId, cfgfile))
    # o.start()
    while True:
        if not p.is_alive():
            logger.warning('pControl is DEAD - Restarting-it')
            p.terminate()
            # BUG FIX: a terminated Process cannot be restarted, and run()
            # would execute pControl inline in this process; build and start
            # a fresh Process instead.
            p = Process(target=pcontrol.pControl,
                        args=(org, place, brokerIP, clientId, cachefile))
            p.start()
            time.sleep(0.1)
            logger.warning("New PID: %s", str(p.pid))
        '''
        if not o.is_alive():
            logger.warning('airSensor is DEAD - Restarting-it')
            o.terminate()
            o.run()
            time.sleep(0.1)
            logger.warning("New PID: " + str(o.pid))
        '''
except KeyboardInterrupt:
    # KeyboardInterrupt
    logger.warning('Shutingdown Monitoring system')
def Install(button):
    # Handler for the Install button: optionally dumps the registry, checks
    # the sudo password, then starts the progress monitor and the installer.
    boton2.set_sensitive(False)
    if casilla9.get_active():
        os.system(dump_registry_to_a_file)
    etiqueta_ventana_no_passwd.set_text(log_created_no_passwd)
    etiqueta_ventana_wrong_passwd.set_text(log_created_wrong_passwd)
    Passwd = PasswdEntry.get_text().rstrip()
    print Passwd
    if Passwd == "":
        # no password entered: show the warning window and re-enable the button
        ventatana_no_passwd.connect("delete_event", Esconder)
        ventatana_no_passwd.set_position(Gtk.WindowPosition.CENTER)
        ventatana_no_passwd.show_all()
        boton_none.connect("clicked", EsconderNone)
        boton2.set_sensitive(True)
    else:
        os.environ["Passwd"] = Passwd
        try:
            # probe the password with a harmless sudo echo
            Bash_Passwd = subprocess.check_output(""" autosudo () { echo "$Passwd" | sudo -S "$@" ; } ; autosudo echo "Hola" > /dev/null 2>&1 """, shell=True).rstrip("""\n""")
        except:
            # wrong password: show the error window
            ventatana_wrong_passwd.connect("delete_event", EsconderWrong)
            ventatana_wrong_passwd.set_position(Gtk.WindowPosition.CENTER)
            boton_wrong.connect("clicked", EsconderWrong)
            ventatana_wrong_passwd.show_all()
            boton2.set_sensitive(True)
        else:
            # prepare a world-writable progress file for the installer
            os.system(""" bash -c ' autosudo () { echo "$Password" | sudo -S "$@" ; } mkdir -p "$ProgressFileLocation" autosudo chmod 777 "$ProgressFileLocation" echo "0.00" > "$ProgressFileLocation/Progress.txt" ' """)

            def Ok():
                # poll the progress file and update the progress bar until 1.00
                try:
                    ProgressFile = os.environ["ProgressFileLocation"] + "/Progress.txt"
                    file = open(ProgressFile)
                    txt = file.read()
                    number = float(txt)
                    barra_de_progreso.set_fraction(number)
                except ValueError:
                    pass
                if number != 1.00:
                    threading.Timer(0.01, Ok).start()

            def P1():
                # demo writer: ramps the progress file from 0.01 to 1.00
                os.system(""" for i in $(seq 0.01 0.01 1.00); do sleep 0.01;echo "$i" > "$ProgressFileLocation/Progress.txt" ; done """)

            p1 = Process(target=Ok)
            p2 = Process(target=P1)
            # NOTE(review): p1.run() executes Ok synchronously here while P1
            # is start()ed in a child — confirm run() vs start() is intended.
            p1.run()
            p2.start()
    boton2.set_sensitive(False)
    print ("Fin")
def run(self):
    # Run the normal process target first, then open num_circuits
    # connections through the factory and hand control to the reactor.
    Process.run(self)
    for i in range(self.num_circuits):
        self.s.connect(self.factory)
    # NOTE(review): reactor.run() blocks this process until the reactor
    # stops — presumably Twisted's reactor; confirm.
    reactor.run()
if __name__ == '__main__':
    logger.debug('Starting Main')
    info('main line')

    def _spawn(target, args):
        # helper: create and start a fresh worker process
        proc = Process(target=target, args=args)
        proc.start()
        return proc

    p = _spawn(pcontrol.pControl, (org, place, brokerIP, clientId))
    o = _spawn(airsensor.airSensor, (org, place, brokerIP, clientId, cfgfile))
    while True:
        if not p.is_alive():
            logger.warning('pControl is DEAD - Restarting-it')
            p.terminate()
            # BUG FIX: a terminated Process cannot be restarted, and run()
            # would have executed pControl inline; spawn a fresh worker.
            p = _spawn(pcontrol.pControl, (org, place, brokerIP, clientId))
            time.sleep(0.1)
            logger.warning("New PID: " + str(p.pid))
        if not o.is_alive():
            logger.warning('airSensor is DEAD - Restarting-it')
            o.terminate()
            # BUG FIX: same as above for the airSensor worker.
            o = _spawn(airsensor.airSensor,
                       (org, place, brokerIP, clientId, cfgfile))
            time.sleep(0.1)
            logger.warning("New PID: " + str(o.pid))
        # avoid busy-spinning the watchdog loop
        time.sleep(0.1)
    # unreachable (the loop above never exits); kept from the original
    p.join()
    o.join()