def _run(self, redirects, sess, indentation = ""):
    """Fork and exec this command stage, registering the child process.

    Returns the list of RunningProcess objects started for this stage.
    """
    Pipeline._run(self, redirects, sess, indentation)
    self._running_processes = []
    redirects = redirects.make_pipes()
    log.log(indentation + "Running %s with %s" % (repr(self), repr(redirects)), "cmd")
    args = self._arg_list(redirects, sess, indentation)
    log.log(indentation + "222Running %s with %s" % (repr(self), repr(redirects)), "cmd")
    pid = os.fork()
    if pid == 0:
        self._child(redirects, args)
        # If we ever get to here, all is lost...
        # BUG FIX: sys._exit does not exist; a forked child must terminate
        # with os._exit so it never falls through into the parent's code.
        os._exit(-1)
    log.log(indentation + " %s: Command line %s with %s" % (pid, ' '.join(repr(arg) for arg in args), repr(redirects)), "cmd")
    self._running_process = RunningProcess(self, pid)
    self._running_processes.append(self._running_process)
    # Parent no longer needs the source ends of the pipes.
    redirects.close_source_fds()
    self._pid = pid
    self._redirects = self._running_process.redirects = redirects
    return self._running_processes
def fit_regression_models(expression, expression_indices): pred_expression = expression[:, (0, 1, 2, 3, 4, 5)] resp_expression = expression[:, (0, 1, 2, 3, 4, 5)] shared_index = Value('i', 0) pids = [] with ThreadSafeFile("output_noshift.txt", "w") as ofp: for p_index in xrange(NTHREADS): pid = os.fork() if pid == 0: while True: with shared_index.get_lock(): i = shared_index.value if i >= len(expression): break shared_index.value += 1 alpha, nonzero_coefs = estimate_covariates( pred_expression, resp_expression, i) output_str = "{}\t{}\t{}\n".format( expression_indices[i], alpha, "\t".join( str(expression_indices[x]) for x in nonzero_coefs)) print output_str, ofp.write(output_str) sys._exit() else: pids.append(pid) try: for pid in pids: os.waitpid(pid, 0) except: for pid in pids: os.kill(pid, signal.SIGTERM) raise
def _run(self, redirects, sess, indentation = ""):
    """Fork and exec this command stage, registering the child process.

    Returns the list of RunningProcess objects started for this stage.
    """
    base.Pipeline._run(self, redirects, sess, indentation)
    self._running_processes = []
    redirects = redirects.make_pipes()
    log.log(indentation + "Running %s with %s" % (repr(self), repr(redirects)), "cmd")
    args = self._arg_list(redirects, sess, indentation)
    log.log(indentation + "222Running %s with %s" % (repr(self), repr(redirects)), "cmd")
    pid = os.fork()
    if pid == 0:
        self._child(redirects, args)
        # If we ever get to here, all is lost...
        # BUG FIX: sys._exit does not exist; a forked child must terminate
        # with os._exit so it never falls through into the parent's code.
        os._exit(-1)
    log.log(indentation + " %s: Command line %s with %s" % (pid, ' '.join(repr(arg) for arg in args), repr(redirects)), "cmd")
    self._running_process = running.RunningProcess(self, pid)
    self._running_processes.append(self._running_process)
    # Parent no longer needs the source ends of the pipes.
    redirects.close_source_fds()
    self._pid = pid
    self._redirects = self._running_process.redirects = redirects
    return self._running_processes
def exit_tensorflow(port=6006):
    """Close TensorBoard and Nvidia-process if available.

    Parameters
    ----------
    port : int
        TensorBoard port you want to close, `6006` as default.
    """
    text = "[TL] Close tensorboard and nvidia-process if available"
    text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
    if _platform in ("linux", "linux2"):
        tl.logging.info('linux: %s' % text)
        os.system('nvidia-smi')
        # Kill whatever holds the TensorBoard port, then every python
        # process listed by nvidia-smi.
        os.system('fuser ' + str(port) + '/tcp -k')
        os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill")
        _exit()
    elif _platform == "darwin":
        tl.logging.info('OS X: %s' % text)
        # Kill the process listening on the TensorBoard port.
        cmd = "lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill"
        subprocess.Popen(cmd, shell=True)
    elif _platform == "win32":
        raise NotImplementedError("this function is not supported on the Windows platform")
    else:
        tl.logging.info(text2 + _platform)
def get_proxy(url=IPProxyPool):
    """Return the current proxy from the pool, rotating or refreshing it.

    Rotation is driven by the module-level globals time_wait, start_time,
    change_flag and the pool list qu / cursor qu_index.
    """
    global qu
    global qu_index
    global start_time
    global change_flag
    print(time_wait, change_flag)
    if time_wait < 0:
        if change_flag == True:
            # Advance the cursor, refreshing the pool when exhausted.
            for i in range(3):
                if qu_index == len(qu):
                    get_qu(IPProxyPool)
                else:
                    qu_index += 1
            start_time = time.time()
            change_flag = False
            return qu[qu_index]
        else:
            return qu[qu_index]
    else:
        if time_wait > time.time() - start_time:
            # Still within the wait window: reuse the current proxy.
            return qu[qu_index]
        else:
            start_time = time.time()
            for i in range(3):
                if qu_index == len(qu):
                    get_qu(IPProxyPool)
                else:
                    qu_index += 1
            return qu[qu_index]
    # BUG FIX: the original ended with sys._exit(), which does not exist in
    # the sys module and was unreachable anyway (every path returns); the
    # dead call has been removed.
def exit_tensorflow(sess=None, port=6006):
    """Close TensorFlow session, TensorBoard and Nvidia-process if available.

    Parameters
    ----------
    sess : Session
        TensorFlow Session.
    tb_port : int
        TensorBoard port you want to close, `6006` as default.
    """
    text = "[TL] Close tensorboard and nvidia-process if available"
    text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
    if sess is not None:
        sess.close()
    if _platform == "linux" or _platform == "linux2":
        tl.logging.info('linux: %s' % text)
        os.system('nvidia-smi')
        # BUG FIX: port is an int; concatenating it to a str raised
        # TypeError (the darwin branch below already used str(port)).
        os.system('fuser ' + str(port) + '/tcp -k')  # kill tensorboard 6006
        os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill")  # kill all nvidia-smi python process
        _exit()
    elif _platform == "darwin":
        tl.logging.info('OS X: %s' % text)
        subprocess.Popen("lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True)  # kill tensorboard
    elif _platform == "win32":
        raise NotImplementedError("this function is not supported on the Windows platform")
    else:
        tl.logging.info(text2 + _platform)
def execute_routine(routine):
    """Run a routine's state machine from START until END_ACTION.

    Each action's execute() returns a status string that indexes the
    routine's transition table to pick the next action.  An 'error' status
    runs ERROR_ACTION and stops; a RuntimeError aborts the process after
    hardware cleanup.
    """
    if not routine.exec:
        # routine is not meant to be executed
        print('routine not set for exec')
        return
    # start on start state
    transitions = routine.transitions
    current_action = routine.actions['START']
    status = 'not_started'
    try:
        while current_action is not END_ACTION:
            status = current_action.execute()
            # print(status, current_action.name, transitions)
            # #todo: remove if not simulating
            # from time import sleep
            # from random import uniform
            # sleep(uniform(0.05,5))
            # process status like for boolean actions
            if status == 'error':
                # raise warning
                ERROR_ACTION.execute()
                break
            # A '*' entry is a wildcard transition taken regardless of status.
            if '*' in transitions[current_action.name]:
                status = '*'
            current_action = routine.actions[transitions[current_action.name][status]]
    except RuntimeError as e:
        # error states trigger a runtime Error
        print(e)
        print('ERROR during {} with {} (status of {})'.format(routine.name, current_action.name, status))
        hal_cleanup()
        # Hard exit after hardware cleanup (presumably os._exit or a
        # sys.exit alias imported as _exit — verify at module top).
        _exit(1)
def exit_tf(sess=None, port=6006):
    """Close TensorFlow session, TensorBoard and Nvidia-process if available.

    Parameters
    ----------
    sess : a session instance of TensorFlow
        TensorFlow session
    tb_port : an integer
        TensorBoard port you want to close, 6006 as default.
    """
    text = "[TL] Close tensorboard and nvidia-process if available"
    text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
    if sess is not None:
        sess.close()
    # import time
    # time.sleep(2)
    if _platform == "linux" or _platform == "linux2":
        print('linux: %s' % text)
        os.system('nvidia-smi')
        # BUG FIX: port is an int; concatenating it to a str raised
        # TypeError (the darwin branch below already used str(port)).
        os.system('fuser ' + str(port) + '/tcp -k')  # kill tensorboard 6006
        os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill")  # kill all nvidia-smi python process
        _exit()
    elif _platform == "darwin":
        print('OS X: %s' % text)
        subprocess.Popen("lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True)  # kill tensorboard
    elif _platform == "win32":
        print(text2 + "Windows")  # TODO
    else:
        print(text2 + _platform)
def __subprocess_function_2(some_arg, some_other_arg):
    """Subprocess-test worker: prints 0..4 when given the expected arguments.

    Aborts the process when neither argument matches its sentinel value.
    """
    if some_arg != 'hello' and some_other_arg != 'world':
        print('error in common_subprocess_test - aborting')
        # BUG FIX: sys._exit does not exist (os._exit / sys.exit do); the
        # abort would have raised AttributeError instead of exiting.
        sys.exit(1)
    for i in range(5):
        print(i, ' ', end=' ')
    print()
def exit(status=None, message=None):
    """Terminate the process, optionally printing *message* to stderr first.

    Flushes the logging subsystem before exiting.  NOTE: always call with
    positional arguments only!  (after py3.8 the signature could be
    ``def exit(status=None, message=None, /)``)
    """
    if message:
        print(message, file=stderr)
    # Make sure buffered log records reach their handlers before we die.
    logging.shutdown()
    _exit(status or 0)
def exit_tf(sess=None, port=6006):
    """Close TensorFlow session, TensorBoard and Nvidia-process if available.

    Parameters
    ----------
    sess : a session instance of TensorFlow
        TensorFlow session
    tb_port : an integer
        TensorBoard port you want to close, 6006 as default.
    """
    text = "[TL] Close tensorboard and nvidia-process if available"
    text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
    if sess is not None:
        sess.close()
    # import time
    # time.sleep(2)
    if _platform == "linux" or _platform == "linux2":
        logging.info('linux: %s' % text)
        os.system('nvidia-smi')
        # BUG FIX: port is an int; concatenating it to a str raised
        # TypeError (the darwin branch below already used str(port)).
        os.system('fuser ' + str(port) + '/tcp -k')  # kill tensorboard 6006
        os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill")  # kill all nvidia-smi python process
        _exit()
    elif _platform == "darwin":
        logging.info('OS X: %s' % text)
        subprocess.Popen("lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True)  # kill tensorboard
    elif _platform == "win32":
        logging.info(text2 + "Windows")  # TODO
    else:
        logging.info(text2 + _platform)
def _roster_actor(config, messages):
    """Act on messages for the roster.

    The Roster class is not thread-safe, so we use a single actor thread
    to serialize messages for the database.

    Messages can be:

    ('QUIT')
    ('ATTENDING', User(...))
    """
    logging.info('Roster actor is starting')
    try:
        with config.get_roster() as roster:
            while True:
                message = messages.get()
                if message[0] == 'QUIT':
                    logging.info('Roster actor is quitting')
                    return
                if message[0] == 'ATTENDING':
                    user = message[1]
                    logging.info('Setting attendance to True: %s', user)
                    roster.set_user_attendance(user, True)
    except Exception as e:
        logging.critical('Roster actor failed: %s', e)
        # BUG FIX: sys._exit does not exist; use sys.exit to terminate the
        # actor instead of raising AttributeError inside the handler.
        sys.exit(1)
def CSVtoMysql(self, filename, path):
    """Insert each line of a CSV file into the YahooFinance MySQL database.

    Uses the legacy MySQLdb driver; migration to mysql-connector-python is
    planned (see CSVtoMysqlConnector).  The database is assumed to exist.
    """
    # Connect and create a cursor.
    connection = MySQLdb.connect(
        host='localhost',
        user='******',
        passwd='Test1030',  # throwaway password, not a secret
        db='YahooFinance')
    cursor = connection.cursor()
    os.chdir(path)  # switch to the data directory
    print(os.getcwd())  # confirm the working directory
    try:
        # open() raises FileNotFoundError when the CSV is missing.
        with open(filename, 'tr') as fin:
            for iline in fin:
                try:
                    # Lines are comma-separated; the helper splits and inserts.
                    self.mysqlInsertFuncDekidaka(iline, cursor)
                except MySQLdb.Error as e:
                    print('MySQLdb.Error: ', e)
                    # Roll back and stop on the first insert failure.
                    # NOTE(review): rows committed by earlier runs remain; a
                    # transactional redesign may be preferable.
                    connection.rollback()
                    print("強制終了MYSQL")
                    connection.close()
                    return
        connection.commit()
    except FileNotFoundError as e:
        print("ファイルが見つかりません。パス、ファイル名を確認してください", e)
        print("強制終了")
        # BUG FIX: sys._exit does not exist; use sys.exit to abort.
        sys.exit(1)
    except Exception as e:
        print(e)
def run(self): """ runs the threading and the processes for Frost's Kinect vision, LIDAR, and launching """ while True: try: #if the queue has data, then take it and send it off to the approrpriate processing unit in supervisor if not self.q.empty(): self.xdata = self.q.get() if self.xdata[0] == 1: (self.target_found, self.target_angle, self.target_distance) = self.supervisor.view.draw( self.xdata[1], self.target_data) else: self.target_data = self.supervisor.targeter.track( self.xdata[1]) self.supervisor.serial_out.send_serial( self.target_found, self.target_angle, self.target_distance) #shuwdown sequence for pygame for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys._exit() break except KeyboardInterrupt: print "keyboard" self.thread1.terminate() self.thread2.terminate() break
def change_wallpaper(path: str):
    """Set *path* as the desktop background via feh (needed under i3)."""
    # The feh check is done here so it doesn't look like i3 is unsupported.
    if _which("feh") is None:
        print("feh not found, you need it to set the background in i3.")
        _exit(0)
    _system(f'feh --bg-fill "{path}"')
def get_tool_and_version():
    """Return "lastal <version>" parsed from the blasttab+ file header.

    Exits with status 1 when the first line lacks the LAST version banner.
    """
    last_version_line = args.last_blasttabplus_file.readline()
    if not last_version_line.startswith("# LAST version"):
        print("""ERROR: First line of blastab file does not contain the version information!""", file = sys.stderr)
        # BUG FIX: sys._exit does not exist; sys.exit is the correct call.
        sys.exit(1)
    # Header format: "# LAST version <ver> ..." — the version is field 3.
    version = last_version_line.rstrip().split()[3]
    return "lastal " + version
def handler(*args):
    """SIGINT handler: stop the registration thread, then exit the process."""
    logging.info('CTRL-C pressed, Stopping Registration Thread...')
    registratorThread.quit_flag = True
    registratorThread.join()
    if os.name == 'posix':
        sys.exit(0)
    else:
        # BUG FIX: sys._exit does not exist; os._exit is the hard-exit the
        # non-posix branch evidently intended.
        os._exit(0)
def test(): try: HandlerClass = ProxyHTTPRequestHandler ServerClass = ThreadingHTTPServer BaseHTTPServer.test(HandlerClass, ServerClass) except: print "sering exit!" sys._exit(1)
def test(): try: HandlerClass = ProxyHTTPRequestHandler ServerClass = ThreadingHTTPServer BaseHTTPServer.test(HandlerClass, ServerClass) except: print "sering exit!" sys._exit(1)
def shell_notify(msg, state=False, more=None, exitcode=None, verbose=True):
    '''
    The one and only :py:func:`print` wrapper in Photon.

    .. note:: |use_photon_m|

    :param msg: The message to show
    :param state: Prefix selector for the message

        * ``False`` (default): prefixed with ~
        * ``None``: prefixed with [WARNING]
        * ``True``: prefixed with [FATAL], and the exitcode is forced to 23
        * anything else: prefixed with [`state`]
    :param more: Extra detail appended after the message, pretty printed
        with :py:func:`pprint.pformat`
    :param exitcode: |appteardown| with given code
    :param verbose: Suppress output when ``False`` (overruled by `exitcode`)
    :returns: A dictionary containing untouched `msg`, `more` and `verbose`
    '''
    if state is True:
        prefix = '[FATAL]'
        exitcode = 23
    elif state is None:
        prefix = '[WARNING]'
    elif state is False:
        prefix = '~'
    else:
        prefix = '[%s]' % (str(state))
    text = ' %s %s' % (prefix, str(msg))
    if more:
        # Indent the pretty-printed detail under the message.
        text += '\n\t' + _pformat(more).replace('\n', '\n\t')
    if verbose or isinstance(exitcode, int):
        print(text)
    if isinstance(exitcode, int):
        _exit(exitcode)
    return dict(message=msg, more=more, verbose=verbose)
def main(): if len(sys.argv)!=2: print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE" sys._exit(-1) address_book = addressbook_pb2.AddressBook() f = open(sys.argv[1],'rb') address_book.ParseFromString(f.read()) f.close() ListPeople(address_book)
def get_vars(var_dict):
    """Prompt for each (name, type) pair in *var_dict* and return the values.

    Ctrl-C during any prompt terminates the program.
    """
    collected = {}
    try:
        for name, cast in var_dict.items():
            collected[name] = cast(input(f"{name}=? "))
    except KeyboardInterrupt:
        # Erase the ^C echo before exiting.
        print("\b\b ")
        _exit()
    return collected
def __init__(self, commands: dict = None) -> None:
    """Set up the assistant worker thread and its voice-command table.

    :param commands: mapping of spoken phrase -> callable; defaults to no
        custom commands.
    """
    # Guard: this assistant only runs on Raspberry Pi 2/3 class hardware;
    # print only when attached to a terminal, then bail out.
    if machine() not in ("armv8", "armv7"):
        if stdout.isatty():
            print("Can only run on Pi 2 and 3")
        _exit(-1)
    self._task = Thread(target=self._run_task)
    self._can_start_conversation = False
    self._assistant = None
    # BUG FIX: the original default `commands: dict = {}` is a mutable
    # default shared across calls; use a None sentinel and a fresh dict.
    self._commands = commands if commands is not None else {}
def _cmd_create(self, options, *args):
    """Interactively create new entries.

    Accepts at most one argument: the number of entries to create
    (defaults to 1).
    """
    number = 1
    if len(args) == 1:
        try:
            number = int(args[0])
        except ValueError:
            # BUG FIX: sys._exit does not exist; use sys.exit, consistent
            # with the argument-count check below.
            sys.exit(_('--create needs a number'))
    if len(args) > 1:
        sys.exit(_('--create accepts only one argument'))
    # BUG FIX: the parsed `number` was ignored (hard-coded 2); repeat the
    # editor for the requested count.
    for func in itertools.repeat(self.edit_entry, number):
        func()
def iniciarServidor(self): try: print "Inicializando Servidor..." server = SimpleXMLRPCServer((str(self.ip), int(self.puerto)),allow_none=True) server.register_function(self.addCliente,'addCliente'); server.register_function(self.desconectarCliente,'desconectarCliente'); server.register_function(self.getUsuarios,'getUsuarios'); server.serve_forever(); except Exception as e: self.serverUI.mostarError(u"No se pudo inicializar el servidor. Por favor, reinicie la aplicación"); print u"No se pudo inicializar el servidor. Por favor, reinicie la aplicación"; sys._exit(-1);
def _fork_callable(callable):
    """Fork a child that runs *callable* with fd 0 connected to a pipe.

    Returns (child_pid, write_fd); the write end is close-on-exec and
    non-blocking in the parent.
    """
    r, w = os.pipe()
    _set_cloexec(w)
    _set_nonblocking(w)
    pid = os.fork()
    if not pid:
        # Child: the read end becomes stdin; the write end is closed.
        os.dup2(r, 0)
        os.close(w)
        callable()
        # BUG FIX: sys._exit does not exist; a forked child must terminate
        # with os._exit so it never returns into the parent's code.
        os._exit(0)
    os.close(r)
    return pid, w
def fork_server(options):
    """Fork and exec the background server process; the parent returns."""
    if os.fork():
        return
    # Child: detach stdin/stdout and start a new session before exec.
    os.close(0)
    sys.stdin = sys.__stdin__ = open("/dev/null")
    os.close(1)
    sys.stdout = sys.__stdout__ = open("/dev/null", "w")
    os.setsid()
    os.execv(sys.executable,[sys.executable,
                             os.path.join(os.path.split(sys.argv[0])[0],
                                          'sb_bnserver.py')
                             ]+options)
    # BUG FIX: sys._exit does not exist; if execv fails the child must die
    # with os._exit so it never runs the parent's code paths.
    os._exit(1)
def _cmd_create(self, options, *args):
    """Interactively create new entries.

    Accepts at most one argument: the number of entries to create
    (defaults to 1).
    """
    number = 1
    if len(args) == 1:
        try:
            number = int(args[0])
        except ValueError:
            # BUG FIX: sys._exit does not exist; use sys.exit, consistent
            # with the argument-count check below.
            sys.exit(_('--create needs a number'))
    if len(args) > 1:
        sys.exit(_('--create accepts only one argument'))
    # BUG FIX: the parsed `number` was ignored (hard-coded 2); repeat the
    # editor for the requested count.
    for func in itertools.repeat(self.edit_entry, number):
        func()
def _brute(cube, level, moves):
    """Depth-first brute-force search over cube states.

    Recursively applies every move in _static_rmap to *cube*, tracking the
    move string in *moves*.  When a state in _wanted is reached the move
    sequence is printed (and optimized); the search hard-exits once the
    minimum number of algorithms has been collected.  Visited states are
    memoized in the global visited_states list.
    """
    global max_recursion_level
    global _taints
    global _static_map
    global _static_rmap
    global _algos
    # Prune: depth and move-length limits.
    if level > max_recursion_level:
        return cube
    if len(moves) > max_moves:
        return cube
    if cube in _wanted:
        print("[+] SOLUTION FOUND")
        # Translate each single-character move into its formula text.
        _m = ''
        for m in str(moves):
            _m = '%s %s' % (_m, _formulas[_static_map[m]])
        _m = _m.strip()
        print("[+] MOVES: %s" % _m)
        _m = _optimize(_m)
        print("[+] MOVES SHORTENED: %s" % _m)
        print("[+] MOVES LEN: %s" % str(len(_m.split())))
        global start_time
        time_elapsed = _dt.now() - start_time
        print('[*] Bruteforced in: {}'.format(time_elapsed))
        if len(_algos) == _algos_min:
            print('[d] Algos minimum reached: %s' % str(_algos_min))
            # Hard stop once enough algorithms were found (_exit is
            # presumably os._exit or a sys.exit alias — verify at module top).
            _exit(0)
    global visited_states
    # Skip states we've already expanded.
    if cube in visited_states:
        return cube
    new_cube = _cp.deepcopy(cube)
    if _debug:
        print("[*] Reached level: %s, len_moves: %s, len_visited: %s" % (str(level), str(len(moves)), str(len(visited_states))))
        print("[d] moves: %s" % (str(moves)))
    visited_states.append(new_cube)
    for func in _static_rmap:
        # In strict mode, skip a move when the previous move taints it.
        if _strict and func in _taints:
            if len(moves) > 0 and _static_map[moves[-1]] in _taints[func]:
                continue
        # Apply the named move function to the copied cube and recurse.
        changed_cube = eval("%s(new_cube)" % func)
        _brute(changed_cube, level + 1, "%s%s" % (moves, _static_rmap[func]))
    return new_cube
def message(entries, info=True, critical=False):
    """Write one prefixed line per entry to stdout (info) or stderr (error).

    A critical message terminates the process with exit status 1.
    """
    if info:
        pipe, pre = stdout, 'info'
    else:
        pipe, pre = stderr, 'error'
    # Normalize the input into an iterable of printable entries.
    if isinstance(entries, str):
        entries = entries.splitlines()
    if not is_iterable(entries):
        entries = [entries]
    for entry in entries:
        pipe.write('{}: {}\n'.format(pre, entry))
        pipe.flush()
    if critical:
        _exit(1)
def exec_daemon(host, port=None):
    """Fork and exec the background daemon listening on *host* (and *port*)."""
    pid = os.fork()
    if pid:
        # Parent (forks again after init, and intermediate exits)
        os.waitpid(pid, 0)
    else:
        thisdir = os.path.dirname(__file__)
        daemonpath = os.path.join(thisdir, 'daemon.py')
        pypath = os.path.abspath(os.path.join(thisdir, os.pardir, os.pardir))
        args = ['python', daemonpath, '--bg', '--listen', host]
        if port:
            args.append('--port')
            # NOTE(review): execve requires string arguments — if callers may
            # pass an int port this needs str(port); verify against callers.
            args.append(port)
        env = dict(PYTHONPATH=pypath)
        os.execve(sys.executable, args, env)
        # BUG FIX: sys._exit does not exist; if exec fails the forked child
        # must die with os._exit, never returning to the caller's code.
        os._exit(1)
def exec_daemon(host, port=None):
    """Fork and exec the background daemon listening on *host* (and *port*)."""
    pid = os.fork()
    if pid:
        # Parent (forks again after init, and intermediate exits)
        os.waitpid(pid, 0)
    else:
        thisdir = os.path.dirname(__file__)
        daemonpath = os.path.join(thisdir, 'daemon.py')
        pypath = os.path.abspath(os.path.join(thisdir, os.pardir, os.pardir))
        args = ['python', daemonpath, '--bg', '--listen', host]
        if port:
            args.append('--port')
            # NOTE(review): execve requires string arguments — if callers may
            # pass an int port this needs str(port); verify against callers.
            args.append(port)
        env = dict(PYTHONPATH=pypath)
        os.execve(sys.executable, args, env)
        # BUG FIX: sys._exit does not exist; if exec fails the forked child
        # must die with os._exit, never returning to the caller's code.
        os._exit(1)
def parser(self, query):
    """
    Dispatch an interactive command string.

    :query: String to query the module.  Recognized forms: '!h'/'help' for
    the help menu, a two-character module prefix followed by its arguments,
    and '!q'/'quit' to leave interactive mode (exits with status 2).
    """
    _available_modules = self._set_modules().keys()
    if query[:4] in ('!h', 'help'):
        self.logger.debug("Display !h (help menu)")
        self._interactive_help()
    elif query[:2] in _available_modules:
        # First two characters select the module; the rest (after the
        # separator) is passed through as its argument string.
        self._caller(query[:2], query[3:])
    elif query[:4] in ('!q', 'quit'):
        self.logger.debug("!q selected. Exiting interactive mode")
        print("Bye!")
        _exit(2)
    else:
        self.logger.debug("Invalid command input")
        print('Invalid command! Try `!h` or `help` for help.')
def simple_exit(results):
    """
    A simpler version of exit_based_on_results(); this function causes the
    script to exit normally with return value of zero if and only if all
    tests within the script passed and had no errors. Otherwise it returns
    the number of failures plus the number of errors.

    Parameters
    ----------
    results: an instance of unittest.TestResult, returned (for instance) from
        a call such as
            results = unittest.TextTestRunner(verbosity=2).run(suite)
    """
    if results.wasSuccessful():
        _exit(0)
    # Non-zero exit: total count of errors and failures.
    _exit(len(results.errors) + len(results.failures))
def simple_exit(results):
    """
    A simpler version of exit_based_on_results(); this function causes the
    script to exit normally with return value of zero if and only if all
    tests within the script passed and had no errors. Otherwise it returns
    the number of failures plus the number of errors.

    Parameters
    ----------
    results: an instance of unittest.TestResult, returned (for instance) from
        a call such as
            results = unittest.TextTestRunner(verbosity=2).run(suite)
    """
    if results.wasSuccessful():
        _exit(0)
    # Non-zero exit: total count of errors and failures.
    _exit(len(results.errors) + len(results.failures))
def proc_entries(self, idx):
    """Fetch every entry of feed channel *idx*, filter it, and save articles.

    Each entry's page is downloaded (4s timeout), run through self.filter,
    and non-empty results are written as chN_eM.json via proc_article.
    """
    entries = init_fp(self.ch_refs[idx]).entries
    (topics, entries_ref) = ([entry.title for entry in entries],
                             [entry.link for entry in entries])
    #entries_ref = [qhf for qhf in map(quote_href, unquoted)]
    for i in range(len(entries_ref)):
        try:
            fName = "ch{0}_e{1}.json".format(str(idx), str(i))
            src = requests.get(entries_ref[i], timeout=4.0)
            src = src.content.decode(src.encoding)
            target = self.filter(src)
            if target != []:
                self.proc_article(fName, target, entries_ref[i], topics[i])
        except KeyboardInterrupt:
            # BUG FIX: sys._exit does not exist; use sys.exit so Ctrl-C
            # actually stops the crawl.
            sys.exit(1)
def _process_event(self, event):
    """Dispatch a Google Assistant event: status UI, button, custom commands."""
    if stdout.isatty():
        print(event)
    status_ui = get_status_ui()
    if event.type == EventType.ON_START_FINISHED:
        status_ui.status("ready")
        self._can_start_conversation = True
        # Start the voicehat button trigger.
        get_button().on_press(self._on_button_pressed)
        if stdout.isatty():
            print(
                'Say "OK, Google" or press the button, then speak. '
                "Press Ctrl+C to quit..."
            )
    elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args:
        if stdout.isatty():
            print("You said:", event.args["text"])
        text = event.args["text"].lower()
        # If it doesnt have the command let Google handle it
        # BUG FIX: self._commands is a dict, so membership must use `in`;
        # hasattr() checks object attributes and never matched a phrase.
        if text in self._commands:
            self._assistant.stop_conversation()
            self._commands[text]()
    elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        self._can_start_conversation = False
        status_ui.status("listening")
    elif event.type == EventType.ON_END_OF_UTTERANCE:
        status_ui.status("thinking")
    elif (
        event.type == EventType.ON_CONVERSATION_TURN_FINISHED
        or event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
        or event.type == EventType.ON_NO_RESPONSE
    ):
        status_ui.status("ready")
        self._can_start_conversation = True
    elif (
        event.type == EventType.ON_ASSISTANT_ERROR
        and event.args
        and event.args["is_fatal"]
    ):
        if stdout.isatty():
            print("Fatal Error")
        _exit(1)
def outputCSVForTableColum(self, columArray, filename, path):
    """Write the records in *columArray* to a CSV file under *path*.

    Column order follows the table: code, market, name, torihikiDay, price,
    previousYearHighDay, previousYearHighPrice, highPrice.  Thousands
    separators are stripped from the numeric fields.
    """
    if filename.find('.csv') == -1:  # append the .csv extension when missing
        filename += '.csv'
    os.chdir(path)  # switch to the output directory
    print(os.getcwd())  # confirm the working directory
    try:
        ofile = open(filename, 'tw')
    except FileNotFoundError as e:
        print("ファイルが見つかりません", e)
        # BUG FIX: sys._exit does not exist; use sys.exit to abort when the
        # output file cannot be created.
        sys.exit(1)
    except Exception as e:
        print(e)
        # BUG FIX: without a file handle the loop below raised NameError;
        # bail out instead.
        return
    for i in columArray:
        ofile.write(i.code+','+i.market+','+i.name+','+i.torihikiDay+','+i.price.replace(',','')+','+\
                    i.previousYearHighDay+','+i.previousYearHighPrice.replace(',','')+','+i.highPrice.replace(',','')+'\n')
    ofile.close()
def outputCSVForTableColum(self, columArray, filename, path):
    """Write the records in *columArray* to a CSV file under *path*.

    Column order: code, market, name, price, kessanSyubetsu, eps,
    kessanDate, PerKessanSyubetsu, per — thousands separators stripped
    from numeric fields.
    """
    #path='/Users/toshiromaseda/Documents/2021年/2021年株/yahoofinance_data/'
    if filename.find('.csv') == -1:  # append the .csv extension when missing
        filename += '.csv'
    os.chdir(path)  # switch to the output directory
    print(os.getcwd())  # confirm the working directory
    try:
        ofile = open(filename, 'tw')
    except FileNotFoundError as e:
        print("ファイルが見つかりません", e)
        # BUG FIX: sys._exit does not exist; use sys.exit to abort when the
        # output file cannot be created.
        sys.exit(1)
    except Exception as e:
        print(e)
        # BUG FIX: without a file handle the loop below raised NameError;
        # bail out instead.
        return
    for i in columArray:
        ofile.write(i.code+','+i.market+','+i.name+','+i.price.replace(',','')+','+i.kessanSyubetsu+','+\
                    i.eps.replace(',','')+','+i.kessanDate+','+i.PerKessanSyubetsu+','+i.per.replace(',','')+'\n')
    ofile.close()
def CSVtoMysqlConnector(self, filename, path):
    """Load a CSV file into YahooFinance via mysql-connector-python.

    This is the preferred driver going forward (replacing the legacy
    MySQLdb path in CSVtoMysql).
    """
    # Create the connection and cursor; database must already exist.
    conn = mysql.connector.connect(host='localhost', port='3306', user='******',
                                   password='******', database='YahooFinance')
    cursor = conn.cursor()
    print("Trueなら接続OK")
    print(conn.is_connected())  # True / False
    os.chdir(path)  # switch to the data directory
    print(os.getcwd())  # confirm the working directory
    try:
        # open() raises FileNotFoundError when the CSV is missing.
        with open(filename, 'tr') as fin:
            for iline in fin:
                self.mysqlConnectorInsertFuncDekidaka(iline, cursor)
    except FileNotFoundError as e:
        print("ファイルが見つかりません。パス、ファイル名を確認してください", e)
        print("強制終了")
        # BUG FIX: sys._exit does not exist; use sys.exit to abort.
        sys.exit(1)
    except Exception as e:
        print(e)
    conn.commit()
    cursor.close()
    conn.close()
    print('DB 処理終了。。。')
def fork_server(options):
    """Fork and exec the background server process; the parent returns."""
    if os.fork():
        # parent
        return
    # Child: detach stdin/stdout and start a new session before exec.
    os.close(0)
    sys.stdin = sys.__stdin__ = open("/dev/null")
    os.close(1)
    sys.stdout = sys.__stdout__ = open("/dev/null", "w")
    # leave stderr
    # os.close(2)
    # sys.stderr = sys.__stderr__ = open("/dev/null", "w")
    os.setsid()
    # Use exec rather than import here because eventually it may be nice to
    # reimplement this one file in C
    os.execv(sys.executable,[sys.executable,
                             os.path.join(os.path.split(sys.argv[0])[0],
                                          'sb_bnserver.py')
                             ]+options)
    # should never get here
    # BUG FIX: sys._exit does not exist; if execv fails the child must die
    # with os._exit so it never runs the parent's code paths.
    os._exit(1)
def fork_server(options):
    """Fork and exec the background server process; the parent returns."""
    if os.fork():
        # parent
        return
    # Child: detach stdin/stdout and start a new session before exec.
    os.close(0)
    sys.stdin = sys.__stdin__ = open("/dev/null")
    os.close(1)
    sys.stdout = sys.__stdout__ = open("/dev/null", "w")
    # leave stderr
    # os.close(2)
    # sys.stderr = sys.__stderr__ = open("/dev/null", "w")
    os.setsid()
    # Use exec rather than import here because eventually it may be nice to
    # reimplement this one file in C
    os.execv(sys.executable, [sys.executable,
                              os.path.join(os.path.split(sys.argv[0])[0],
                                           'sb_bnserver.py')
                              ]+options)
    # should never get here
    # BUG FIX: sys._exit does not exist; if execv fails the child must die
    # with os._exit so it never runs the parent's code paths.
    os._exit(1)
def exit_based_on_results(results):
    """
    A probably-obsolete function to exit from a unit test-script with a
    status that depends on whether or not the only errors or failures were
    NotImplemented errors.  Specifically:

        All tests successful:                                 Exit 0
        All tests successful or only NotImplemented errors:   Exit 1
        Some tests either failed or had other errors:         Exit 2

    The intent was that failures due to missing features be treated
    differently (especially on GPU schemes) so that an automatic build/test
    system could interpret the statuses accordingly.

    Parameters
    ----------
    results: an instance of unittest.TestResult, returned (for instance) from
        a call such as
            results = unittest.TextTestRunner(verbosity=2).run(suite)
    """
    # Count errors whose message mentions NotImplemented (at most once per
    # error tuple).
    not_implemented = 0
    for error in results.errors:
        if any(type(msg) is str and 'NotImplemented' in msg for msg in error):
            not_implemented += 1
    if results.wasSuccessful():
        _exit(0)
    elif not results.failures and len(results.errors) == not_implemented:
        _exit(1)
    else:
        _exit(2)
def iniciarServidor(self): try: print "Inicializando Servidor..." serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM); serverSocket.bind((self.ip, self.puerto)); serverSocket.listen(1); while True: (clientSocket,clientAddress) = serverSocket.accept(); print u"Recibí un cliente en la dirección ",clientAddress; data_json = clientSocket.recv(self.BUFFER_SIZE); print u"Recibí un mensaje" if data_json: data = json.loads(data_json); if(data["peticion"] == "addCliente"): self.addCliente(data["nombre"],data["ip"],data["puerto"]); except Exception as e: self.serverUI.mostarError(u"No se pudo inicializar el servidor. Por favor, reinicie la aplicación"); print u"No se pudo inicializar el servidor. Por favor, reinicie la aplicación"; sys._exit(-1);
# Test-harness setup: build FFT test classes for the FFTW pthreads backend.
import unittest
from sys import exit as _exit
from utils import parse_args_cpu_only, simple_exit
from fft_base import _BaseTestFFTClass

parse_args_cpu_only("FFTW pthreads backend")
# See if we can get set the FFTW backend to 'pthreads'; if not, say so and exit.
# NOTE(review): `pycbc` and `CPUScheme` are used below but not imported in
# this chunk — presumably imported elsewhere in the file; verify.
if 'fftw' in pycbc.fft._all_backends_list:
    import pycbc.fft.fftw
    try:
        pycbc.fft.fftw.set_threads_backend('pthreads')
    except:
        print "Unable to import pthreads threads backend to FFTW; skipping pthreads thread tests"
        _exit(0)
else:
    print "FFTW does not seem to be an available CPU backend; skipping pthreads thread tests"
    _exit(0)
# Most of the work is now done in fft_base; here we only parameterize the
# generated test class (fftw backend, CPU scheme, two pthreads).
FFTTestClasses = []
kdict = {'backends' : ['fftw'], 'scheme' : 'cpu', 'context' : CPUScheme(num_threads=2)}
klass = type('FFTW_pthreads_test', (_BaseTestFFTClass,),kdict)
FFTTestClasses.append(klass)
# Finally, we create suites and run them
def main(url,user=None,password=None): api = Zabbix_Api(url,user=user,passwd=password) api.login() parser = argparse.ArgumentParser(description=u""" 这是一个调用zabbix api的python脚本。 使用方法如下(例子): python %(prog)s -H 10.100.0.1 *查看10.100.0.1这台主机状态 python %(prog)s -AH *查看所有的主机状态 python %(prog)s -c 10.100.0.1 鼎新影院 1 10001 *创建一个主机,设置主机别名,分组,模板 """) parser.add_argument('-H','--host',nargs="?",dest='listhost',help=u"查看主机") parser.add_argument('-AH','--all-host',dest='allhost',action="store_true",help=u"显示所有主机") parser.add_argument('-G','--group',nargs="?",dest='listgroup',help=u"查看主机组") parser.add_argument('-AG','--all-group',dest='allgroup',action="store_true",help=u"显示所有主机组") parser.add_argument('-T','--template',nargs="?",dest="listtemplate",help=u"查看模板") parser.add_argument('-AT','--all-template',dest='alltemplate',action="store_true",help=u"显示所有模板") parser.add_argument('-I','--item',nargs="?",dest="listitem",metavar=('HOSTNAME'),help=u"查看某主机监控项(跟主机ID)") parser.add_argument('-O','--history',nargs="?",dest="history",metavar=('ITEMID'),help=u"查看某监控项的历史记录(跟监控项ID)") parser.add_argument('-R','--trigger',nargs="?",dest="trigger",metavar=("Triggerid"),help=u"查看触发器信息") parser.add_argument('-AR','--triggers',dest='triggers',action="store_true",help=u"查看所有的触发器") parser.add_argument('-C','--add-group',nargs="?",dest='addgroup',help=u"创建新的主机组") parser.add_argument('-c','--add-host',nargs=4,dest='addhost',metavar=('HOSTNAME','ALIASNAME','8,10','10001,10002'),help=u"创建新的主机,多个主机组或模板用逗号分隔") parser.add_argument('-mc','--more-add-host',nargs="?",dest="moreaddhost",metavar=("FullPathFileName"),help=u"用文件,多线程添加主机(我这用的gevent协程)") parser.add_argument('-U','--update-host',nargs=2,dest="updatehost",metavar=("HOSTNAME","TEMPLATEID[,templateid]"),help=u"更新主机信息") parser.add_argument('-d','--disable-host',nargs=1,dest="disablehost",help="禁用主机") parser.add_argument('-e','--enable-host',nargs=1,dest="enablehost",help="启用主机") 
parser.add_argument('-D','--delete-host',nargs="+",dest="deletehost",help="删除主机,多个主机用空格分隔") parser.add_argument('-V','--version',action="version",version="zabbix api python 1.0.0",help="显示当前脚本版本") args = parser.parse_args() status = {"0": "Monitored", "1": "Not Monitored"} available = {"0": "Unknown", "1": "Available", "2": "Unavailable"} if args.listhost: data = api.host("get",{"output": "extend","filter": {"host": args.listhost}}) for host in data: print u"主机ID: {}\t主机名: {}\t监控状态: {}\tagent连接状态: {}".format( host["hostid"],host["name"],status[host["status"]],available[host["available"]]) elif args.allhost: data = api.host("get",{"output": "extend"}) for host in data: print u"主机ID: {}\t主机名: {}\t监控状态: {}\tagent连接状态: {}".format( host["hostid"],host["name"],status[host["status"]],available[host["available"]]) elif args.listgroup: data = api.hostgroup("get",{"output": "extend","filter": {"name":args.listgroup}}) for group in data: print u"主机组ID: {}\t主机组名: {}".format(group["groupid"],group["name"]) elif args.allgroup: data = api.hostgroup("get",{"output": "extend"}) for group in data: print u"主机组ID: {}\t主机组名: {}".format(group["groupid"],group["name"]) elif args.listtemplate: data = api.template("get",{"output": "extend","filter": {"name":args.listtemplate}}) for template in data: print u"模板ID: {}\t模板名称: {}".format(template["templateid"],template["name"]) elif args.alltemplate: data = api.template("get",{"output": "extend"}) for template in data: print u"模板ID: {}\t模板名称: {}".format(template["templateid"],template["name"]) elif args.listitem: status = {"0": "OK", "1": "Disabled"} data = api.item("get",{"output": "extend","hostids": args.listitem,"seach": {"key_": "system"},"sortfield": "name"}) for item in data: print u"监控项ID: {}\t监控项名称: {}\t监控项key: {}\t监控项状态: {}".format( item["itemid"],item["name"],item["key_"],status[item["status"]]) elif args.history: data = api.histroy("get",{"output": "extend", "history": 0, "itemids": args.histroy, "sortfield": "clock", "limit": 30}) 
for history in data: print u"监控项ID: {}\t时间: {}\t数据: {}\tNS: {}".format( history['itemid'],history['clock'],history['value'],history['ns']) elif args.trigger: data = api.trigger("get",{"triggerids": ags.trigger,"output": "extend","selectFunctions": "extend"}) func = data[0]["functions"][0] print u"函数{函数ID:%s 所属监控项ID:%s 函数名:%s 参数:%s} 触发器所属模板ID: %s\t触发器ID: %s\t表达式: %s\t优先级: %s\t描述: %s"\ %(func["functionid"],func["itemid"],func["function"],func["parameter"],tr["templateid"],tr["triggerid"],tr["expression"],tr["priority"],tr["description"]) elif args.triggers: data = api.trigger("get",{"output": ["triggerid","description","priority"], "filter": {"value": 1}, "sortfield": "priority", "sortorder": "DESC"}) for trigger in data: print u"触发器ID: {}\t优先级: {}\t描述: {}".format( trigger["triggerid"],trigger["priority"],trigger["description"]) elif args.addgroup: data = api.hostgroup("create",{"output": "extend","filter": {"name": args.addgroup}}) print u"添加主机组成功! 主机组ID:%s\t主机组: %s"%(data["groupids"],args.addgroup) elif args.addhost: host,name,groupids,templateids = args.addhost data = api.host("create",{"host": host, "name": name.decode('gbk').encode('utf8'), "interfaces": [{ "type": 1, "main": 1, "useip": 1, "ip": host, "dns": "", "port": "10050"}], "groups": map(lambda x:{"groupid": x},groupids.split(',')), "templates": map(lambda x: {"templateid": x},templateids.split(','))}) print u"添加主机成功! 主机ID: {}\t主机IP: {}\t主机名: {}".format( "".join(data["hostids"]),host,name.decode("gbk").encode("utf8")) elif args.updatehost: hostname,templateids = args.updatehost data = api.host("update",{"hostid": api.host_id(hostname), "templates": map(lambda x: {"templateid": x},templateids.split(','))}) if not data: print u"该主机不存在!" sys._exit(1) print u"主机修改成功! 主机ID: {}\t主机IP: {}".format("".join(data["hostids"]),hostname) elif args.disablehost: data = api.host("update",{"hostid": api.host_id(args.disablehost),"status": 1}) if not data: print u"该主机不存在!" 
sys._exit(1) print "该主机(%s)已经被停止监控!"%args.disablehost elif args.enablehost: data = api.host("update",{"hostid": api.host_id(args.disablehost),"status": 0}) if not data: print u"该主机不存在!" sys._exit(1) print "该主机(%s)已经启用监控!"%args.disablehost elif args.deletehost: data = api.host("delete",map(lambda x: {"hostid": x},map(lambda y: api.host_id(y),args.deletehost))) print "删除的主机有%r"%(data["hostids"]) elif args.moreaddhost: filename = args.moreaddhost import os.path import gevent if not os.path.exists(filename) or not os.path.isfile(filename): print u"当前目录没有找到此%s文件,可以选择输入绝对路径" from gevent.threadpool import ThreadPool pool = Threadpool(30) for i in open(filename): host,name,groupids,templateids = i.strip().split(",") pool.spawn(api.host,"get",{"host": host, "name": name, "interfaces": [{ "type": 1, "main": 1, "useip": 1, "ip": host, "dns": "", "port": "10050"}], "groups": map(lambda x:{"groupid": x},groupids.split(',')), "templates": map(lambda x: {"templateid": x},templateids.split(','))}) gevent.wait() print u"主机全部添加完毕!" else: parser.print_help()
def main_create_mpo_D(MPO_name, i, L, datatype):
    """Build the MPO (list of per-site tensors) for operator *MPO_name*
    acting at site *i* of an L-site chain, plus its metadata dict.

    Sites other than i (and i+1 for two-site operators) get the identity
    operator; each tensor is divided by `coefsite` and the whole list is
    promoted to complex128 if any tensor is non-real.

    Returns
    -------
    (mpo, info): the list of site tensors and the bookkeeping dict.

    Bug fix vs. original: in the nb == 2 loop the identity sites were
    built with `single_mpo_D(IDO_name, i, ...)` -- using the operator
    position instead of the current site `l`, unlike the nb == 1 branch.

    NOTE(review): if nb == float('inf') (block commented out below) both
    `alpha` and `nor` are undefined and the info dict raises NameError;
    left as-is pending the infinite-operator implementation.
    """
    if not (valid_mpo(MPO_name)):
        _exit(Exception(' MPO name is not valid.'))
    from math import sqrt
    IDO_name = MPO_name.split('-')[0] + '-Id'
    # Per-site normalisation factor derived from the identity's degrees of freedom.
    coefsite = sqrt(return_ID_DegFree(MPO_name, True))
    isnilpotent = isNilpotent(MPO_name)
    # Sweep direction for the next step: left if the operator sits in the
    # right half of the chain, right otherwise.
    if i > L / 2:
        direction = 'left'
    else:
        direction = 'right'
    nb = return_NB_Operator(MPO_name)
    if nb not in [1, 2, float('inf')]:
        _exit(Exception(' the number of operator is not valid.'))
    mpo = []
    if nb == 1:
        if i > L:
            print("Operator do not exist for this position")
            _exit(0)
        for l in xrange(1, L + 1, 1):
            if (l == i):
                alpha, tmp = single_mpo_D(MPO_name, i, L, datatype, False)
            else:
                tmp = single_mpo_D(IDO_name, l, L, datatype, True)
            mpo.append(tmp)
        nor = ['=' for l in range(L) if l < i] + ['*'] + ['-' for l in range(L) if i < l]
    if nb == 2:
        # Two-site operator occupies sites i and i+1, so i must be < L.
        if i >= L:
            print("Operator do not exist for this position")
            _exit(0)
        for l in xrange(1, L + 1, 1):
            if (l == i):
                val = double_mpo_D(MPO_name, i, L, datatype, False)
                alpha1, tmp = val[0]
            elif (l == i + 1):
                alpha2, tmp = val[1]
            else:
                # BUG FIX: identity for the *current* site l (was `i`),
                # matching the nb == 1 branch above.
                tmp = single_mpo_D(IDO_name, l, L, datatype, True)
            mpo.append(tmp)
        alpha = alpha1 * alpha2
        nor = ['=' for l in range(L) if l < i - 1] + ['*'] + ['*'] + ['-' for l in range(L) if i < l]
    # if nb == float('inf'):
    #     i=0
    #     mpo = global_mpo(L,MPO_name,datatype)
    #     nor = L*['*']
    # Divide every site tensor by the normalisation factor.
    for ikl in xrange(len(mpo)):
        # mpo[ikl]._array /= coefsite
        mpo[ikl]._array = mpo[ikl]._array / coefsite
    info = {
        'object': 'operator',
        'name': MPO_name,
        't': 0.0,
        'pos': i,
        'bond': ['bl', 'sd', 'su', 'br'],
        'chi': 1,
        'prefactor': coefsite,
        'coefficient': alpha,
        'isnilpotent': isnilpotent,
        'discarded_weights_maximal_value': 0,
        'discarded_weights_accumulated_one_step': 0,
        'discarded_weights_accumulated_total': 0,
        'clock': 0,
        'normalisation': nor,
        'nextstep': direction
    }
    from numpy import isreal, all
    if not all([isreal(m._array) for m in mpo]):
        # Promote every tensor to complex if any of them is non-real.
        for l in xrange(len(mpo)):
            mpo[l]._array = mpo[l]._array.astype('complex128')
    return mpo, info
# NOTE(review): this orphan sys.exit() appears to terminate a conditional
# branch that starts before this chunk -- confirm against the full file.
sys.exit()
infoModule.info.source = source[0]
for i in infoModule.info.source.keys():
    ## this is sort of hack-y, but stupid python returns None for null
    if infoModule.info.source[i] == None:
        infoModule.info.source[i] = ''
url = source[0]['feed_url']
log.plog('fetching feed ' + url, 1)
obj = feedfetcher.getFeed(url)
if obj == False or obj['type'] == None:
    log.plog("unreadable feed ", 5)
    # BUG FIX: sys._exit does not exist (that is os._exit) and raised
    # AttributeError here; sys.exit(0) is the intended clean exit.
    sys.exit(0)
else:
    if obj['type'][0:3] == 'rss':
        log.plog('feed is rss', 1)
    elif obj['type'][0:4] == 'atom':
        log.plog('feed is atom', 1)
    else:
        log.plog("can't tell feed type! " + obj['type'], 5)
        # BUG FIX: was sys._exit(0) -- same AttributeError as above.
        sys.exit(0)
# Only the first entry is processed (ctr guards the loop).
ctr = 0
for entry in obj['data']['entries']:
    if ctr > 0:
        break
    infoModule.info.page['url'] = entry['link']
def sig_stop(signum, frame):
    """
    Signal handler used to cleanly stop application on various signals.

    Runs the application's uninit() cleanup and then exits.

    Bug fix vs. original: sys._exit() does not exist (os._exit does) and
    raised AttributeError; sys.exit() raises SystemExit so remaining
    cleanup (atexit handlers, finally blocks) still runs, matching the
    "cleanly stop" intent.
    """
    uninit()
    sys.exit()
""" constants used by the protocol """ from sys import exit as _exit # Tag used by brine, proxy or pickled object TAG_PICKLED = "\x01" TAG_PROXIED = "\x02" TAG_LENGTH = len(TAG_PICKLED) if len(TAG_PICKLED) != len(TAG_PROXIED): _exit("bad tag length") # messages MSG_REQUEST = 1 MSG_REPLY = 2 MSG_EXCEPTION = 3 # boxing LABEL_VALUE = 1 LABEL_TUPLE = 2 LABEL_LOCAL_REF = 3 LABEL_REMOTE_REF = 4 # action handlers HANDLE_PING = 1 HANDLE_CLOSE = 2 HANDLE_GETROOT = 3 HANDLE_GETATTR = 4 HANDLE_DELATTR = 5
def main(): global MODS, COMARD, COMAR_PATH, COMAR_LIBS, STATIC_DATA if not os.path.isfile(COMAR_PATH + COMARD): print "You must run this to COMARd.py root path" sys._exit(1) makepath(comar_global.comar_data) makepath(comar_global.comar_libpath) dl = collectPyFiles(COMAR_PATH) crossList = {} deps = [] for file in dl: if file[-len(COMARD):] != COMARD: fd = open(file, "r") lines = fd.readlines() bit = 0 for l in lines: for mod in MODS: see = mod["signature"] if see == None: dh = file.split("/") if dh[-2] == mod["src_path"]: mod["files"].append(file[:]) bit = 1 break elif l[:len(see)] == see: depends = collectDepends(os.path.basename(file), lines, os.path.dirname(file)) mod["files"].append(file[:]) mod["files"].extend(depends) bit = 1 break if bit == 1: break fd = open(COMAR_PATH + COMARD, "r") lines = fd.readlines() dp = collectDepends(COMAR_PATH + COMARD, lines, COMAR_PATH, "") rv = { "class":"COMAR Main Components", "path":COMAR_LIBS, "signature":"", "files":dp } MODS.append(rv) for mod in MODS: print mod["class"],"MODULES: " p = mod["path"] print "\tDestination Dir:", p makepath(p) for f in mod["files"]: fpart = os.path.basename(f) print "\t\t", f install(f, p + "/" + fpart) print "" print "Creating Data Directories:" for var in dir(comar_global): if var[0:2] != '__': attr = getattr(comar_global, var) p = attr try: if p[0] == "/": print "\tData directory:", p makepath(p) except: pass # COMARd.py'yi de kopyalayalım install(COMAR_PATH+COMARD, COMAR_LIBS+COMARD) # om dtd kopyalayalim install(COMAR_PATH+"/om_dtd/comar.xml", comar_global.comar_om_dtd + "/comar.xml") print "Installation successfull.."