def try_instance_init(self, inst):
    """Try to "init" the given module instance.

    Returns: True on successful init. False if instance init method
    raised any Exception.
    """
    try:
        logger.log("Trying to init module : %s" % inst.get_name())
        inst.init_try += 1
        # Maybe it's a retry
        if inst.init_try > 1:
            # Do not try until 5 sec, or it's too loopy
            if inst.last_init_try > time.time() - 5:
                return False
        inst.last_init_try = time.time()

        # If it's an external, create/update Queues()
        if inst.is_external:
            inst.create_queues(self.manager)

        inst.init()
    except Exception, e:
        logger.log("Error : the instance %s raised an exception %s, I remove it!" % (inst.get_name(), str(e)))
        output = cStringIO.StringIO()
        traceback.print_exc(file=output)
        logger.log("Back trace of this remove : %s" % (output.getvalue()))
        output.close()
        return False
    return True
def session_removestate_callback_target():
    if DEBUG:
        print >>sys.stderr, "Session: session_removestate_callback_target called", currentThread().getName()
    try:
        self.sesscb_removestate(infohash, contentdest, removecontent)
    except:
        print_exc()
def handle_error(self, request, client_address):
    """Override RPCServer method for IDLE

    Interrupt the MainThread and exit server if link is dropped.
    """
    global quitting
    try:
        raise
    except SystemExit:
        raise
    except EOFError:
        global exit_now
        exit_now = True
        thread.interrupt_main()
    except:
        erf = sys.__stderr__
        print>>erf, '\n' + '-'*40
        print>>erf, 'Unhandled server exception!'
        print>>erf, 'Thread: %s' % threading.currentThread().getName()
        print>>erf, 'Client Address: ', client_address
        print>>erf, 'Request: ', repr(request)
        traceback.print_exc(file=erf)
        print>>erf, '\n*** Unrecoverable, server exiting!'
        print>>erf, '-'*40
        quitting = True
        thread.interrupt_main()
def run(bot):
    'parse input, update game state and call the bot classes do_turn method'
    log.info("======= GAME BEGINS ======= ")
    ants = Ants()
    map_data = ''
    while True:
        try:
            current_line = sys.stdin.readline().rstrip('\r\n')  # strip new line char
            if current_line.lower() == 'ready':
                ants.setup(map_data)
                bot.do_setup(ants)
                ants.finish_turn()
                map_data = ''
            elif current_line.lower() == 'go':
                ants.update(map_data)
                # call the do_turn method of the class passed in
                bot.do_turn(ants)
                ants.finish_turn()
                map_data = ''
            else:
                map_data += current_line + '\n'
        except EOFError:
            break
        except KeyboardInterrupt:
            raise
        except:
            # don't raise error or return so that bot attempts to stay alive
            traceback.print_exc(file=sys.stderr)
            sys.stderr.flush()
def _import_transporter(self, transporter):
    """Imports transporter module and class, returns class.

    Input value can be:
    * a full/absolute module path, like
      "MyTransporterPackage.SomeTransporterClass"
    """
    transporter_class = None
    module = None
    alternatives = []
    default_prefix = 'cloud_sync_app.transporter.transporter_'
    if not transporter.startswith(default_prefix):
        alternatives.append('%s%s' % (default_prefix, transporter))
    for module_name in alternatives:
        try:
            module = __import__(module_name, globals(), locals(), ["TRANSPORTER_CLASS"], -1)
        except ImportError:
            import traceback
            traceback.print_exc()

    if not module:
        msg = "The transporter module '%s' could not be found." % transporter
        if len(alternatives) > 1:
            msg = '%s Tried (%s)' % (msg, ', '.join(alternatives))
        self.logger.error(msg)
    else:
        try:
            classname = module.TRANSPORTER_CLASS
            module = __import__(module_name, globals(), locals(), [classname])
            transporter_class = getattr(module, classname)
        except AttributeError:
            self.logger.error("The Transporter module '%s' was found, but its Transporter class '%s' could not be found." % (module_name, classname))
    return transporter_class
def install(self):
    # get content type.
    if is_file(self.download_url):
        path = fileurl_to_path(self.download_url)
        self.content_type = mimetypes.guess_type(path)[0]
    else:
        headerinfo = Downloader.read_head_info(self.download_url)
        self.content_type = headerinfo['content-type']
    if is_html(self.content_type):
        # note: maybe got 404 or 503 http status code.
        logger.error("Invalid content-type: `%s`" % self.content_type)
        return

    if os.path.isdir(self.install_dir):
        logger.info("You have already installed `%s`" % self.pkg.name)
        return

    self.download_and_extract()
    logger.info("\nThis could take a while. You can run the following command on another shell to track the status:")
    logger.info("  tail -f %s\n" % self.logfile)
    logger.info("Installing %s into %s" % (self.pkg.name, self.install_dir))
    try:
        self.patch()
        self.configure()
        self.make()
        self.make_install()
    except Exception:
        import traceback
        traceback.print_exc()
        rm_r(self.install_dir)
        logger.error("Failed to install %s. Check %s to see why." % (self.pkg.name, self.logfile))
        sys.exit(1)
    self.symlink()
    logger.info("\nInstalled %(pkgname)s successfully." % {"pkgname": self.pkg.name})
def _create_database(self):
    """
    Create a persistent database to store expensive parse results.

    Check that the Python version used to create the database is the same
    as the running Python instance, otherwise re-create it.
    """
    project_database_file_name = join(self._output_path, "project_database")
    create_new = False
    key = b"version"
    version = str((4, sys.version)).encode()
    database = None
    try:
        database = DataBase(project_database_file_name)
        create_new = (key not in database) or (database[key] != version)
    except KeyboardInterrupt:
        raise
    except:  # pylint: disable=bare-except
        traceback.print_exc()
        create_new = True

    if create_new:
        database = DataBase(project_database_file_name, new=True)
    database[key] = version

    return PickledDataBase(database)
def make_new_version_message(self, path):
    """Make a new version message for the repo at the given path."""
    try:
        cwd = os.getcwd()
        os.chdir(path)
        version = self.get_current_tag()
        if version[0] is None:
            return
        messages_path = os.path.join(path, 'messages.json')
        message_path = self.rewrite_messages_json(messages_path, version)
        if os.path.exists(message_path):
            os.remove(message_path)
        with open(message_path, mode='w', encoding='utf-8') as f:
            header = '{} {}'.format(
                os.path.basename(path),
                os.path.splitext(os.path.basename(message_path))[0])
            f.write('{}\n{}\n'.format(header, '-' * (len(header) + 1)))
            f.write(self.get_commit_messages_since(version))
        self.window.run_command('open_file', args={'file': message_path})
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        os.chdir(cwd)
def apply(self, func, args):
    try:
        return func(self, *args)
    except:
        from traceback import print_exc
        print_exc()
        raise EvaluationFailedError
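A hedged usage sketch of the pattern above (the names EvaluationFailedError and Evaluator are assumptions drawn from the snippet, not the original project's code): the wrapper prints the real traceback first, then surfaces a single domain-specific error for callers to catch.

# Hedged sketch, not the original project's code: assumes an
# EvaluationFailedError exception and an object exposing apply() as above.
class EvaluationFailedError(Exception):
    pass

class Evaluator(object):
    def apply(self, func, args):
        try:
            return func(self, *args)
        except Exception:
            from traceback import print_exc
            print_exc()  # the original traceback still reaches stderr
            raise EvaluationFailedError

try:
    Evaluator().apply(lambda self: 1 / 0, ())
except EvaluationFailedError:
    print("evaluation failed; details already on stderr")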
def button_pushed(self, ev):
    control = ev.Source
    n = int(ev.ActionCommand)
    type_name = self._results[n].type_name
    with_type = False
    if type_name == "ANY":
        with_type = True
        from mytools_Mri.ui.tools import create_popupmenu
        entries = ((1, 0, 0, "void", "VOID", None),
                   (2, 1, 0, "boolean", "BOOLEAN", None),
                   (3, 2, 0, "byte", "BYTE", None),
                   (4, 3, 0, "short", "SHORT", None),
                   (5, 4, 0, "unsigned short", "UNSIGNED_SHORT", None),
                   (6, 5, 0, "long", "LONG", None),
                   (7, 6, 0, "unsigned long", "UNSIGNED_LONG", None),
                   (8, 7, 0, "hyper", "HYPER", None),
                   (9, 8, 0, "unsigned hyper", "UNSIGNED_HYPER", None),
                   (10, 9, 0, "float", "FLOAT", None),
                   (11, 10, 0, "double", "DOUBLE", None),
                   (12, 11, 0, "string", "STRING", None),
                   (13, 12, 0, "type", "TYPE", None),
                   (14, 13, 0, "enum", "ENUM", None),
                   (None, 14, 0, "", "", None),
                   (15, 15, 0, "struct", "STRUCT", None),
                   (16, 16, 0, "exception", "EXCEPTION", None),
                   (17, 17, 0, "sequence", "SEQUENCE", None),
                   (18, 18, 0, "interface", "INTERFACE", None))
        popup = create_popupmenu(self.ctx, entries)
        r = popup.execute(ev.Source.getPeer(),
                          Rectangle(0, ev.Source.getPosSize().Height, 0, 0), 0)
        if r == 0:
            return
        value_type_name = popup.getCommand(r)
        control.setLabel(popup.getItemText(r))
        if value_type_name in self.FROM_HISTORY:
            self.set_enable("edit_%s" % n, False)
        else:
            self.set_enable("edit_%s" % n, True)
            self._results[n].value_type = value_type_name
            return

    obj = None
    try:
        self._history_selector = HistorySelectorDialog(self.ctx, self.cast)
        obj = self._history_selector.execute(
            "History", self.TreeSelectionListener(self),
            allow_void=True, void_listener=self.ItemListener(self))
        self._history_selector = None
    except Exception as e:
        print(e)
        traceback.print_exc()
    finally:
        if obj is None:
            return
    name = "edit_%s" % n
    if obj == "void":
        self.set_text(name, "void")
        self._results[n].value = None
        self._results[n].value_type = "VOID"
    elif obj:
        self.set_text(name, str(obj))
        self._results[n].value = obj
        self._results[n].value_type = value_type_name if with_type else type_name
def connect(self):
    """
    Override the connect() function to intercept calls to certain
    host/ports.

    If no app at host/port has been registered for interception then
    a normal HTTPSConnection is made.
    """
    if debuglevel:
        sys.stderr.write('connect: %s, %s\n' % (self.host, self.port,))

    try:
        (app, script_name) = self.get_app(self.host, self.port)
        if app:
            if debuglevel:
                sys.stderr.write('INTERCEPTING call to %s:%s\n' %
                                 (self.host, self.port,))
            self.sock = wsgi_fake_socket(app, self.host, self.port,
                                         script_name)
        else:
            HTTPSConnection.connect(self)
    except Exception, e:
        if debuglevel:
            # intercept & print out tracebacks
            traceback.print_exc()
        raise
def get_data_from_url(url, num_tries=5):
    tries = 0
    while True:
        try:
            u = urllib2.urlopen(url)
            result = u.read()
            u.close()
            if result:
                data = json.loads(result)
                return data
            return None
        except:
            tries += 1
            if tries == num_tries:
                break
            wait = 2 ** (tries + 1)
            error = 'Exception for {url}. Sleeping for {wait} secs'.format(
                url=url, wait=wait)
            errors.append(error)
            print error
            traceback.print_exc(file=sys.stdout)
            time.sleep(wait)
    return None
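For reference, a minimal self-contained sketch of the same retry-with-exponential-backoff shape in Python 3 (stdlib only; the function name is an assumption, not from the original project). The waits of 4, 8, 16... seconds come from the 2 ** (tries + 1) schedule used above.

# Hedged sketch of the retry/backoff pattern above, Python 3 stdlib only.
import json
import time
import traceback
import urllib.request

def get_json_with_retries(url, num_tries=5):
    for tries in range(1, num_tries + 1):
        try:
            with urllib.request.urlopen(url) as u:
                body = u.read()
            return json.loads(body) if body else None
        except Exception:
            if tries == num_tries:
                break
            wait = 2 ** (tries + 1)  # 4, 8, 16, ... seconds
            traceback.print_exc()
            time.sleep(wait)
    return None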
def ruling_process(queue, db, stayalive=_PROCESSOR_STAYALIVE):
    """
    :param queue: job queue. contains ids that reference the _JOBS global.
    :type queue: multiprocessing.queues.Queue
    :param stayalive: How long to keep the process running if it runs out
        of jobs before shutdown.
    :type stayalive: float
    :return:
    """
    global _JOBS, _PROCESSOR_STAYALIVE
    # print("Queue: {}".format(queue))
    # print("Stayalive: {}".format(stayalive))
    # print("_JOBS: {}".format(_JOBS))
    engine = RulesProcessor(db)
    while not queue.empty():
        try:
            job_id = queue.get()
            try:
                job_params = _JOBS[job_id]
                job = RuleJob.rebuild(job_params)
                engine.process(job)
            except:
                traceback.print_exc()
                logger.warn("error processing job {}.".format(job_id))
                logger.warn("JOBS: {}".format(_JOBS))
            if queue.empty():
                time.sleep(stayalive)
        except KeyboardInterrupt:
            break
        except Exception as e:
            traceback.print_exc()
    reset_globals()
    return engine
def __get_module_from_str(self, modname, print_exception, pyfile):
    """
    Import the module in the given import path.

    * Returns the "final" module, so importing "coilib40.subject.visu"
      returns the "visu" module, not the "coilib40" as returned by __import__.
    """
    try:
        mod = __import__(modname)
        for part in modname.split('.')[1:]:
            mod = getattr(mod, part)
        return mod
    except:
        if print_exception:
            import pydev_runfiles_xml_rpc
            import pydevd_io
            buf_err = pydevd_io.StartRedirect(keep_original_redirection=True, std='stderr')
            buf_out = pydevd_io.StartRedirect(keep_original_redirection=True, std='stdout')
            try:
                import traceback
                traceback.print_exc()
                sys.stderr.write('ERROR: Module: %s could not be imported (file: %s).\n' % (modname, pyfile))
            finally:
                pydevd_io.EndRedirect('stderr')
                pydevd_io.EndRedirect('stdout')

            pydev_runfiles_xml_rpc.notifyTest(
                'error', buf_out.getvalue(), buf_err.getvalue(), pyfile, modname, 0)
        return None
def load_preferences(self):
    self.progress_callback(None, 1)
    self.progress_callback(_('Starting restoring preferences and column metadata'), 0)
    prefs_path = os.path.join(self.src_library_path, 'metadata_db_prefs_backup.json')
    if not os.path.exists(prefs_path):
        self.progress_callback(_('Cannot restore preferences. Backup file not found.'), 1)
        return False
    try:
        prefs = DBPrefs.read_serialized(self.src_library_path, recreate_prefs=False)
        db = RestoreDatabase(self.library_path, default_prefs=prefs,
                             restore_all_prefs=True,
                             progress_callback=self.progress_callback)
        db.commit()
        db.close()
        self.progress_callback(None, 1)
        if 'field_metadata' in prefs:
            self.progress_callback(_('Finished restoring preferences and column metadata'), 1)
            return True
        self.progress_callback(_('Finished restoring preferences'), 1)
        return False
    except:
        traceback.print_exc()
        self.progress_callback(None, 1)
        self.progress_callback(_('Restoring preferences and column metadata failed'), 0)
        return False
def install(mysql_host, mysql_port, mysql_user, mysql_pwd, mysql_name, mysql_prev):
    global sql
    try:
        conn = mysqldb.Connection(host=mysql_host, port=int(mysql_port),
                                  user=mysql_user, passwd=mysql_pwd)
        sql = sql.format(mysql_name, mysql_prev, get_version())
        cursor = conn.cursor()
        for s in sql.split(';'):
            if not s.strip():
                continue
            cursor.execute(s + ';')
        conn.commit()
        mysql_cnf_path = os.path.join(ROOT_PATH, 'mycnf.py')
        mycnf = open(mysql_cnf_path, 'w')
        # note: the '{2}' placeholder below was masked as '******' in the
        # source; restored from the surrounding {0}..{5} pattern.
        content = "MYSQL_DB_HOST = '{0}'\nMYSQL_DB_PORT = {1}\n"\
                  "MYSQL_DB_USER = '{2}'\nMYSQL_DB_PWD = '{3}'\n"\
                  "MYSQL_DB_NAME = '{4}'\n"\
                  "MYSQL_PRE = '{5}'".format(mysql_host, mysql_port, mysql_user,
                                             mysql_pwd, mysql_name, mysql_prev)
        mycnf.write(content)
        mycnf.close()
        cursor.close()
        conn.close()
        return {"status": True}
    except Exception as e:
        import traceback
        traceback.print_exc()
        return {"status": False, "errmsg": str(e)}
def loadModules(prefix):
    """ Return modules, errorlist, num_testmodules, suite. """
    try:
        from minds import allmodules
    except ImportError:
        traceback.print_exc()
    try:
        reload(allmodules)
    except ImportError:
        # bug [856103] (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=856103&group_id=5470)
        pass

    modules = []
    errorlist = []
    suite = unittest.TestSuite()
    num_testmodules = 0

    for (package, name) in allmodules.modules:
        package_name = '%s.%s' % (package, name)
        try:
            mod = __import__(package_name, globals(), locals(), [name])
        except Exception, e:
            buf = StringIO.StringIO()
            traceback.print_exc(file=buf)
            errorlist.append(buf.getvalue())
            continue
        modules.append(mod)
        module_suite = loadTestCases(mod, prefix)
        if module_suite:
            num_testmodules += 1
            suite.addTest(module_suite)

    return modules, errorlist, num_testmodules, suite
def get_value(self, key, args, kwargs):
    if key == '':
        return ''
    try:
        key = key.lower()
        try:
            b = self.book.get_user_metadata(key, False)
        except:
            traceback.print_exc()
            b = None
        if b is not None and b['datatype'] == 'composite':
            if key in self.composite_values:
                return self.composite_values[key]
            self.composite_values[key] = 'RECURSIVE_COMPOSITE FIELD (S2D) ' + key
            self.composite_values[key] = \
                self.evaluate(b['display']['composite_template'], [], kwargs)
            return self.composite_values[key]
        if key in kwargs:
            val = kwargs[key]
            if isinstance(val, list) or isinstance(val, FormatsList):
                val = ','.join(val)
            return val.replace('/', '_').replace('\\', '_')
        return ''
    except:
        traceback.print_exc()
        return key
def fetch_collection(cls, feeds, prefix_log):
    """Fetches a collection of Feed.

    Args:
        feeds: the collection of Feed to fetch
        prefix_log: a prefix to use in the log to know who called it

    Returns:
        The time elapsed in seconds.
    """
    start = timezone.now()
    log_desc = '%s - Fetching %s Feeds' % (prefix_log, feeds.count())
    logger.info('%s => start' % (log_desc,))
    for feed in feeds:
        try:
            feed.fetch()
        except Exception as err:
            traceback.print_exc()
            print err
            logger.error('%s - Fetching => [KO]\n%s' % (feed.log_desc, err))
    delta = timezone.now() - start
    logger.info('%s in %ss => end' % (log_desc, delta.total_seconds()))
    return delta
def dump():
    assert isinstance(application.options, OptionsCore), 'Invalid application options %s' % application.options
    if not application.options.writeConfigurations: return
    if not __debug__:
        print('Cannot dump configuration file if python is run with "-O" or "-OO" option', file=sys.stderr)
        sys.exit(1)
    configFile = application.options.configurationPath
    try:
        if os.path.isfile(configFile):
            with open(configFile, 'r') as f: config = load(f)
        else: config = {}

        assembly = application.assembly = ioc.open(aop.modulesIn('__setup__.**'), config=config)
        assert isinstance(assembly, Assembly), 'Invalid assembly %s' % assembly
        try:
            if os.path.isfile(configFile): os.rename(configFile, configFile + '.bak')
            # Forcing the processing of all configurations
            for config in assembly.configurations: assembly.processForName(config)
            with open(configFile, 'w') as f: save(assembly.trimmedConfigurations(), f)
            print('Created "%s" configuration file' % configFile)
        finally: ioc.deactivate()
    except SystemExit: raise
    except:
        print('-' * 150, file=sys.stderr)
        print('A problem occurred while dumping configurations', file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        print('-' * 150, file=sys.stderr)
def fatalError(explanation):
    if config.state['Debug']:
        print "omniidl: fatalError occurred, in debug mode."
        for line in explanation.split("\n"):
            print ">> " + line
        if have_traceback:
            print "Stack:"
            print "-------------------------"
            traceback.print_stack()
            print "Exception:"
            print "-------------------------"
            traceback.print_exc()
        sys.exit(1)

    lines = explanation.split("\n")
    lines = ["Fatal error in C++ backend", ""] + lines
    for line in lines:
        sys.stderr.write("omniidl: %s\n" % line)

    sys.stderr.write("""\
For more information (mailing list archives, bug reports etc.) please visit
the webpage:

  http://omniorb.sourceforge.net/

""")
    sys.exit(1)
def tearDown(self):
    try:
        if not hasattr(self, 'stderr'):
            self.unhook_stderr()
        if hasattr(self, 'stderr'):
            sys.__stderr__.write(self.stderr)
    except:
        traceback.print_exc()
    if hasattr(self, '_timer'):
        self._timer.cancel()
        hub = gevent.hub.get_hub()
        if self._switch_count is not None and hasattr(hub, 'switch_count'):
            msg = ''
            if hub.switch_count < self._switch_count:
                msg = 'hub.switch_count decreased?\n'
            elif hub.switch_count == self._switch_count:
                if self.switch_expected:
                    msg = '%s.%s did not switch\n' % (type(self).__name__, self.testname)
            elif hub.switch_count > self._switch_count:
                if not self.switch_expected:
                    msg = '%s.%s switched but expected not to\n' % (type(self).__name__, self.testname)
            if msg:
                print >> sys.stderr, 'WARNING: ' + msg
        if hasattr(gevent.core, '_event_count'):
            event_count = (gevent.core._event_count(), gevent.core._event_count_active())
            if event_count > self._event_count:
                args = (type(self).__name__, self.testname, self._event_count, event_count)
                sys.stderr.write('WARNING: %s.%s event count was %s, now %s\n' % args)
                gevent.sleep(0.1)
    else:
        sys.stderr.write('WARNING: %s.setUp does not call base class setUp\n' % (type(self).__name__, ))
def run(self, cont=0):
    # Print HTTP messages
    for key in self.http.keys():
        print '%s: %s' % (key, self.http[key])
    print

    # Redirect stderr
    sys.stderr = open(self._tempfile, 'w')

    # Grab query string
    self.get_form()

    # Function handling
    if not self.active:
        ret = _not_active(self)
        print self
        sys.exit(0)
    elif not '_no_function' in self.valid.keys():
        self.valid['_no_function'] = _no_function
    if not self.function or self.function not in self.valid.keys():
        self.function = '_no_function'
    try:
        ret = self.valid[self.function](self)
    except:
        traceback.print_exc()
        sys.stderr.flush()
        f = open(self._tempfile, 'r')
        self.title = 'CGI Error Occurred'
        self.append(Pre(f.read()))
        f.close()

    # Print Document object
    print self
    if not cont:
        sys.exit(0)  # Provide a speedy exit
def dump_raw():
    try:
        mp = MPTable()
        s = "MP Table -- Raw bytes and structure decode.\n\n"
        if mp:
            s += str(mp.floating_pointer) + '\n'
            s += bits.dumpmem(mp._floating_pointer_memory) + '\n'

            s += str(mp.header) + '\n'
            s += bits.dumpmem(mp._base_header_memory) + '\n'

            for base_struct in mp.base_structures:
                s += str(base_struct) + '\n'
                s += bits.dumpmem(base_struct.raw_data) + '\n'

            if mp.header.extended_table_length:
                for extended_struct in mp.extended_structures:
                    s += str(extended_struct) + '\n'
                    s += bits.dumpmem(extended_struct.raw_data) + '\n'
        else:
            s += "MP Table not found.\n"
        ttypager.ttypager_wrap(s, indent=False)
    except:
        print("Error parsing MP Table information:")
        import traceback
        traceback.print_exc()
def onecmd(self, s):
    # do standard parsing of line
    name, line, all = self.parseline(s)

    # look up the method
    method = getattr(self, "do_%s" % name, None)

    # if a proper method was found, try and call it.
    if method is not None and callable(method):
        # parse arguments and keyword arguments from line
        args, kwargs = parse_line(line)
        try:
            # try to call the method
            return method(*args, **kwargs)
        except TypeError as e:
            # if something went wrong, print the help for that method
            if self.debug:
                traceback.print_exc()
            print "%s: %s" % (type(e), e)
            if name != 'help':
                return self.do_help(name)
            return self.do_help(None)

    # if no proper method with the name was found, do what cmd always does
    return cmd.Cmd.onecmd(self, s)
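The dispatch above relies on a parse_line() helper that is not shown. A hedged sketch of what such a helper might look like (an assumption based on how args and kwargs are used above, not the original project's implementation): space-separated tokens become positional arguments, key=value tokens become keyword arguments.

# Hedged sketch of a parse_line() compatible with the dispatch above.
def parse_line(line):
    args, kwargs = [], {}
    for token in (line or '').split():
        if '=' in token:
            key, value = token.split('=', 1)
            kwargs[key] = value
        else:
            args.append(token)
    return args, kwargs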
def _init_list_add_on_change(self, key, view_attr, lt_attr):
    view = self.view
    # this can end up being called *before* plugin_loaded() because
    # ST creates the ViewEventListeners *before* calling plugin_loaded()
    global _lt_settings
    if not isinstance(_lt_settings, sublime.Settings):
        try:
            _lt_settings = sublime.load_settings(
                "LaTeXTools.sublime-settings"
            )
        except Exception:
            traceback.print_exc()

    self.v_attr_updates = view_attr
    self.lt_attr_updates = lt_attr

    for attr_name, d in self.v_attr_updates.items():
        settings_name = d["setting"]
        self.__dict__[attr_name] = get_setting(settings_name, view=view)

    for attr_name, d in self.lt_attr_updates.items():
        if attr_name in self.__dict__:
            continue
        settings_name = d["setting"]
        self.__dict__[attr_name] = _lt_settings.get(settings_name)

    _lt_settings.add_on_change(
        key, lambda: self._on_setting_change(False))
    self.view.settings().add_on_change(
        key, lambda: self._on_setting_change(True))
def index(self):
    try:
        report_form = order_report
        return dict(report_form=report_form, values={})
    except:
        traceback.print_exc()
def longPollThread(self):
    connection = None
    last_url = None
    while True:
        if self.stop:
            return
        sleep(1)
        url = self.longPollURL
        if url != '':
            proto = self.proto
            host = self.host
            parsedUrl = urlsplit(url)
            if parsedUrl.scheme != '':
                proto = parsedUrl.scheme
            if parsedUrl.netloc != '':
                host = parsedUrl.netloc
                url = url[url.find(host) + len(host):]
                if url == '':
                    url = '/'
            try:
                if not connection:
                    connection = self.connect(proto, host, LONG_POLL_TIMEOUT)
                    self.sayLine("LP connected to %s", host)
                self.longPollActive = True
                (connection, result) = self.request(connection, url, self.headers)
                self.longPollActive = False
                self.queueWork(result['result'])
                self.sayLine('long poll: new block %s%s', (result['result']['data'][56:64], result['result']['data'][48:56]))
                last_url = self.longPollURL
            except NotAuthorized:
                self.sayLine('long poll: Wrong username or password')
            except RPCError as e:
                self.sayLine('long poll: %s', e)
            except (IOError, httplib.HTTPException, ValueError):
                self.sayLine('long poll exception:')
                traceback.print_exc()
def interpro_result(interpro_submit_sequences, email, developing, script_dir):
    protein_ipr_db_domain = {}
    # this is done per 25
    for protein_name, interpro_result in iprscan_soappy.runInterpro(interpro_submit_sequences, email):
        # get dict with as key the protein name and as value various stuff
        ipr_domain_names = []
        protein_ipr_db_domain[protein_name] = {}
        for ipr_code in interpro_result:
            # list of ipr domain names for this protein
            if 'ipr_names' in interpro_result[ipr_code]:
                ipr_domain_names += interpro_result[ipr_code]['ipr_names']
            for database in interpro_result[ipr_code]:
                # update it with database and database specific name
                protein_ipr_db_domain[protein_name][ipr_code] = {database: interpro_result[ipr_code][database]}
        # make a separate list for PFAM domains, because these are used later
        protein_ipr_db_domain[protein_name]['ipr_domain_names'] = ipr_domain_names
        if developing:
            try:
                interpro_file = script_dir + os.sep + 'interpro_results' + os.sep + fix_file_names(protein_name.split('|')[0].strip() + '_interpro.p')
                f = open(interpro_file, 'wb')
                pickle.dump(protein_ipr_db_domain[protein_name], f)
                print 'wrote interpro data to ' + interpro_file
            except:
                # print_exc() writes to stderr itself and returns None,
                # so it must not be wrapped in a print statement.
                traceback.print_exc()
    return protein_ipr_db_domain
def deploy():
    assert isinstance(application.options, OptionsCore), 'Invalid application options %s' % application.options
    if not application.options.start: return
    try:
        if not os.path.isfile(application.options.configurationPath):
            print('The configuration file "%s" doesn\'t exist, create one by running the application '
                  'with "-dump" option' % application.options.configurationPath, file=sys.stderr)
            sys.exit(1)
        with open(application.options.configurationPath, 'r') as f: config = load(f)

        assembly = application.assembly = ioc.open(aop.modulesIn('__setup__.**'), config=config)
        assert isinstance(assembly, Assembly), 'Invalid assembly %s' % assembly

        import logging
        logging.basicConfig(format=format())
        for name in warning_for(): logging.getLogger(name).setLevel(logging.WARN)
        for name in info_for(): logging.getLogger(name).setLevel(logging.INFO)
        for name in debug_for(): logging.getLogger(name).setLevel(logging.DEBUG)

        try: assembly.processStart()
        finally: ioc.deactivate()
    except SystemExit: raise
    except (SetupError, ConfigError):
        print('-' * 150, file=sys.stderr)
        print('A setup or configuration error occurred while deploying, try to rebuild the application properties by '
              'running the application with "configure components" options', file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        print('-' * 150, file=sys.stderr)
    except:
        print('-' * 150, file=sys.stderr)
        print('A problem occurred while deploying', file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        print('-' * 150, file=sys.stderr)
def main():
    """
    Starts the debug adapter (creates a thread to read from stdin and another
    to write to stdout as expected by the vscode debug protocol).

    We pass the command processor to the reader thread as the idea is that the
    reader thread will read a message, convert it to an instance of the message
    in the schema and then forward it to the command processor which will
    interpret and act on it, posting the results to the writer queue.
    """
    log = None
    try:
        import sys

        try:
            import robocorp_code_debug_adapter
            import robocorp_code
        except ImportError:
            # Automatically add it to the path if __main__ is being executed.
            sys.path.append(
                os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
            import robocorp_code_debug_adapter  # @UnusedImport
            import robocorp_code

        robocorp_code.import_robocorp_ls_core()

        from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import (
            STOP_WRITER_THREAD,
        )
        from robocorp_ls_core.robotframework_log import (
            get_logger,
            configure_logger,
            log_args_and_python,
        )

        from robocorp_code_debug_adapter.constants import LOG_FILENAME
        from robocorp_code_debug_adapter.constants import LOG_LEVEL

        configure_logger("dap", LOG_LEVEL, LOG_FILENAME)
        log = get_logger("robocorp_code_debug_adapter.__main__")
        log_args_and_python(log, sys.argv, robocorp_code)

        from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import (
            reader_thread,
        )
        from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import (
            writer_thread,
        )
        from queue import Queue
        from robocorp_code_debug_adapter.debug_adapter_comm import DebugAdapterComm

        to_client_queue = Queue()
        comm = DebugAdapterComm(to_client_queue)

        write_to = sys.stdout
        read_from = sys.stdin

        if sys.version_info[0] <= 2:
            if sys.platform == "win32":
                # must read streams as binary on windows
                import msvcrt

                msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        else:
            # Py3
            write_to = sys.stdout.buffer
            read_from = sys.stdin.buffer

        writer = threading.Thread(
            target=writer_thread,
            args=(write_to, to_client_queue, "write to client"),
            name="Write to client (dap __main__)",
        )
        reader = threading.Thread(
            target=reader_thread,
            args=(read_from, comm.from_client, to_client_queue, b"read from client"),
            name="Read from client (dap __main__)",
        )

        reader.start()
        writer.start()

        reader.join()
        log.debug("Exited reader.\n")
        to_client_queue.put(STOP_WRITER_THREAD)
        writer.join()
        log.debug("Exited writer.\n")
    except:
        if log is not None:
            log.exception("Error")

        # Critical error (the logging may not be set up properly).
        # Print to file and stderr.
        with open(_critical_error_log_file, "a+") as stream:
            traceback.print_exc(file=stream)

        traceback.print_exc()
    finally:
        if log is not None:
            log.debug("Exited main.\n")
def play_video():
    try:
        nonlocal frame_buffer, running, video_fps, is_webcam, num_frames, frames_displayed, vid_done

        video_frame_times = MovingAverage(100)
        frame_time_stabilizer = frame_time_target
        last_time = None
        stabilizer_step = 0.0005
        progress_bar = ProgressBar(30, num_frames)

        while running:
            frame_time_start = time.time()

            if not frame_buffer.empty():
                next_time = time.time()
                if last_time is not None:
                    video_frame_times.add(next_time - last_time)
                    video_fps = 1 / video_frame_times.get_avg()
                if out_path is None:
                    cv2.imshow(path, frame_buffer.get())
                else:
                    out.write(frame_buffer.get())
                frames_displayed += 1
                last_time = next_time

                if out_path is not None:
                    if video_frame_times.get_avg() == 0:
                        fps = 0
                    else:
                        fps = 1 / video_frame_times.get_avg()
                    progress = frames_displayed / num_frames * 100
                    progress_bar.set_val(frames_displayed)

                    print('\rProcessing Frames %s %6d / %6d (%5.2f%%) %5.2f fps '
                          % (repr(progress_bar), frames_displayed, num_frames, progress, fps), end='')

            # This is split because you don't want savevideo to require cv2 display functionality (see #197)
            if out_path is None and cv2.waitKey(1) == 27:
                # Press Escape to close
                running = False
            if not (frames_displayed < num_frames):
                running = False

            if not vid_done:
                buffer_size = frame_buffer.qsize()
                if buffer_size < args.video_multiframe:
                    frame_time_stabilizer += stabilizer_step
                elif buffer_size > args.video_multiframe:
                    frame_time_stabilizer -= stabilizer_step
                    if frame_time_stabilizer < 0:
                        frame_time_stabilizer = 0

                new_target = frame_time_stabilizer if is_webcam else max(
                    frame_time_stabilizer, frame_time_target)
            else:
                new_target = frame_time_target

            next_frame_target = max(
                2 * new_target - video_frame_times.get_avg(), 0)
            target_time = frame_time_start + next_frame_target - 0.001  # Let's just subtract a millisecond to be safe

            if out_path is None or args.emulate_playback:
                # This gives more accurate timing than if sleeping the whole amount at once
                while time.time() < target_time:
                    time.sleep(0.001)
            else:
                # Let's not starve the main thread, now
                time.sleep(0.001)
    except:
        # See issue #197 for why this is necessary
        import traceback
        traceback.print_exc()
    #'smb://10.10.10.2/?timeout=10&dc=10.10.10.2&proxytype=socks5&proxyserver=127.0.0.1',
    #'smb://10.10.10.2/?timeout=10&dc=10.10.10.2&proxytype=socks5&proxyserver=127.0.0.1&proxyuser=admin&proxypass=alma',
    #'smb://10.10.10.2/?timeout=10&dc=10.10.10.2&proxytype=socks5&proxyserver=127.0.0.1&proxyuser=admin&proxypass=alma&dc=10.10.10.2&dns=8.8.8.8',
    #'smb://10.10.10.2/?timeout=10&dc=10.10.10.2&proxytype=socks5-ssl&proxyserver=127.0.0.1&proxyuser=admin&proxypass=alma&dc=10.10.10.2&dns=8.8.8.8',
    #'smb://10.10.10.2/?timeout=10&dc=10.10.10.2&proxytype=multiplexor&proxyserver=127.0.0.1',
    #'smb://10.10.10.2/?timeout=10&dc=10.10.10.2&proxytype=multiplexor&proxyserver=127.0.0.1&proxyagentid=alma',
    #'smb://10.10.10.2/?timeout=10&dc=10.10.10.2&proxytype=multiplexor&proxyserver=127.0.0.1&proxyagentid=alma&proxytimeout=111',
    'smb://10.10.10.2/C$/test/tst111.dmp?timeout=10&dc=10.10.10.2&proxytype=multiplexor&proxyhost=127.0.0.1&proxyport=1&proxyagentid=alma&proxytimeout=111',
]

for url in url_tests:
    print('===========================================================================')
    print(url)
    try:
        dec = SMBConnectionURL(url)
        creds = dec.get_credential()
        target = dec.get_target()
        smbfile = dec.get_file()
        print(smbfile)
    except Exception as e:
        import traceback
        traceback.print_exc()
        print('ERROR! Reason: %s' % e)
        input()
    else:
        print(str(creds))
        print(str(target))
        input()
def printException_loggerwarn():
    if verbosity == True:
        logger.warn("Oh snap! We ran into a non-critical error. Here's the traceback.")
        # print_exc() writes to stderr and returns None, so it cannot feed
        # the logger; format_exc() returns the traceback as a string.
        logger.warn(traceback.format_exc())
def printException():
    if tracebacksEnabled == True:
        print("Here's the full traceback:")
        traceback.print_exc()
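The two helpers above differ in one important way: traceback.print_exc() writes directly to a stream and returns None, while traceback.format_exc() returns the same text as a string that can be handed to a logger. A minimal sketch of the distinction (the function name is illustrative, not from either project):

# Hedged sketch: format_exc() returns the traceback text,
# print_exc() writes it to stderr and returns None.
import logging
import traceback

logger = logging.getLogger(__name__)

def log_current_exception():
    try:
        1 / 0
    except ZeroDivisionError:
        traceback.print_exc()                    # goes straight to stderr
        logger.warning(traceback.format_exc())   # same text, via the logger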
def init(
    job_type=None,
    dir=None,
    config=None,  # TODO(jhr): type is a union for argparse/absl
    project=None,
    entity=None,
    reinit=None,
    tags=None,
    group=None,
    name=None,
    notes=None,
    magic=None,  # TODO(jhr): type is union
    config_exclude_keys=None,
    config_include_keys=None,
    anonymous=None,
    mode=None,
    allow_val_change=None,
    resume=None,
    force=None,
    tensorboard=None,  # alias for sync_tensorboard
    sync_tensorboard=None,
    monitor_gym=None,
    save_code=None,
    id=None,
    settings=None,
):
    """Initialize W&B

    Spawns a new process to start or resume a run locally and communicate
    with a wandb server. Should be called before any calls to wandb.log.

    Args:
        job_type (str, optional): The type of job running, defaults to 'train'
        dir (str, optional): An absolute path to a directory where metadata
            will be stored.
        config (dict, argparse, or absl.flags, str, optional):
            Sets the config parameters (typically hyperparameters) to store
            with the run. See also wandb.config.
            If dict, argparse or absl.flags: will load the key value pairs
                into the runs config object.
            If str: will look for a yaml file that includes config parameters
                and load them into the run's config object.
        project (str, optional): W&B Project.
        entity (str, optional): W&B Entity.
        reinit (bool, optional): Allow multiple calls to init in the same
            process.
        tags (list, optional): A list of tags to apply to the run.
        group (str, optional): A unique string shared by all runs in a given
            group.
        name (str, optional): A display name for the run which does not have
            to be unique.
        notes (str, optional): A multiline string associated with the run.
        magic (bool, dict, or str, optional): magic configuration as bool,
            dict, json string, yaml filename.
        config_exclude_keys (list, optional): string keys to exclude storing
            in W&B when specifying config.
        config_include_keys (list, optional): string keys to include storing
            in W&B when specifying config.
        anonymous (str, optional): Can be "allow", "must", or "never".
            Controls whether anonymous logging is allowed. Defaults to never.
        mode (str, optional): Can be "online", "offline" or "disabled".
            Defaults to online.
        allow_val_change (bool, optional): allow config values to be changed
            after setting. Defaults to true in jupyter and false otherwise.
        resume (bool, str, optional): Sets the resuming behavior. Should be
            one of: "allow", "must", "never", "auto" or None. Defaults to
            None. Cases:
            - "auto" (or True): automatically resume the previous run on the
                same machine if the previous run crashed, otherwise starts a
                new run.
            - "allow": if id is set with init(id="UNIQUE_ID") or
                WANDB_RUN_ID="UNIQUE_ID" and it is identical to a previous
                run, wandb will automatically resume the run with the id.
                Otherwise wandb will start a new run.
            - "never": if id is set with init(id="UNIQUE_ID") or
                WANDB_RUN_ID="UNIQUE_ID" and it is identical to a previous
                run, wandb will crash.
            - "must": if id is set with init(id="UNIQUE_ID") or
                WANDB_RUN_ID="UNIQUE_ID" and it is identical to a previous
                run, wandb will automatically resume the run with the id.
                Otherwise wandb will crash.
            - None: never resumes - if a run has a duplicate run_id the
                previous run is overwritten.
            See https://docs.wandb.com/library/advanced/resuming for more
            detail.
        force (bool, optional): If true, will cause script to crash if user
            can't or isn't logged in to a wandb server. If false, will cause
            script to run in offline modes if user can't or isn't logged in
            to a wandb server. Defaults to false.
        sync_tensorboard (bool, optional): Synchronize wandb logs from
            tensorboard or tensorboardX and saves the relevant events file.
            Defaults to false.
        monitor_gym (bool, optional): automatically logs videos of
            environment when using OpenAI Gym
            (see https://docs.wandb.com/library/integrations/openai-gym)
            Defaults to false.
        save_code (bool, optional): Save the entrypoint or jupyter session
            history source code.
        id (str, optional): A globally unique (per project) identifier for
            the run. This is primarily used for resuming.

    Examples:
        Basic usage
        ```
        wandb.init()
        ```

        Launch multiple runs from the same script
        ```
        for x in range(10):
            with wandb.init(project="my-projo") as run:
                for y in range(100):
                    run.log({"metric": x+y})
        ```

    Raises:
        Exception: if problem.

    Returns:
        A :obj:`Run` object.
    """
    assert not wandb._IS_INTERNAL_PROCESS
    kwargs = dict(locals())
    error_seen = None
    except_exit = None
    try:
        wi = _WandbInit()
        wi.setup(kwargs)
        except_exit = wi.settings._except_exit
        try:
            run = wi.init()
            except_exit = wi.settings._except_exit
        except (KeyboardInterrupt, Exception) as e:
            if not isinstance(e, KeyboardInterrupt):
                sentry_exc(e)
            if not (wandb.wandb_agent._is_running() and isinstance(e, KeyboardInterrupt)):
                getcaller()
            assert logger
            if wi.settings.problem == "fatal":
                raise
            if wi.settings.problem == "warn":
                pass
            # TODO(jhr): figure out how to make this RunDummy
            run = None
    except UsageError:
        raise
    except KeyboardInterrupt as e:
        assert logger
        logger.warning("interrupted", exc_info=e)
        raise e
    except Exception as e:
        error_seen = e
        traceback.print_exc()
        assert logger
        logger.error("error", exc_info=e)
        # Need to build delay into this sentry capture because our exit hooks
        # mess with sentry's ability to send out errors before the program ends.
        sentry_exc(e, delay=True)
        # reraise(*sys.exc_info())
        # six.raise_from(Exception("problem"), e)
    finally:
        if error_seen:
            wandb.termerror("Abnormal program exit")
            if except_exit:
                os._exit(-1)
            six.raise_from(Exception("problem"), error_seen)
    return run
def __call__(self):
    self._makedirs()

    mappings = PlateMapping([splitext(basename(f))[0] for f in self.files])
    if self.ecopts.position_labels:
        mpfile = join(self.ecopts.mapping_dir, "%s.txt" % self.plate)
        mappings.read(mpfile)

    for channel in self.ecopts.regionnames.keys():
        dtable, cld = self._load_data(mappings, channel)
        msg = 'performing error correction on channel %s' % channel
        self.interruption_point(msg)

        # error correction
        if self.ecopts.hmm_algorithm == self.ecopts.HMM_BAUMWELCH:
            hmm = HmmSklearn(dtable, channel, cld, self.ecopts)
        else:
            hmm = HmmTde(dtable, channel, cld, self.ecopts)
        data = hmm()

        # plots and export
        report = HmmReport(data, self.ecopts, cld, self._hmm_dir)
        prefix = "%s_%s" % (channel.title(), self.ecopts.regionnames[channel])
        sby = self.ecopts.sortby.replace(" ", "_")

        self.interruption_point("plotting overview")
        report.overview(join(self._hmm_dir, '%s-%s.pdf' % (prefix, sby)))
        report.close_figures()

        self.interruption_point("plotting bar- and boxplots")
        report.bars_and_boxes(
            join(self._hmm_dir, '%s-%s_boxbars.pdf' % (prefix, sby)))
        report.close_figures()

        self.interruption_point("plotting hmm model")
        report.hmm_model(
            join(self._hmm_dir, "%s-%s_model.pdf") % (prefix, sby))

        if self.ecopts.write_gallery:
            self.interruption_point("plotting image gallery")
            try:
                # replace image_gallery_png with image_gallery_pdf
                fn = join(self._gallery_dir,
                          '%s-%s_gallery.png' % (prefix, sby))
                with cellh5.ch5open(self.ch5file, 'r') as ch5:
                    report.image_gallery_png(
                        ch5, fn, self.ecopts.n_galleries,
                        self.ecopts.resampling_factor,
                        self.ecopts.size_gallery_image)
                    report.close_figures()
            except Exception as e:
                # don't stop error correction
                with open(join(self._gallery_dir,
                               '%s-%s_error_readme.txt' % (prefix, sby)),
                          'w') as fp:
                    traceback.print_exc(file=fp)
                    fp.write("Check if gallery images exist!")

        report.export_hmm(join(self._hmm_dir, "%s-hmm.csv" % channel.title()),
                          self.ecopts.sortby)
def do_send_command_to_device(self, lab_session_id, command):
    """
    Callback run when the client sends a command to the experiment

    @param command Command sent by the client, as a string.
    """
    if DEBUG:
        dbg("[DBG] Lab Session Id: %s" % lab_session_id)

    data = ""

    # This command is currently not used.
    if command == "GIVE_ME_CIRCUIT_LIST":
        circuit_list = self.get_circuits().keys()
        circuit_list_string = ""
        for c in circuit_list:
            circuit_list_string += c
            circuit_list_string += ','
        return circuit_list_string
    elif command.startswith("GIVE_ME_CIRCUIT_DATA"):
        print "[DBG] GOT GIVE_ME_CIRCUIT_DATA_REQUEST"
        circuit_name = command.split(' ', 1)[1]
        circuit_data = self.get_circuits()[circuit_name]
        return circuit_data
    elif command == 'GIVE_ME_LIBRARY':
        if DEBUG:
            dbg("[DBG] GOT GIVE_ME_LIBRARY")
        return self.library_xml
    elif command == "login":
        if DEBUG:
            dbg("[DBG] LOGIN")
        session_key = hashlib.md5('{}-{}'.format(time.time(), random.random())).hexdigest()
        data = json.dumps({"teacher": self.teacher, "sessionkey": session_key})
    elif command.startswith("load"):
        if DEBUG:
            dbg("Circuit loaded: " + command[5:])
        try:
            circuit = xml.parseString(command[5:])
        except:
            traceback.print_exc()
    elif command.startswith("save"):
        if DEBUG:
            dbg("Circuit saved: " + command[5:])
        try:
            circuit = xml.parseString(command[5:])
        except:
            traceback.print_exc()
    else:
        if DEBUG:
            dbg("[DBG] REQUEST TYPE: " + self.parse_request_type(command))
            dbg("[DBG] SESSION ID: %s" % lab_session_id)
        try:
            dom = xml.parseString(command)
            protocol_nodes = dom.getElementsByTagName('protocol')
            if protocol_nodes:
                save_nodes = protocol_nodes[0].getElementsByTagName('save')
                if save_nodes:
                    for save_node in save_nodes:
                        save_node.parentNode.removeChild(save_node)
                    command = protocol_nodes[0].toxml().replace("<?xml version=\"1.0\" ?>", "")
        except:
            pass
        data = self.forward_request(lab_session_id, command)
        dom = xml.parseString(data)
        multimeter = dom.getElementsByTagName('multimeter')
        for i in range(0, len(multimeter)):
            multimeter[i].setAttribute("id", str(i + 1))
        data = dom.toxml().replace("<?xml version=\"1.0\" ?>", "")

    if DEBUG_MESSAGES:
        dbg("[DBG] DATA: " + data)

    return data
def shutdown_request(self, request):
    try:
        request.shutdown()
    except:
        if config.SHOW_DEBUG:
            traceback.print_exc()
def handle_error(error):
    print 'Something went wrong'
    import traceback
    traceback.print_exc()
    print error
def _login(self, params):
    valid = False

    if params.get("username") and params.get("hash") and params.get("nonce"):
        if params.get("nonce") not in DISPOSED_NONCES:
            DISPOSED_NONCES.add(params.get("nonce"))
            for entry in (config.USERS or []):
                entry = re.sub(r"\s", "", entry)
                username, stored_hash, uid, netfilter = entry.split(':')
                if username == params.get("username"):
                    try:
                        if params.get("hash") == hashlib.sha256(stored_hash.strip() + params.get("nonce")).hexdigest():
                            valid = True
                            break
                    except:
                        if config.SHOW_DEBUG:
                            traceback.print_exc()

    if valid:
        session_id = os.urandom(SESSION_ID_LENGTH).encode("hex")
        expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS

        self.send_response(httplib.OK)
        self.send_header(HTTP_HEADER.CONNECTION, "close")
        self.send_header(HTTP_HEADER.SET_COOKIE, "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration))))

        if netfilter in ("", "0.0.0.0/0"):
            netfilters = None
        else:
            addresses = set()
            netmasks = set()

            for item in set(re.split(r"[;,]", netfilter)):
                item = item.strip()
                if '/' in item:
                    _ = item.split('/')[-1]
                    if _.isdigit() and int(_) >= 16:
                        lower = addr_to_int(item.split('/')[0])
                        mask = make_mask(int(_))
                        upper = lower | (0xffffffff ^ mask)
                        while lower <= upper:
                            addresses.add(int_to_addr(lower))
                            lower += 1
                    else:
                        netmasks.add(item)
                elif '-' in item:
                    _ = item.split('-')
                    lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
                    while lower <= upper:
                        addresses.add(int_to_addr(lower))
                        lower += 1
                elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
                    addresses.add(item)

            netfilters = netmasks
            if addresses:
                netfilters.add(get_regex(addresses))

        SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "expiration": expiration, "client_ip": self.client_address[0]})
    else:
        time.sleep(UNAUTHORIZED_SLEEP_TIME)
        self.send_response(httplib.UNAUTHORIZED)
        self.send_header(HTTP_HEADER.CONNECTION, "close")

    self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
    content = "Login %s" % ("success" if valid else "failed")

    if not subprocess.mswindows:
        try:
            subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
        except Exception:
            if config.SHOW_DEBUG:
                traceback.print_exc()

    return content
def send_mail(book_id, kindle_mail):
    '''Send email with attachments'''
    is_mobi = False
    is_azw = False
    is_azw3 = False
    is_epub = False
    is_pdf = False
    file_path = None
    settings = ub.get_mail_settings()

    # create MIME message
    msg = MIMEMultipart()
    msg['From'] = settings["mail_from"]
    msg['To'] = kindle_mail
    msg['Subject'] = _('Send to Kindle')
    text = _('This email has been sent via calibre web.')
    msg.attach(MIMEText(text))

    use_ssl = settings.get('mail_use_ssl', 0)

    # attach files
    #msg.attach(self.get_attachment(file_path))
    book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
    data = db.session.query(db.Data).filter(db.Data.book == book.id)

    formats = {}
    for entry in data:
        if entry.format == "MOBI":
            formats["mobi"] = os.path.join(config.DB_ROOT, book.path, entry.name + ".mobi")
        if entry.format == "EPUB":
            formats["epub"] = os.path.join(config.DB_ROOT, book.path, entry.name + ".epub")
        if entry.format == "PDF":
            formats["pdf"] = os.path.join(config.DB_ROOT, book.path, entry.name + ".pdf")

    if len(formats) == 0:
        return _("Could not find any formats suitable for sending by email")

    if 'mobi' in formats:
        msg.attach(get_attachment(formats['mobi']))
    elif 'epub' in formats:
        filepath = make_mobi(book.id)
        if filepath is not None:
            msg.attach(get_attachment(filepath))
        else:
            return _("Could not convert epub to mobi")
    elif 'pdf' in formats:
        msg.attach(get_attachment(formats['pdf']))
    else:
        return _("Could not find any formats suitable for sending by email")

    # convert MIME message to string
    fp = StringIO()
    gen = Generator(fp, mangle_from_=False)
    gen.flatten(msg)
    msg = fp.getvalue()

    # send email
    try:
        mailserver = smtplib.SMTP(settings["mail_server"], settings["mail_port"])
        mailserver.set_debuglevel(0)

        if int(use_ssl) == 1:
            mailserver.ehlo()
            mailserver.starttls()
            mailserver.ehlo()

        if settings["mail_password"]:
            mailserver.login(settings["mail_login"], settings["mail_password"])
        mailserver.sendmail(settings["mail_login"], kindle_mail, msg)
        mailserver.quit()
    except (socket.error, smtplib.SMTPRecipientsRefused, smtplib.SMTPException), e:
        # format_exc() returns the traceback text; print_exc() would return
        # None and log nothing useful.
        app.logger.error(traceback.format_exc())
        return _("Failed to send mail: %s" % str(e))
def __call__(self, cfg, gpu_no):
    print("calling program with gpu " + str(gpu_no))
    cmd = ['python3', self.program, '--cfg', str(cfg), str(gpu_no)]
    outs = ""
    #outputval = 0
    outputval = ""
    try:
        outs = str(check_output(cmd, stderr=STDOUT, timeout=40000))
        if os.path.isfile(logfile):
            with open(logfile, 'a') as f_handle:
                f_handle.write(outs)
        else:
            with open(logfile, 'w') as f_handle:
                f_handle.write(outs)
        outs = outs.split("\\n")
        #TODO_CHRIS hacky solution
        #outputval = 0
        #for i in range(len(outs)-1,1,-1):
        for i in range(len(outs) - 1, -1, -1):
            #if re.match("^\d+?\.\d+?$", outs[-i]) is None:
            #CHRIS changed outs[-i] to outs[i]
            print(outs[i])
            if re.match("^\(\-?\d+\.?\d*\e?\+?\-?\d*\,\s\-?\d+\.?\d*\e?\+?\-?\d*\)$", outs[i]) is None:
                #do nothing
                a = 1
            else:
                #outputval = -1 * float(outs[-i])
                outputval = outs[i]
        #if np.isnan(outputval):
        #    outputval = 0
    except subprocess.CalledProcessError as e:
        traceback.print_exc()
        print(e.output)
    except:
        print("Unexpected error:")
        traceback.print_exc()
        print(outs)
        #outputval = 0

    #TODO_CHRIS hacky solution
    tuple_str1 = ''
    tuple_str2 = ''
    success = True
    i = 1
    try:
        while outputval[i] != ',':
            tuple_str1 += outputval[i]
            i += 1
        i += 1
        while outputval[i] != ')':
            tuple_str2 += outputval[i]
            i += 1
    except:
        print("error in receiving answer from gpu " + str(gpu_no))
        success = False
    try:
        tuple = (float(tuple_str1), float(tuple_str2), success)
    except:
        tuple = (0.0, 0.0, False)
    #return outputval
    return tuple
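The character-by-character parsing of the "(x, y)" result string above could be replaced by the standard library's ast.literal_eval, which safely evaluates literal tuples. A hedged sketch of that alternative (the helper name is illustrative; this is not the original project's code):

# Hedged alternative to the manual tuple parsing above.
import ast

def parse_result_tuple(outputval):
    try:
        x, y = ast.literal_eval(outputval)  # e.g. "(1.5, 2.5)" -> (1.5, 2.5)
        return (float(x), float(y), True)
    except (ValueError, SyntaxError, TypeError):
        return (0.0, 0.0, False)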
def finish_request(self, *args, **kwargs):
    try:
        BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
    except:
        if config.SHOW_DEBUG:
            traceback.print_exc()
def getWorkloadForPredictionFromTags(corl_prior, max_step, time_shift, host, port, pwd):
    '''
    Returns list of tuples (code, date, klid)
    '''
    # TODO need to compare corl_prior and max_step with snapshot values in db
    # (secu.params), and opt to re-initialize tag table if 2 versions differ.
    global cnxpool
    _init_db(1, host, port, pwd)
    cnx = cnxpool.get_connection()
    cursor = None
    try:
        print('{} synchronizing kline_d_b_lr_tags from main table...'.format(
            strftime("%H:%M:%S")))
        cursor = cnx.cursor()
        cursor.execute('''
            INSERT IGNORE INTO kline_d_b_lr_tags (code, date, klid, tags, udate, utime)
                SELECT
                    code, date, klid, 'wcc_predict_ready',
                    date_format(CONVERT_TZ(CURRENT_TIMESTAMP(),'+00:00','+08:00'),'%Y-%m-%d'),
                    date_format(CONVERT_TZ(CURRENT_TIMESTAMP(),'+00:00','+08:00'),'%H:%i:%s')
                FROM
                    kline_d_b_lr
        ''')
        cursor.execute('''
            UPDATE kline_d_b_lr_tags
            SET tags = CONCAT_WS(' ', tags, 'wcc_predict_ready')
            WHERE MATCH(tags) AGAINST('-wcc_predict_ready -wcc_predict -wcc_predict_insufficient' IN BOOLEAN MODE)
        ''')
        cnx.commit()
    except:
        traceback.print_exc()
        raise
    finally:
        if cursor is not None:
            cursor.close()
        cnx.close()

    offset = max_step + time_shift - 1
    qry = """
        SELECT
            t.code, t_pre.date start_date, t.date end_date, t.klid
        FROM
            kline_d_b_lr_tags t_pre,
            (SELECT
                code, date, klid
            FROM
                kline_d_b_lr_tags
            WHERE
                klid >= %s
                AND MATCH(tags) AGAINST('+wcc_predict_ready -wcc_predict -wcc_predict_insufficient' IN BOOLEAN MODE)
            ) t
        WHERE
            t_pre.code = t.code
            AND t_pre.klid = t.klid - %s
    """
    cursor = None
    workloads = None
    cnx = cnxpool.get_connection()
    try:
        print('{} querying workload from kline_d_b_lr_tags'.format(
            strftime("%H:%M:%S")))
        cursor = cnx.cursor()
        cursor.execute(qry, (corl_prior, offset))
        rows = cursor.fetchall()
        workloads = [(c, d1, d2, k) for c, d1, d2, k in rows]
    except:
        traceback.print_exc()
        raise
    finally:
        if cursor is not None:
            cursor.close()
        cnx.close()
    # sort by code and klid in ascending order
    workloads.sort(key=lambda tup: (tup[0], tup[3]))
    print('{} total workloads: {}'.format(strftime("%H:%M:%S"), len(workloads)))
    return workloads
def finish(self):
    try:
        BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
    except:
        if config.SHOW_DEBUG:
            traceback.print_exc()
def _main(self, args):
    # type: (List[str]) -> int
    # We must initialize this before the tempdir manager, otherwise the
    # configuration would not be accessible by the time we clean up the
    # tempdir manager.
    self.tempdir_registry = self.enter_context(tempdir_registry())
    # Intentionally set as early as possible so globally-managed temporary
    # directories are available to the rest of the code.
    self.enter_context(global_tempdir_manager())

    options, args = self.parse_args(args)

    # Set verbosity so that it can be used elsewhere.
    self.verbosity = options.verbose - options.quiet

    level_number = setup_logging(
        verbosity=self.verbosity,
        no_color=options.no_color,
        user_log_file=options.log,
    )

    if (
        sys.version_info[:2] == (2, 7) and
        not options.no_python_version_warning
    ):
        message = (
            "pip 21.0 will drop support for Python 2.7 in January 2021. "
            "More details about Python 2 support in pip can be found at "
            "https://pip.pypa.io/en/latest/development/release-process/#python-2-support"  # noqa
        )
        if platform.python_implementation() == "CPython":
            message = (
                "Python 2.7 reached the end of its life on January "
                "1st, 2020. Please upgrade your Python as Python 2.7 "
                "is no longer maintained. "
            ) + message
        deprecated(message, replacement=None, gone_in="21.0")

    if (
        sys.version_info[:2] == (3, 5) and
        not options.no_python_version_warning
    ):
        message = (
            "Python 3.5 reached the end of its life on September "
            "13th, 2020. Please upgrade your Python as Python 3.5 "
            "is no longer maintained. pip 21.0 will drop support "
            "for Python 3.5 in January 2021."
        )
        deprecated(message, replacement=None, gone_in="21.0")

    # TODO: Try to get these passing down from the command?
    #       without resorting to os.environ to hold these.
    # This also affects isolated builds and it should.

    if options.no_input:
        os.environ['PIP_NO_INPUT'] = '1'

    if options.exists_action:
        os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)

    if options.require_venv and not self.ignore_require_venv:
        # If a venv is required check if it can really be found
        if not running_under_virtualenv():
            logger.critical(
                'Could not find an activated virtualenv (required).'
            )
            sys.exit(VIRTUALENV_NOT_FOUND)

    if options.cache_dir:
        options.cache_dir = normalize_path(options.cache_dir)
        if not check_path_owner(options.cache_dir):
            logger.warning(
                "The directory '%s' or its parent directory is not owned "
                "or is not writable by the current user. The cache "
                "has been disabled. Check the permissions and owner of "
                "that directory. If executing pip with sudo, you may want "
                "sudo's -H flag.",
                options.cache_dir,
            )
            options.cache_dir = None

    if getattr(options, "build_dir", None):
        deprecated(
            reason=(
                "The -b/--build/--build-dir/--build-directory "
                "option is deprecated."
            ),
            replacement=(
                "use the TMPDIR/TEMP/TMP environment variable, "
                "possibly combined with --no-clean"
            ),
            gone_in="20.3",
            issue=8333,
        )

    if 'resolver' in options.unstable_features:
        logger.critical(
            "--unstable-feature=resolver is no longer supported, and "
            "has been replaced with --use-feature=2020-resolver instead."
        )
        sys.exit(ERROR)

    try:
        status = self.run(options, args)
        assert isinstance(status, int)
        return status
    except PreviousBuildDirError as exc:
        logger.critical(str(exc))
        logger.debug('Exception information:', exc_info=True)
        return PREVIOUS_BUILD_DIR_ERROR
    except (InstallationError, UninstallationError, BadCommand,
            SubProcessError, NetworkConnectionError) as exc:
        logger.critical(str(exc))
        logger.debug('Exception information:', exc_info=True)
        return ERROR
    except CommandError as exc:
        logger.critical('%s', exc)
        logger.debug('Exception information:', exc_info=True)
        return ERROR
    except BrokenStdoutLoggingError:
        # Bypass our logger and write any remaining messages to stderr
        # because stdout no longer works.
        print('ERROR: Pipe to stdout was broken', file=sys.stderr)
        if level_number <= logging.DEBUG:
            traceback.print_exc(file=sys.stderr)
        return ERROR
    except KeyboardInterrupt:
        logger.critical('Operation cancelled by user')
        logger.debug('Exception information:', exc_info=True)
        return ERROR
    except BaseException:
        logger.critical('Exception:', exc_info=True)
        return UNKNOWN_ERROR
    finally:
        self.handle_pip_version_check(options)
def getWorkloadForPrediction(actor_pool, start_anchor, stop_anchor, corl_prior,
                             max_step, time_shift, host, port, pwd):
    '''
    Returns list of tuples (code, date, klid)
    '''
    global cnxpool
    _init_db(1, host, port, pwd)
    qry = ("SELECT "
           "    partition_name "
           "FROM "
           "    information_schema.partitions "
           "WHERE "
           "    table_schema = 'secu' "
           "    AND table_name = 'kline_d_b_lr' ")
    cond = ''
    if start_anchor is not None:
        c1, k1 = start_anchor
        cond += '''
            and (
                t.code > '{}'
                or (t.code = '{}' and t.klid >= {})
            )
        '''.format(c1, c1, k1)
    if stop_anchor is not None:
        c2, k2 = stop_anchor
        cond += '''
            and (
                t.code < '{}'
                or (t.code = '{}' and t.klid < {})
            )
        '''.format(c2, c2, k2)
    cnx = cnxpool.get_connection()
    cursor = None
    try:
        print('{} querying partitions for kline_d_b_lr'.format(
            strftime("%H:%M:%S")))
        cursor = cnx.cursor()
        cursor.execute(qry)
        rows = cursor.fetchall()
        total = cursor.rowcount
        print('{} #partitions: {}'.format(strftime("%H:%M:%S"), total))
    except:
        traceback.print_exc()
        raise
    finally:
        if cursor is not None:
            cursor.close()
        cnx.close()
    tasks = actor_pool.map(
        lambda a, part: a.get_wcc_infer_work_request.remote(part, cond), rows)
    # remove empty sublists
    workloads = [t for t in list(tasks) if t]
    # flatten the list and remove empty tuples
    workloads = [val for sublist in workloads for val in sublist if val]
    # sort by code and klid in ascending order
    workloads.sort(key=lambda tup: (tup[0], tup[3]))
    print('{} total workloads: {}'.format(strftime("%H:%M:%S"), len(workloads)))
    return workloads
def lookup_by_isbn(number, forceUpdate=False): isbn, price = _process_isbn(number) print("Looking up isbn", isbn, "with price", price) # if length of isbn>0 and isn't "n/a" or "none" if len(isbn) > 0 and not re.match("^n(\s|/){0,1}a|none", isbn, re.I): # first we check our database titles = Title.select(Title.q.isbn == isbn) ##print titles #debug known_title = False the_titles = list(titles) if (len(the_titles) > 0) and (not forceUpdate): ##print "in titles" known_title = the_titles[0] ProductName = the_titles[0].booktitle.format() authors = [] if len(the_titles[0].author) > 0: authors = [x.authorName.format() for x in the_titles[0].author] authors_as_string = ", ".join(authors) categories = [] if len(the_titles[0].categorys) > 0: ##print len(the_titles[0].categorys) ##print the_titles[0].categorys categories = [x.categoryName.format() for x in the_titles[0].categorys] categories_as_string = ", ".join(categories) if price == 0: if len(the_titles[0].books) > 0: ListPrice = max([x.listprice for x in the_titles[0].books]) else: ListPrice = 0 else: ListPrice = price Manufacturer = the_titles[0].publisher.format() Format = the_titles[0].type.format() Kind = the_titles[0].kind.kindName orig_isbn = the_titles[0].origIsbn.format() # if the_titles[0].images: # large_url = the_titles[0].images.largeUrl # med_url = the_titles[0].images.medUrl # small_url = the_titles[0].images.smallUrl # else: # large_url = med_url = small_url = '' large_url = med_url = small_url = "" SpecialOrders = [ tso.id for tso in Title.selectBy( isbn=isbn ).throughTo.specialorder_pivots.filter( TitleSpecialOrder.q.orderStatus == "ON ORDER" ) ] return { "title": ProductName, "authors": authors, "authors_as_string": authors_as_string, "categories_as_string": categories_as_string, "list_price": ListPrice, "publisher": Manufacturer, "isbn": isbn, "orig_isbn": orig_isbn, "large_url": large_url, "med_url": med_url, "small_url": small_url, "format": Format, "kind": Kind, "known_title": known_title, "special_order_pivots": SpecialOrders, } else: # we don't have it yet # if we're using amazon ecs if use_amazon_ecs: sleep(1) # so amazon doesn't get huffy ecs.setLicenseKey(amazon_license_key) ecs.setSecretAccessKey(amazon_secret_key) ecs.setAssociateTag(amazon_associate_tag) ##print "about to search", isbn, isbn[0] amazonBooks = [] idType = "" if len(isbn) == 12: idType = "UPC" elif len(isbn) == 13: # if we are using an internal isbn if isbn.startswith(internal_isbn_prefix): return [] # otherwise search on amazon. 
elif isbn.startswith("978") or isbn.startswith("979"): idType = "ISBN" else: idType = "EAN" try: print("searching amazon for ", isbn, idType, file=sys.stderr) amazonProds = AmzSear(isbn) print(amazonProds, file=sys.stderr) except (ecs.InvalidParameterValue, HTTPError): pass if amazonProds: print(amazonProds, file=sys.stderr) # inner comprehension tests each prodict for price whose type is in formats # if we find a price which its key is in formats, then we return the coorresponding product format_list = [ "Paperback", "Mass Market Paperback", "Hardcover", "Perfect Paperback", "Pamphlet", "Plastic Comb", "Spiral-bound", "Print on Demand (Paperback)", "DVD", "Calendar", "Board book", "Audio Cassette", "Cards", "Audio CD", "Diary", "DVD-ROM", "Library Binding", "music", "Vinyl", "Health and Beauty", "Hardback", ] prods = [ x for x in amazonProds.values() if [dum for dum in x["prices"].keys() if dum in format_list] ] for prod1 in prods: print(prod1, file=sys.stderr) price_dict = prod1["prices"] listprice = max(price_dict.values()) format = [k for k in format_list if k in price_dict] format = format[0] if not format: continue title = prod1["title"] image_url = prod1["image_url"] authors = [ x.replace("by ", "") for x in prod1["subtext"] if x.startswith("by ") ] auth_list = [ y.strip() for a in [x.split(", ") for x in authors[0].split(" and ")] for y in a ] # we assume any full name less than five characters is an abbreviation like 'Jr.' # so we add it back to the previous authorname abbrev_list = [i for i, x in enumerate(auth_list) if len(x) < 5] for i in abbrev_list: auth_list[i - 1 : i + 1] = [ ", ".join(auth_list[i - 1 : i + 1]) ] return { "title": title, "authors": auth_list, "authors_as_string": ",".join(auth_list), "categories_as_string": "", "list_price": listprice, "publisher": "", "isbn": isbn, "orig_isbn": isbn, "large_url": image_url, "med_url": image_url, "small_url": image_url, "format": format, "kind": "books", "known_title": known_title, "special_orders": [], } else: traceback.print_exc() print("using isbnlib from ecs", file=sys.stderr) isbnlibbooks = [] try: isbnlibbooks = isbnlib.meta(str(isbn)) except: pass if isbnlibbooks: return { "title": isbnlibbooks["Title"], "authors": isbnlibbooks["Authors"], "authors_as_string": ",".join(isbnlibbooks["Authors"]), "categories_as_string": None, "list_price": price, "publisher": isbnlibbooks["Publisher"], "isbn": isbn, "orig_isbn": isbn, "large_url": None, "med_url": None, "small_url": None, "format": None, "kind": "books", "known_title": known_title, "special_orders": [], } else: return {} else: # if we're scraping amazon print("scraping amazon", file=sys.stderr) headers = { "User-Agent": random.sample(user_agents, 1).pop() } amazon_url_template = "http://www.amazon.com/dp/%s/" if len(isbn) == 13: isbn10 = None if isbnlib.is_isbn13(isbn): isbn10 = isbnlib.to_isbn10(isbn) else: return {} if isbn10: with requests.Session() as session: try: print("getting amazon") page_response = session.get( amazon_url_template % isbn10, headers=headers, timeout=0.1 ) print("got response") page_content = BeautifulSoup(page_response.content, "lxml") print("got parsed content") try: booktitle = page_content.select("#productTitle").pop().text except Exception as e: traceback.print_exc() booktitle = '' popover_preload = [ a.text for a in page_content.select( ".author.notFaded .a-popover-preload a.a-link-normal" ) ] author_name = [ a.text for a in page_content.select( ".author.notFaded a.a-link-normal" ) if a.text not in popover_preload ] contributor_role = 
page_content.select(".contribution span") try: contributor_role = [ re.findall("\w+", cr.text).pop() for cr in contributor_role ] except Exception as e: traceback.print_exc() contributor_role = [] author_role = zip(author_name, contributor_role) try: listprice = ( page_content.select(".a-text-strike").pop().text ) except IndexError as e: print("using bookfinder4u") if "listprice" not in locals(): with requests.Session() as session: bookfinderurl = "http://www.bookfinder4u.com/IsbnSearch.aspx?isbn='%s'&mode=direct" url = bookfinderurl % isbn try: page_response2 = session.get( url, headers=headers, timeout=0.1 ) page_content2 = BeautifulSoup( page_response2.content, "lxml" ) except Exception as e: traceback.print_exc() listprice = 0.0 else: try: matches = re.search( "List\sprice:\s(\w{2,4})\s(\d+(.\d+)?)", page_content2.text, re.I, ) if matches: listprice = matches.groups()[1] else: listprice = 0.00 except Exception as e: traceback.print_exc() listprice = 0.00 try: book_edition = ( page_content.select("#bookEdition").pop().text ) except Exception as e: traceback.print_exc() book_edition = "" try: matches = re.findall( "(?<=imageGalleryData'\s:\s\[)\{.*?\}", page_content.contents[1].text, ) image_url_dict = eval(matches[0]) except Exception as e: traceback.print_exc() image_url_dict = {"mainUrl": "", "thumbUrl": ""} category_items = page_content.select(".zg_hrsr_ladder a") category_items = [a.text for a in category_items] product_details = page_content.select( "#productDetailsTable" ) # ul:first-of-type") try: product_details1 = product_details.pop().text.splitlines() quit_flag = 0 for pd in product_details1: if pd.endswith("pages"): format, numpages = pd.split(":") numpages = numpages.replace(" pages", "").strip() quit_flag += 1 continue if pd.startswith("Publisher: "): matches = re.match( "Publisher: ([^;^(]*)\s?([^(]*)?\W(.*)\W", pd ).groups() publisher = matches[0] publication_date = matches[2] quit_flag += 1 continue if quit_flag == 2: break else: publisher = '' format = '' except Exception as e: traceback.print_exc() publisher = '' format = '' if booktitle: return { "title": booktitle, "authors": author_name, "authors_as_string": ",".join(author_name), "categories_as_string": ",".join(category_items), "list_price": listprice, "publisher": publisher, "isbn": isbn, "orig_isbn": isbn, "large_url": image_url_dict["mainUrl"], "med_url": image_url_dict["mainUrl"], "small_url": image_url_dict["thumbUrl"], "format": format, "kind": "books", "known_title": known_title, "special_orders": [], } except Exception as e: traceback.print_exc() print("using isbnlib from scraper", file=sys.stderr) isbnlibbooks = [] try: isbnlibbooks = isbnlib.meta(str(isbn)) except: pass if isbnlibbooks: return { "title": isbnlibbooks["Title"], "authors": isbnlibbooks["Authors"], "authors_as_string": ",".join( isbnlibbooks["Authors"] ), "categories_as_string": None, "list_price": price, "publisher": isbnlibbooks["Publisher"], "isbn": isbn, "orig_isbn": isbn, "large_url": None, "med_url": None, "small_url": None, "format": None, "kind": "books", "known_title": known_title, "special_orders": [], } else: return {} else: if title: return { "title": title, "authors": author_name, "authors_as_string": ",".join(author_name), "categories_as_string": ",".join(category_items), "list_price": listprice, "publisher": publisher, "isbn": isbn, "orig_isbn": isbn, "large_url": image_url_dict["mainUrl"], "med_url": image_url_dict["mainUrl"], "small_url": image_url_dict["thumbUrl"], "format": format, "kind": "books", "known_title": 
known_title, "special_orders": [], } else: return {} else: return {}
def _loadTrainingData(bno):
    global cnxpool, shared_args, check_input
    # idxlst = _getIndex()
    flag = 'TR'
    print("{} loading training set {} {}...".format(strftime("%H:%M:%S"), flag, bno))
    cnx = cnxpool.get_connection()
    try:
        cursor = cnx.cursor(buffered=True)
        query = (
            "SELECT "
            "   code, klid, rcode, corl_stz "
            "FROM "
            "   wcc_trn "
            "WHERE "
            "   flag = %s "
            "   AND bno = %s"
        )
        cursor.execute(query, (flag, int(bno),))
        train_set = cursor.fetchall()
        total = cursor.rowcount
        cursor.close()
        data, vals, seqlen = [], [], []
        if total > 0:
            # issue using loky in Area51m
            # qk, qd, qd_idx = _getFtQuery()
            # exc = _getExecutor()
            # params = [(code, klid, rcode, val, max_step, time_shift, qk,
            #            qd_idx if rcode in idxlst else qd)
            #           for code, klid, rcode, val in train_set]
            # r = list(exc.map(_getSeries, params))
            # data, vals, seqlen = zip(*r)
            tasks = [
                getSeries.remote(code, klid, rcode, val, shared_args)
                for code, klid, rcode, val in train_set
            ]
            r = list(ray.get(tasks))
            data, vals, seqlen = zip(*r)
        # data = [batch, max_step, feature*time_shift]
        # seqlen = [batch]
        # vals = [batch]
        d = np.array(data, 'f')
        s = np.expand_dims(np.array(seqlen, 'i'), axis=1)
        v = np.expand_dims(np.array(vals, 'f'), axis=1)
        if check_input:
            # identical screening applies to all three arrays, so loop over
            # them instead of spelling the checks out three times
            arrays = (('feature', d), ('seqlens', s), ('values', v))
            for name, arr in arrays:
                if np.ma.is_masked(arr):
                    print('batch[{}] masked {}'.format(bno, name))
                    print(arr)
            for name, arr in arrays:
                found = False
                nanLoc = np.argwhere(np.isnan(arr))
                if len(nanLoc) > 0:
                    print('batch[{}] nan for {}: {}'.format(bno, name, nanLoc))
                    found = True
                infLoc = np.argwhere(np.isinf(arr))
                if len(infLoc) > 0:
                    print('batch[{}] inf for {}: {}'.format(bno, name, infLoc))
                    found = True
                if found:
                    print(arr)
        return d, s, v
    except:
        traceback.print_exc()
        raise
    finally:
        cnx.close()
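# Shape note (from the comments and expand_dims calls above):
#   d: float32 [batch, max_step, n_features * time_shift]  -- feature series
#   s: int32   [batch, 1]                                  -- true sequence lengths
#   v: float32 [batch, 1]                                  -- corl_stz targets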
def act( flags, actor_index: int, free_queue: mp.SimpleQueue, full_queue: mp.SimpleQueue, model: torch.nn.Module, buffers: Buffers, initial_agent_state_buffers, ): try: logging.info("Actor %i started.", actor_index) timings = prof.Timings() # Keep track of how fast things are. gym_env = create_env(flags) seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little") gym_env.seed(seed) env = environment.Environment(gym_env) env_output = env.initial() agent_state = model.initial_state(batch_size=1) agent_output, unused_state = model(env_output, agent_state) while True: index = free_queue.get() if index is None: break # Write old rollout end. for key in env_output: buffers[key][index][0, ...] = env_output[key] for key in agent_output: buffers[key][index][0, ...] = agent_output[key] for i, tensor in enumerate(agent_state): initial_agent_state_buffers[index][i][...] = tensor # Do new rollout. for t in range(flags.unroll_length): timings.reset() with torch.no_grad(): agent_output, agent_state = model(env_output, agent_state) timings.time("model") env_output = env.step(agent_output["action"]) timings.time("step") for key in env_output: buffers[key][index][t + 1, ...] = env_output[key] for key in agent_output: buffers[key][index][t + 1, ...] = agent_output[key] timings.time("write") full_queue.put(index) if actor_index == 0: logging.info("Actor %i: %s", actor_index, timings.summary()) except KeyboardInterrupt: pass # Return silently. except Exception as e: logging.error("Exception in worker process %i", actor_index) traceback.print_exc() print() raise e
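# Illustration only (not from the original project): the two-queue slot
# recycling protocol act() participates in. The learner seeds free_queue with
# rollout-buffer indices; each actor takes an index, fills buffers[index], and
# hands the index back on full_queue; None on free_queue means shut down.
def _example_queue_protocol():
    import multiprocessing as mp
    free_queue, full_queue = mp.SimpleQueue(), mp.SimpleQueue()
    for index in range(4):      # learner hands out all slots
        free_queue.put(index)
    free_queue.put(None)        # ... and eventually signals shutdown
    while True:
        index = free_queue.get()
        if index is None:
            break
        full_queue.put(index)   # a real actor would write the rollout first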
# Create thumbnail self._gen_thumbnail(prefix, config.paper_width_mm, config.paper_height_mm) self.result = RESULT_SUCCESS l.info("Finished rendering of job #%d." % self.job.id) except KeyboardInterrupt: self.result = RESULT_KEYBOARD_INTERRUPT l.info("Rendering of job #%d interrupted!" % self.job.id) except Exception, e: self.result = RESULT_RENDERING_EXCEPTION l.exception("Rendering of job #%d failed (exception occurred during" " rendering)!" % self.job.id) errfile = os.path.join(RENDERING_RESULT_PATH, self.job.files_prefix() + "-errors.txt") fp = open(errfile, "w") traceback.print_exc(file=fp) fp.close() self._email_exception(e) return self.result if __name__ == '__main__': def usage(): sys.stderr.write('usage: %s <jobid> [timeout]\n' % sys.argv[0]) if len(sys.argv) < 2 or len(sys.argv) > 3: usage() sys.exit(3) try:
def main(cmd_args): import optparse global options, PSYCO usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc oparser = optparse.OptionParser(usage) oparser.add_option("-l", "--logfilename", default="", help="contains error messages") oparser.add_option( "-v", "--verbosity", type="int", default=0, help="level of information and diagnostics provided") oparser.add_option( "-m", "--mmap", type="int", default=-1, help="1: use mmap; 0: don't use mmap; -1: accept heuristic") oparser.add_option("-e", "--encoding", default="", help="encoding override") oparser.add_option( "-f", "--formatting", type="int", default=0, help="0 (default): no fmt info\n" "1: fmt info (all cells)\n", ) oparser.add_option( "-g", "--gc", type="int", default=0, help= "0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc" ) oparser.add_option( "-s", "--onesheet", default="", help="restrict output to this sheet (name or index)") oparser.add_option("-u", "--unnumbered", action="store_true", default=0, help="omit line numbers or offsets in biff_dump") oparser.add_option("-d", "--on-demand", action="store_true", default=0, help="load sheets on demand instead of all at once") oparser.add_option("-t", "--suppress-timing", action="store_true", default=0, help="don't print timings (diffs are less messy)") oparser.add_option("-r", "--ragged-rows", action="store_true", default=0, help="open_workbook(..., ragged_rows=True)") options, args = oparser.parse_args(cmd_args) if len(args) == 1 and args[0] in ("version", ): pass elif len(args) < 2: oparser.error("Expected at least 2 args, found %d" % len(args)) cmd = args[0] xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5") if cmd == 'biff_dump': xlrd.dump(args[1], unnumbered=options.unnumbered) sys.exit(0) if cmd == 'biff_count': xlrd.count_records(args[1]) sys.exit(0) if cmd == 'version': print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__)) print("Python:", sys.version) sys.exit(0) if options.logfilename: logfile = LogHandler(open(options.logfilename, 'w')) else: logfile = sys.stdout mmap_opt = options.mmap mmap_arg = xlrd.USE_MMAP if mmap_opt in (1, 0): mmap_arg = mmap_opt elif mmap_opt != -1: print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt) fmt_opt = options.formatting | (cmd in ('xfc', )) gc_mode = options.gc if gc_mode: gc.disable() for pattern in args[1:]: for fname in glob.glob(pattern): print("\n=== File: %s ===" % fname) if logfile != sys.stdout: logfile.setfileheading("\n=== File: %s ===\n" % fname) if gc_mode == 1: n_unreachable = gc.collect() if n_unreachable: print("GC before open:", n_unreachable, "unreachable objects") if PSYCO: import psyco psyco.full() PSYCO = 0 try: t0 = time.time() bk = xlrd.open_workbook( fname, verbosity=options.verbosity, logfile=logfile, use_mmap=mmap_arg, encoding_override=options.encoding, formatting_info=fmt_opt, on_demand=options.on_demand, ragged_rows=options.ragged_rows, ) t1 = time.time() if not options.suppress_timing: print("Open took %.2f seconds" % (t1 - t0, )) except xlrd.XLRDError as e: print("*** Open failed: %s: %s" % (type(e).__name__, e)) continue except KeyboardInterrupt: print("*** KeyboardInterrupt ***") traceback.print_exc(file=sys.stdout) sys.exit(1) except BaseException as e: print("*** Open failed: %s: %s" % (type(e).__name__, e)) traceback.print_exc(file=sys.stdout) continue t0 = time.time() if cmd == 'hdr': bk_header(bk) elif cmd == 'ov': # OverView show(bk, 0) elif cmd == 'show': # all rows show(bk) elif cmd == '2rows': # first 
row and last row show(bk, 2) elif cmd == '3rows': # first row, 2nd row and last row show(bk, 3) elif cmd == 'bench': show(bk, printit=0) elif cmd == 'fonts': bk_header(bk) show_fonts(bk) elif cmd == 'names': # named reference list show_names(bk) elif cmd == 'name_dump': # named reference list show_names(bk, dump=1) elif cmd == 'labels': show_labels(bk) elif cmd == 'xfc': count_xfs(bk) else: print("*** Unknown command <%s>" % cmd) sys.exit(1) del bk if gc_mode == 1: n_unreachable = gc.collect() if n_unreachable: print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects") if not options.suppress_timing: t1 = time.time() print("\ncommand took %.2f seconds\n" % (t1 - t0, )) return None
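# Example invocations (script name hypothetical; the commands and flags are
# the ones parsed above):
#   python runxlrd.py -f1 show book1.xls      # dump all rows, with formatting info
#   python runxlrd.py ov book1.xls            # overview only
#   python runxlrd.py biff_count book1.xls    # record counts without full parsing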
def trigger_flow_event(cls, run, webhook_url, node_uuid, msg, action='POST', resthook=None, header=None):
    flow = run.flow
    org = flow.org
    contact = run.contact
    api_user = get_api_user()
    json_time = datetime_to_str(timezone.now())
    # get the results for this contact
    results = run.flow.get_results(run.contact)
    values = []
    if results and results[0]:
        values = results[0]['values']
    for value in values:
        value['time'] = datetime_to_str(value['time'])
        value['value'] = six.text_type(value['value'])
    if msg:
        text = msg.text
        attachments = msg.get_attachments()
        channel = msg.channel
        contact_urn = msg.contact_urn
    else:
        # if the action is on the first node we might not have an sms (or channel) yet
        channel = None
        text = None
        attachments = []
        contact_urn = contact.get_urn()
    steps = []
    for step in run.steps.prefetch_related('messages', 'broadcasts').order_by('arrived_on'):
        steps.append(dict(type=step.step_type,
                          node=step.step_uuid,
                          arrived_on=datetime_to_str(step.arrived_on),
                          left_on=datetime_to_str(step.left_on),
                          text=step.get_text(),
                          value=step.rule_value))
    data = dict(channel=channel.id if channel else -1,
                channel_uuid=channel.uuid if channel else None,
                relayer=channel.id if channel else -1,
                flow=flow.id,
                flow_uuid=flow.uuid,
                flow_name=flow.name,
                flow_base_language=flow.base_language,
                run=run.id,
                text=text,
                attachments=[a.url for a in attachments],
                step=six.text_type(node_uuid),
                phone=contact.get_urn_display(org=org, scheme=TEL_SCHEME, formatted=False),
                contact=contact.uuid,
                contact_name=contact.name,
                urn=six.text_type(contact_urn),
                values=json.dumps(values),
                steps=json.dumps(steps),
                time=json_time,
                header=header)
    if not action:  # pragma: needs cover
        action = 'POST'
    webhook_event = cls.objects.create(org=org,
                                       event=cls.TYPE_FLOW,
                                       channel=channel,
                                       data=json.dumps(data),
                                       run=run,
                                       try_count=1,
                                       action=action,
                                       resthook=resthook,
                                       created_by=api_user,
                                       modified_by=api_user)
    status_code = -1
    message = "None"
    body = None
    start = time.time()
    # webhook events fire immediately since we need the results back
    try:
        # no url, bail!
        if not webhook_url:
            raise Exception("No webhook_url specified, skipping send")
        # only send webhooks when we are configured to, otherwise fail
        if settings.SEND_WEBHOOKS:
            # copy the shared headers so per-call additions don't leak into
            # every later webhook call
            requests_headers = dict(TEMBA_HEADERS)
            if header:
                requests_headers.update(header)
            # some hosts deny generic user agents, use Temba as our user agent
            if action == 'GET':
                response = requests.get(webhook_url, headers=requests_headers, timeout=10)
            else:
                response = requests.post(webhook_url, data=data, headers=requests_headers, timeout=10)
            body = response.text
            if body:
                body = body.strip()
            status_code = response.status_code
        else:
            print("!! Skipping WebHook send, SEND_WEBHOOKS set to False")
            body = 'Skipped actual send'
            status_code = 200
        # process the webhook response
        try:
            response_json = json.loads(body, object_pairs_hook=OrderedDict)
            # only update if we got a valid JSON dictionary or list
            if not isinstance(response_json, (dict, list)):
                raise ValueError("Response must be a JSON dictionary or list, ignoring response.")
            run.update_fields(response_json)
            message = "Webhook called successfully."
        except ValueError:
            message = "Response must be a JSON dictionary or list, ignoring response."
        if 200 <= status_code < 300:
            webhook_event.status = cls.STATUS_COMPLETE
        else:
            webhook_event.status = cls.STATUS_FAILED
            message = "Got non 200 response (%d) from webhook." % response.status_code
            raise Exception("Got non 200 response (%d) from webhook." % response.status_code)
    except Exception as e:
        import traceback
        traceback.print_exc()
        webhook_event.status = cls.STATUS_FAILED
        message = "Error calling webhook: %s" % six.text_type(e)
    finally:
        webhook_event.save()
        # make sure our message isn't too long
        if message:
            message = message[:255]
        request_time = (time.time() - start) * 1000
        result = WebHookResult.objects.create(event=webhook_event,
                                              url=webhook_url,
                                              status_code=status_code,
                                              body=body,
                                              message=message,
                                              data=urlencode(data, doseq=True),
                                              request_time=request_time,
                                              created_by=api_user,
                                              modified_by=api_user)
        # if this is a test contact, add an entry to our action log
        if run.contact.is_test:
            log_txt = "Triggered <a href='%s' target='_log'>webhook event</a> - %d" % (
                reverse('api.log_read', args=[webhook_event.pk]), status_code)
            ActionLog.create(run, log_txt, safe=True)
    return result
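# Why the shared headers are copied above (self-contained illustration):
def _example_header_copy():
    base = {'User-agent': 'Temba'}     # stands in for the shared TEMBA_HEADERS
    per_call = dict(base)              # copy, so the shared dict is not mutated
    per_call.update({'X-Custom': '1'})
    assert 'X-Custom' not in base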
def _initialize(self, db):
    tr = TransactionReform(main_net=self.main_net, your_ck=self.ck)
    incoming = self._get_history(self.nem.TRANSFER_INCOMING)
    incoming = tr.reform_transactions(incoming)[::-1]
    outgoing = self._get_history(self.nem.TRANSFER_OUTGOING)
    outgoing = tr.reform_transactions(outgoing)[::-1]
    with db as conn:
        self.refresh(db=db)
        incoming_many = list()
        for tx in incoming:
            if tx['recipient'] != self.ck:
                continue
            if self.nem.height < tx['height'] + self.confirm_height:
                threading.Thread(target=self._confirm, name='Confirm', args=(tx,), daemon=True).start()
                logging.info("Unconfirmed receive 0x%s" % tx['txhash'])
                continue  # not enough confirmation blocks have passed yet
            txhash = unhexlify(tx['txhash'].encode())
            f = conn.execute("""
                SELECT `txhash` FROM `incoming_table` WHERE `txhash`= ?
            """, (txhash,))
            if f.fetchone() is not None:
                continue
            height = tx['height']
            try:
                if self.f_at_first:
                    userid = self.owner_id
                else:
                    tag = msg2tag(tx['message'])
                    userid = self.find_user(tag=tag, address=tx['sender'], db=db)
            except:
                import traceback
                traceback.print_exc()
                userid, tag = self.create_user(group='@unknown', db=db)
            for mosaic in tx['coin']:
                self._check_expire_mosaic(mosaic, db=db)
                amount = tx['coin'][mosaic]
                value = self.get_value(mosaic, amount, db=db)
                price = self.get_price(mosaic, db=db)
                incoming_many.append((
                    txhash, height, userid, mosaic, amount, value, price, tx['time']
                ))
        if len(incoming_many) > 0:
            conn.executemany("""
                INSERT INTO `incoming_table` VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """, incoming_many)
        outgoing_many = list()
        for tx in outgoing:
            if tx['sender'] != self.ck:
                continue
            txhash = unhexlify(tx['txhash'].encode())
            f = conn.execute("""
                SELECT `txhash`, `height` FROM `outgoing_table` WHERE `txhash`= ?
            """, (txhash,))
            txs = f.fetchall()
            if len(txs) > 0:
                for txhash_, height_ in txs:
                    if height_ is None:
                        # repair a TX whose height UPDATE previously failed
                        conn.execute("""
                            UPDATE `outgoing_table` SET `height`= ?, `time`= ? WHERE `txhash`= ?
                        """, (tx['height'], tx['time'], txhash_))
                        break
                else:
                    continue  # TXs that appear never to have been included are removed manually
            else:
                height = tx['height']
                if self.f_at_first:
                    userid = self.owner_id
                else:
                    userid, tag = self.create_user(group='@unknown', db=db)  # unknown user
                fee = self.nem.estimate_levy_fee(tx['coin'])
                fee = DictMath.add(fee, {'nem:xem': tx['fee']})
                all_amount = DictMath.add(fee, tx['coin'])
                for mosaic in all_amount:
                    self._check_expire_mosaic(mosaic, db=db)
                    amount = all_amount[mosaic]
                    if amount == 0:
                        continue
                    value = self.get_value(mosaic, amount, db=db)
                    price = self.get_price(mosaic, db=db)
                    outgoing_many.append((
                        txhash, height, userid, mosaic, amount, value, price, tx['time']
                    ))
        if len(outgoing_many) > 0:
            conn.executemany("""
                INSERT INTO `outgoing_table` VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """, outgoing_many)
        if len(incoming_many) > 0 or len(outgoing_many) > 0:
            conn.commit()
    return incoming_many, outgoing_many
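# DictMath.add is imported from elsewhere in this project; from its use above
# it is assumed to merge {mosaic: amount} dicts by summing per-mosaic amounts.
# A stand-in under that assumption:
def _example_dict_add(a, b):
    out = dict(a)
    for k, v in b.items():
        out[k] = out.get(k, 0) + v
    return out  # _example_dict_add({'nem:xem': 1}, {'nem:xem': 2}) -> {'nem:xem': 3}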
def on_error(self, ws, error):
    if not self.finished:
        traceback.print_exc()
        sys.exit(1)
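# Wiring sketch (URL is a placeholder): on_error matches the websocket-client
# callback signature, e.g.
#   ws = websocket.WebSocketApp("wss://example.invalid/stream",
#                               on_error=self.on_error)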
def parse_scriptSig(d, bytes): active_chain = chainparams.get_active_chain() try: decoded = [ x for x in script_GetOp(bytes) ] except Exception: # coinbase transactions raise an exception print_error("cannot find address in input script", bytes.encode('hex')) return # payto_pubkey match = [ opcodes.OP_PUSHDATA4 ] if match_decoded(decoded, match): sig = decoded[0][1].encode('hex') d['address'] = "(pubkey)" d['signatures'] = [sig] d['num_sig'] = 1 d['x_pubkeys'] = ["(pubkey)"] d['pubkeys'] = ["(pubkey)"] return # non-generated TxIn transactions push a signature # (seventy-something bytes) and then their public key # (65 bytes) onto the stack: match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ] if match_decoded(decoded, match): sig = decoded[0][1].encode('hex') x_pubkey = decoded[1][1].encode('hex') try: signatures = parse_sig([sig]) pubkey, address = parse_xpub(x_pubkey) except: import traceback traceback.print_exc(file=sys.stdout) print_error("cannot find address in input script", bytes.encode('hex')) return d['signatures'] = signatures d['x_pubkeys'] = [x_pubkey] d['num_sig'] = 1 d['pubkeys'] = [pubkey] d['address'] = address return # p2sh transaction, 2 of n match = [ opcodes.OP_0 ] while len(match) < len(decoded): match.append(opcodes.OP_PUSHDATA4) if not match_decoded(decoded, match): print_error("cannot find address in input script", bytes.encode('hex')) return x_sig = map(lambda x:x[1].encode('hex'), decoded[1:-1]) d['signatures'] = parse_sig(x_sig) d['num_sig'] = 2 dec2 = [ x for x in script_GetOp(decoded[-1][1]) ] match_2of2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ] match_2of3 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ] if match_decoded(dec2, match_2of2): x_pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex') ] elif match_decoded(dec2, match_2of3): x_pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex'), dec2[3][1].encode('hex') ] else: print_error("cannot find address in input script", bytes.encode('hex')) return d['x_pubkeys'] = x_pubkeys pubkeys = map(lambda x: parse_xpub(x)[0], x_pubkeys) d['pubkeys'] = pubkeys redeemScript = Transaction.multisig_script(pubkeys,2) d['redeemScript'] = redeemScript d['address'] = hash_160_to_bc_address(hash_160(redeemScript.decode('hex')), active_chain.p2sh_version)
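# match_decoded is defined elsewhere in this module; from its use above it is
# assumed to compare a decoded script against an opcode template, with
# OP_PUSHDATA4 acting as a wildcard for any push. A stand-in under that
# assumption:
def _example_match_decoded(decoded, match):
    if len(decoded) != len(match):
        return False
    for item, op in zip(decoded, match):
        if op == opcodes.OP_PUSHDATA4 and item[0] <= opcodes.OP_PUSHDATA4:
            continue  # any push opcode satisfies the wildcard slot
        if item[0] != op:
            return False
    return True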
def parse_html(html_str, court_name): try: soup = BeautifulSoup(html_str, "html.parser") table_tag = soup.find_all('table')[1] table_soup = BeautifulSoup(str(table_tag), "html.parser") tr_list = table_soup.find_all('tr') tr_count = 0 for tr in tr_list: emergency_exit = select_one_query("SELECT emergency_exit FROM Tracker WHERE Name='" + court_name + "'") if emergency_exit is not None: if emergency_exit['emergency_exit'] == 1: break tr_count += 1 if tr_count <= 2 or tr_count > 17: continue case_no = "NULL" judgment_date = "NULL" coram = "NULL" type_ = "NULL" status = "NULL" pdf_data = "NULL" pdf_file = "NULL" # insert_check = False tr_soup = BeautifulSoup(str(tr), "html.parser") td_list = tr_soup.find_all('td') i = 0 for td in td_list: i += 1 if i == 1: case_no = escape_string(str(td.decode_contents())) # if select_count_query(str(court_name), str(case_no), 'judgment_date', judgment_date): # insert_check = True if i == 2: coram = escape_string(str(td.decode_contents())) if i == 3: judgment_date = escape_string(str(td.decode_contents())) if i == 5: type_ = escape_string(str(td.decode_contents())) if i == 6: status = escape_string(str(td.decode_contents())) if i == 4: a_tag = BeautifulSoup(str(td), "html.parser").a pdf_file = escape_string(base_url + a_tag.get('href')) pdf_data = escape_string(request_pdf(base_url + a_tag.get('href'), case_no, court_name)) # if case_no != "NULL" and insert_check and case_no.find("DISCLAIMER") == -1: if case_no != "NULL" and case_no.find("DISCLAIMER") == -1: sql_query = "INSERT INTO " + str(court_name) + " (case_no, judgment_date, coram, type, status, " \ "pdf_file, pdf_filename) VALUE ('" + case_no + "', '" + \ judgment_date + "', '" + coram + "', '" + type_ + "', '" + status + "', '" + pdf_file + \ "', '" + court_name + "_" + slugify(case_no) + ".pdf')" insert_query(sql_query) update_query("UPDATE " + court_name + " SET pdf_data = '" + str(pdf_data) + "' WHERE case_no = '" + str(case_no) + "'") update_query("UPDATE Tracker SET No_Cases = No_Cases + 1 WHERE Name = '" + str(court_name) + "'") return True except Exception as e: traceback.print_exc() logging.error("Failed to parse the html: %s", e) update_query("UPDATE Tracker SET No_Error = No_Error + 1 WHERE Name = '" + str(court_name) + "'") return False
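# Hedged alternative to the string-built INSERT above: a parameterized query
# (helper name and DB-API cursor are ours, not the original module's). Table
# names cannot be bound as parameters, so court_name must still come from a
# trusted whitelist.
def _example_parameterized_insert(cursor, court_name, row):
    sql = ("INSERT INTO " + court_name +
           " (case_no, judgment_date, coram, type, status, pdf_file, pdf_filename)"
           " VALUES (%s, %s, %s, %s, %s, %s, %s)")
    cursor.execute(sql, row)  # the driver escapes each bound value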
def main():
    app = Client(get_bot_name(), bot_token=get_token())

    def member_in(member, member_list):
        for m in member_list:
            if member == m[0]:
                return True
        return False

    def load(target):
        try:
            return get_subs(target)
        except Exception:
            return []

    def tag_user(name):
        # add "@" before the username in a display name like "Alice (alice123)"
        try:
            end_found = False
            start = -1
            for i in range(len(name), 0, -1):
                j = i - 1
                if name[j] == ")":
                    end_found = True
                if name[j] == "(" and end_found:
                    start = j
                    break
            if start >= 0:
                return name[:start + 1] + "@" + name[start + 1:]
            else:
                return name
        except:
            return name

    with app:
        while True:
            track = get_tracking_channels()
            print(track)
            for pair in track:
                try:
                    owner_name = pair[0]
                    target = pair[1]
                    prev = load(target)
                    new = get_all_subscribers_list(target)
                    if len(prev) == 0:
                        # first time we see this channel: seed the stored list
                        prev = new
                        for uuid, name in prev:
                            print(uuid)
                            add_sub(name, target, uuid)
                    else:
                        for uuid, name in new:
                            if not member_in(uuid, prev):
                                sub_message = "[@{}] ✅ {} subscribed."\
                                    .format(target, tag_user(name))
                                add_sub(name, target, uuid)
                                app.send_message(owner_name, sub_message)
                        for uuid, name in prev:
                            if not member_in(uuid, new):
                                unsub_message = "[@{}] ❌ {} unsubscribed."\
                                    .format(target, tag_user(name))
                                delete_sub(uuid, target)
                                app.send_message(owner_name, unsub_message)
                except Exception as err:
                    print(err)
                    traceback.print_exc(file=sys.stdout)
                    time.sleep(60)
                    continue
            time.sleep(5)
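# tag_user() behavior sketch (from the helper above): "Alice (alice123)"
# becomes "Alice (@alice123)"; names without a "(...)" suffix are returned
# unchanged.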
def main(): global x global y global score global thiscolor global returnCount try: # while we haven't clicked the return 3 times in a row while returnCount < 3: for event in pygame.event.get(): if event.type == KEYDOWN: sense.set_pixel(x, y, thiscolor[0], thiscolor[1], thiscolor[2]) if event.key == K_DOWN and y < 7: returnCount = 0 y = y + 1 elif event.key == K_UP and y > 0: returnCount = 0 y = y - 1 elif event.key == K_RIGHT and x < 7: returnCount = 0 x = x + 1 elif event.key == K_LEFT and x > 0: returnCount = 0 x = x - 1 elif event.key == K_RETURN: returnCount = returnCount + 1 print(returnCount) print(str(x) + ' ' + str(y)) payload= {"x": x, "y": y, "player": "p1" } r = requests.post('https://amantestemail.mybluemix.net/shot/', data = payload) shotResult = r.json() jsonArr = json.dumps(shotResult) result = shotResult['res'] print(result) # translate coords to sea position your_position = (y*8)+x if result == 'hit': enemySea[your_position] = hitColor elif result == 'miss': enemySea[your_position] = seaColor elif result == 'crazy??': enemySea[your_position] = hitColor print('already a hit') else: print('unknown response: ' + result) sense.set_pixels(mySea) time.sleep(2) sense.set_pixels(enemySea) thiscolor = sense.get_pixel(x, y) sense.set_pixel(x, y, 0, 255, 0) #colour of pixel for location target except KeyboardInterrupt: print ("shutdown requested... exiting") sense.clear() except Exception: traceback.print_exc(file=sys.stdout) sys.exit(0) print ('game over') sense.show_message("game over", text_colour=[0, 255, 255], scroll_speed=0.07) sense.show_message(str(score), text_colour=[0, 200, 255]) # TODO if ships are still floating if True: print ('well done') sense.show_message("well done", text_colour=[0, 255, 255]) time.sleep(3) else: print ("better luck next time")
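# Board-index note: the 8x8 LED matrix is stored as a flat list of 64 pixels,
# so (x, y) maps to your_position = y * 8 + x; e.g. (x=3, y=2) -> index 19.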