def __init__(self):
    """Attach this instance to the shared Selenium driver (class-level singleton)."""
    if Driver.DRIVER is None:
        # First caller creates the Firefox driver and publishes it class-wide.
        self.driver = webdriver.Firefox()
        Driver.DRIVER = self.driver
        log.debug("Driver started with context: " + str(Driver.CONTEXT))
    else:
        # Reuse the already-started driver.
        self.driver = Driver.DRIVER
def run(self):
    """The main entry point for a plugin."""
    self._request = self._parse_request()
    log.debug('Handling incoming request for %s', self.request.path)
    # Dispatch the route, then release any storages before returning the items.
    result = self._dispatch(self.request.path)
    self.close_storages()
    return result
def __init__(self, *args):
    """Set up the main window: categories, config defaults, caches and workers."""
    QMainWindow.__init__(self, *args)
    log.debug("main window initialization starting")
    # these are all categories apple is providing for now
    self.categories = [(self.tr('Just added'), '/trailers/home/feeds/just_added.json'),
                       (self.tr('Exclusive'), '/trailers/home/feeds/exclusive.json'),
                       (self.tr('Only HD'), '/trailers/home/feeds/just_hd.json'),
                       (self.tr('Most popular'), '/trailers/home/feeds/most_pop.json'),
                       (self.tr('Search'), '/trailers/home/scripts/quickfind.php?&q=')]
    # pick sane defaults
    self.config = configparser.SafeConfigParser({'downloadDir':'/tmp',
                                                 'filters':json.dumps([y for x, y in PyTrailerSettings.filters]),
                                                 'readAhead':'4',
                                                 'parallelDownload':'2',
                                                 'player':'mplayer -user-agent %%a %%u'})
    log.info("settings loaded: %s" % self.config.items("DEFAULT"))
    # run initializations
    self.player_proc = None
    self.list_loader = None
    self.list_loader_p = None
    self.movieDict = {}
    # user config read AFTER defaults so it overrides them
    self.config.read(self.configPath)
    self.load_cache()
    self.init_preloaders()
    self.init_widget()
    self.init_menus()
    self.downloader.start()
    # NOTE(review): handlers are bound on the class (PyTrailerWidget), not self —
    # confirm term_handler is usable as a class-level callable.
    signal.signal(signal.SIGTERM, PyTrailerWidget.term_handler)
    signal.signal(signal.SIGINT, PyTrailerWidget.term_handler)
    log.debug("main window initialization done")
def unload_plugin(self, plugin_type, plugin_name, plugin_version):
    """Unload a snap plugin via snapctl.

    :param plugin_type: plugin category (e.g. collector/processor/publisher)
    :param plugin_name: plugin name
    :param plugin_version: plugin version
    :return: True when the CLI output reports "Plugin unloaded"
    """
    cmd = '{} plugin unload {}:{}:{}'.format(os.path.join(self.dir, self.name), plugin_type, plugin_name, plugin_version)
    log.debug("snapctl unload plugin {}".format(cmd))
    out = self._start_process(cmd)
    # Compute the success check once instead of scanning the output twice.
    unloaded = "Plugin unloaded" in out
    log.debug("plugin unloaded? {}".format(unloaded))
    return unloaded
def load_group(self, groupName):
    """Start loading the trailer list for *groupName* in a background process."""
    url = None
    for cat, catURL in self.categories:
        if cat == groupName:
            url = "http://trailers.apple.com%s" % catURL
            break
    if groupName == self.tr("Search"):
        # Search appends the user's query string to the quickfind URL.
        d = PyTrailerSearch(self)
        if d.exec_() == QDialog.Accepted:
            url = "%s%s" % (url, d.ui.lineEdit.text())
        else:
            # dialog cancelled: nothing to load
            return
    self.unload_current_group()
    log.debug("loading group %s" % groupName)
    # loadID is used to identify what group task belonged to
    # we can use it to make sure we don't display trailers from
    # different group after being cached
    self.loadID = random.random()
    self.loading.setVisible(True)
    # run loading in separate process
    self.list_loader , child_conn = multiprocessing.Pipe()
    self.list_loader_p = multiprocessing.Process(target=PyTrailerWidget.movielist_loader,
                                                 args=(child_conn,url))
    self.list_loader_p.start()
def actionDateTime_trigger(self):
    """Apply a (possibly incrementing) CreateDate to all checked tree items via exiftool."""
    global CreateDate
    global item_list
    log.info("====== DateTime ======")
    self.iw=DateTimeWindow("","","",0)
    self.iw.exec_()
    res = self.iw.result()
    log.debug("resultat=%d" % res)
    if res == 1:  # dialog accepted
        CreateDate, incSecond = self.iw.save()
        log.info("CreateDate is %s, incSeconds %s" % (CreateDate, incSecond))
        for i in range(self.ui.treeWidget.topLevelItemCount()):
            item = self.ui.treeWidget.topLevelItem(i)
            if item.checkState(0) == 2:  # 2 == Qt.Checked
                exif.initArglist()
                exif.appendCreateDate(CreateDate)
                exif.appendArglist(SourceDir + os.path.sep + item.text(2))
                exif.executeExiftool()
                # advance the timestamp for the next checked file
                CreateDate = self.iw.inctime(CreateDate)
        # re-read files so the tree reflects the new metadata
        item_list.clear()
        item_list = filemgr.readFiles(Filelist, item_list,self.ui.progressBar)
        self.redrawTreeWidget(item_list)
def post_threadpool_actions(pool, args, expected_filesize, SmartDL_obj):
    "Run function after thread pool is done. Run this in a thread."
    # Poll until the download threads finish.
    while not pool.isFinished():
        time.sleep(0.1)
    if SmartDL_obj._killed:
        return
    if expected_filesize:  # if not zero, etc expected filesize is not known
        threads = len(args[0])
        total_filesize = sum([os.path.getsize(x) for x in args[0]])
        diff = math.fabs(expected_filesize - total_filesize)
        # if the difference is more than 4*thread numbers (because a thread may download 4KB more per thread because of NTFS's block size)
        if diff > 4*threads:
            log.warning('Diff between downloaded files and expected filesizes is %dKB. Retrying...' % diff)
            SmartDL_obj.retry()
            return
    SmartDL_obj.status = "combining"
    combine_files(*args)
    if SmartDL_obj.verify_hash:
        dest_path = args[-1]
        # NOTE(review): reads the whole file into memory to hash it; consider
        # chunked reads for very large downloads.
        with open(dest_path, 'rb') as f:
            hash = hashlib.new(SmartDL_obj.hash_algorithm, f.read()).hexdigest()  # shadows builtin `hash`
        if hash == SmartDL_obj.hash_code:
            log.debug('Hash verification succeeded.')
        else:
            log.warning('Hash verification failed (got %s, expected %s). Trying next mirror...' % (hash, SmartDL_obj.hash_code))
            SmartDL_obj.try_next_mirror()
            return
def scp(self):
    """Initialize the SCP client."""
    # Lazily build the client on first use and cache it on the instance.
    if self._scp:
        return self._scp
    log.debug("creating scp connection")
    self._scp = scp.SCPClient(self.transport, progress=self._file_transfer_progress)
    return self._scp
def unlink(self, path):
    """Remove the hashed backing file for *path*, then prune empty parent dirs."""
    log.debug("path %s" % path)
    # os.unlink("." + path)
    hasher = HashPath("Salt")
    parent_dir, full_path = hasher.path(path)
    os.unlink(full_path)
    self.unlink_empty_dirs(parent_dir)
def tojson(input):
    """Template filter: validate *input* as JSON and return it marked safe.

    Returns None when the input is not valid JSON (templates render None
    as empty). NOTE: the parameter shadows the builtin `input`; kept for
    interface compatibility with existing templates.
    """
    log.debug(input)
    try:
        json.loads(input)
        return mark_safe("%s" % input)
    except Exception:
        # Invalid JSON: previously bound the exception to an unused name
        # (Python 2 syntax) and fell through; make the None return explicit.
        return None
def _doSixthSense(self, messenger, currentTime, player, cellIndex, teamAmount):
    """Run the configured sixth-sense command sequence for the current param
    set, honoring per-param cooldown and team-size constraints."""
    index = self._currentIndex
    param = self._currentParam
    cooldownInterval = param.get('CooldownInterval')
    commandDelay = param.get('CommandDelay')
    textDelay = param.get('TextDelay')
    minTeamAmount = param.get('MinTeamAmount')
    maxTeamAmount = param.get('MaxTeamAmount')
    cooldownTime = self._getCooldownTime(currentTime, cooldownInterval)
    if cooldownTime > 0:
        # still cooling down from a previous activation
        log.info('[{}]: now cooldown time, skip. (rest {:.1f}s)'.format(index, cooldownTime))
        return
    messenger.setParam(commandDelay, textDelay)
    if minTeamAmount and teamAmount <= minTeamAmount:
        log.info('[{}]: team amount ({}) is too less (<= {}), skip.'.format(index, teamAmount, minTeamAmount))
        return
    if maxTeamAmount and teamAmount > maxTeamAmount:
        log.info('[{}]: team amount ({}) is too many (> {}), skip.'.format(index, teamAmount, maxTeamAmount))
        return
    commandOrder = param.get('CommandOrder', [])
    log.info('[{}]: command order: {}'.format(index, commandOrder))
    for command in commandOrder:
        log.debug('[{}]: already executed command class: {}'.format(index, self._isDone))
        # dispatch to the handler method registered for this command name
        getattr(self, _commandMethod[command])(messenger, cellIndex=cellIndex)
def __init__(self, path, section='DEFAULT'):
    """Resolve the .settings.ini for *path* by matching configured virtual paths."""
    log.debug('initing global_settings by from %s, section:%s' % (path, section))
    self.inifile = None
    # NOTE(review): no break — if several virtual paths match, the last one
    # iterated wins; inifile stays None when nothing matches. Confirm both
    # behaviors are intended.
    for virtpath in settings.VIRTUAL_PATHS.values():
        if virtpath in path:
            self.inifile = os.path.join(virtpath, '.settings.ini')
    self.section = section
def redirect(self, url): """Used when you need to redirect to another view, and you only have the final plugin:// url.""" # TODO: Should we be overriding self.request with the new request? self._request = self._parse_request(url=url, handle=self.request.handle) log.debug('Redirecting %s to %s', self.request.path, self._request.path) return self._dispatch(self._request.path)
def _reset_config(self):
    """Reset the settings dialog widgets from the persisted configuration."""
    if self.searcher.configuration.get_value("USE_FILEBROWSER"):
        self._file_browser_checkbox.set_active(True)
    else:
        self._file_browser_checkbox.set_active(False)
    # Bug fix: the log line previously queried get_value("") while labelling
    # the output EXCLUDE_LIST; query the real key so the log is truthful.
    log.debug("[GeditOpenFileGui] EXCLUDE_LIST = " + str(self.searcher.configuration.get_value("EXCLUDE_LIST")))
    self._config_ignore_input.set_text(", ".join(self.searcher.configuration.get_value("EXCLUDE_LIST")))
def parse_ChartLyrics(song, artist):
    "Uses ChartLyrics API for lyrics grabbing."
    artist = urllib2.quote(artist.encode("utf8"))
    song = urllib2.quote(song.encode("utf8"))
    url = "http://api.chartlyrics.com/apiv1.asmx/SearchLyricDirect?artist=%s&song=%s" % (artist, song)
    log.debug('Fetching %s...' % url)
    try:
        obj = urllib2.urlopen(url)
        response = obj.read()
    except urllib2.HTTPError:
        # lyricsGrabber_ChartLyrics: HTTP Error 500
        return (x for x in [])  # empty generator: nothing found
    obj.close()
    dom = xml.dom.minidom.parseString(response)
    try:
        dom_GetLyricResult = dom.getElementsByTagName('GetLyricResult')[0]
        lyrics = dom_GetLyricResult.getElementsByTagName('Lyric')[0].childNodes[0].data
        artist = dom_GetLyricResult.getElementsByTagName('LyricArtist')[0].childNodes[0].data
        title = dom_GetLyricResult.getElementsByTagName('LyricSong')[0].childNodes[0].data
    except IndexError:
        # response carried no usable result nodes
        return (x for x in [])
    lyrics += "\n\n [ Lyrics from ChartLyrics ] "
    lyricsObj = utils.classes.LyricsData(lyrics, artist, title)
    # single-item generator, matching the other grabbers' return contract
    return (x for x in [lyricsObj])
def wipe(self):
    """
    Zap (destroy) any GPT and MBR data structures if present
    For DASD disks create a new VTOC table
    """
    if 'dasd' in self.table_type:
        log.debug('Initialize DASD disk with new VTOC table')
        # fdasd reads its answers from stdin; feed it a scripted dialog
        # (yes / default / write / quit)
        fdasd_input = NamedTemporaryFile()
        with open(fdasd_input.name, 'w') as vtoc:
            vtoc.write('y\n\nw\nq\n')
        bash_command = ' '.join(
            [
                'cat', fdasd_input.name, '|', 'fdasd', '-f',
                self.storage_provider.get_device()
            ]
        )
        Command.run(
            ['bash', '-c', bash_command]
        )
    else:
        log.debug('Initialize %s disk', self.table_type)
        Command.run(
            [
                'sgdisk', '--zap-all', self.storage_provider.get_device()
            ]
        )
def __init__(self):
    """Boot the agent: read config, register with the cluster, start the web server."""
    # init config reader
    self._config = ConfigMonitor()
    # start debugger
    log.debug(self._config.getVar('logging','dir'))
    # init serverpush
    self._push = Serverpush(self._config.getVar('cluster','host'))
    # basic informations
    info = BasicInformations()
    # register on server (only when no cluster id is stored yet)
    if self._config.getVar('cluster','id')!='':
        print(self._config.getVar('cluster','id'))
    else:
        log.info("reciving new id")
        response = self._push.push('register',json.dumps(info.getInfo()))
        print(response)
    # start webserver
    s = WebServer(self._config.getVar("webserver","host"),self._config.getVar("webserver","port"))
    s.start()
    pass
def readFiles(self, files, item_list, pbar):
    """Read EXIF/JSON metadata for *files* not yet present in *item_list*.

    :param files: iterable of file paths
    :param item_list: accumulator list of per-file metadata rows
    :param pbar: Qt progress bar, updated once per file
    :return: item_list with one row appended per newly-seen inode
    """
    log.info("Filemanager.readFiles")
    log.debug("Filemanager.readFiles: %s" % files)
    completed = 0
    pbar.setValue(0)
    pbar.setMaximum(len(files))
    for fname in files:  # renamed from `file` to avoid shadowing the builtin
        completed += 1
        pbar.setValue(completed)
        st_ino = os.stat(fname)[1]
        # Idiomatic truth tests instead of `== False` / `== True`.
        if not exif.inode_inlist(item_list, st_ino):
            if exif.readJson(fname):
                exif.readJsonCreateDate()
                item_list.append([1, st_ino, Path(fname).name,
                                  exif.fileModifyDate, exif.exifCreateDate,
                                  str(exif.LensModel), str(exif.FNumber),
                                  exif.GPSLatitude, exif.GPSLongitude])
    return item_list
def __init__(self, test, section='DEFAULT'):
    """Locate the test-context ini file next to *test* (a file or a directory)."""
    base_dir = test if os.path.isdir(test) else os.path.dirname(test)
    self.inifile = os.path.join(base_dir, settings.TEST_CONTEXT_FILE_NAME)
    self.section = section
    log.debug('context: %s, section: %s, test: %s' % (self.inifile, self.section, test))
def parsexml(self,xml): log.debug('in parsexml') # 如果传进来的是文件,则直接读取内容 # 如果是字符串,则先尝试解释,如果不是xml则再尝试作为文件路径,再不行,则抛异常了。 xml_content = '' if type(xml) == file: xml_content = xml.read() xml.close() elif type(xml) == str or type(xml) == unicode: log.debug('try to load file') if os.path.exists(xml): xml_file = open(xml,'r') xml_content = xml_file.read() xml_file.close() else: xml_content = xml else: log.error('could not init testcase from xml') raise TypeError,'fromxml need a file instance or a string instance for argument' log.debug('starting parse xml') log.debug('xml content: %s'%xml_content) doc = minidom.parseString(xml_content) ret = get_child_tags(doc,'test')#this statement return a list log.debug('child tag len : %d'%len(ret)) if ret: test_node = ret[0] return test_node else: log.warn('no test node in the xml!')
def handle_outbound_queue():
    """Drain the outbound message queue and hand packed replies to the reactor.

    Runs until the module-level thread_loop_active flag goes False.
    Python 2 (`except Exception, inst`).
    """
    while thread_loop_active:
        try:
            reply = outbound_messages.get(block=0)
            if reply and getattr(reply, 'msg') and getattr(reply, 'addr'):
                # RTP traffic is chatty: demote it to debug level
                if reply.msg_type != ClientRTP:
                    log.info('server sends %s to %s' % (reply.msg_type, repr(reply.addr)))
                else:
                    log.debug('server sends %s to %s' % (reply.msg_type, repr(reply.addr)))
                try:
                    data = reply.msg.pack()
                    # hand off to the reactor thread for the actual network send
                    reactor.callFromThread( servers_pool.send_to,reply.addr, data)
                except Exception, inst:
                    log.exception('exception')
        except Queue.Empty:
            # nothing queued: sleep briefly instead of busy-spinning
            time.sleep(0.010)
        except:
            log.exception('exception')
    log.info('terminating thread: handle_outbound_queue')
def __init__(self, path, flags, *mode): log.debug("file path %s flags %s mode %s " % ( path, flags, mode)) # self.file = os.fdopen(os.open("." + path, flags, *mode), # flag2mode(flags)) # self.fd = self.file.fileno() # log.debug("file %s fd %d" % (self.file, self.fd)) self.hashpath = HashPath("Salt") dir, full = self.hashpath.path(path) log.debug("dir=%s full= %s" %(dir,full)) self.makedirs(dir) self.file = os.fdopen(os.open(full, flags, *mode), flag2mode(flags)) self.fd = self.file.fileno() self.path = path self.flags = flags self.direct_io = 0 self.keep_cache = 0
def init_db(self, database):
    """Load UUID, shelves, shelf links and books from one Bookeen sqlite DB."""
    self.databases[database]['uuid'] = ''
    with closing(sqlite3.connect(str(self.databases[database]['path']))) as connection:
        log.debug("connect to db")
        connection.row_factory = dict_factory  # rows come back as dicts
        cursor = connection.cursor()
        cursor.execute('SELECT f_db_uuid FROM T_UUIDDB')
        self.databases[database]['uuid'] = cursor.fetchone()['f_db_uuid']
        log.debug("dbuuid: {}".format(self.databases[database]['uuid']))
        if database == 'main':
            # only the main DB carries shelf (collection) definitions
            log.debug("this is the main db")
            cursor.execute('SELECT * FROM T_SHELF')
            for shelf in cursor.fetchall():
                self.collections[shelf['f_id_shelf']] = BookeenShelf(shelf)
            cursor.execute('SELECT * FROM T_SHELF_LINK')
            for link in cursor.fetchall():
                self.shelves_links[link['f_item_id']] = (link['f_shelf_id'], link['f_db_uuid'])
        log.debug("finding books")
        # f_item_filetype=2 presumably selects book entries — TODO confirm
        cursor.execute('SELECT * FROM T_ITEM WHERE f_item_filetype=2')
        for book_row in cursor.fetchall():
            finished = True if book_row['f_islastpage'] else False
            # strip the filesystem prefix so the URI is device-relative
            book = BookeenBook('',
                               book_row['f_internal_uri'][len(self.databases[database]['prefix_fs']):],
                               book_row['f_title'],
                               finished=finished,
                               current_page=book_row['f_current_page'],
                               date=time.gmtime(book_row['f_documenttime']))
            if book_row['f_id_item'] in self.shelves_links.keys():
                self.collections[self.shelves_links[book_row['f_id_item']][0]].add_book(book)
            self.books[(self.databases[database]['uuid'], book_row['f_id_item'])] = book
            self.databases[database]['books'][book_row['f_id_item']] = book
    log.debug("Found {} books on {}".format(len(self.books), database))
def selectionchange(self, i):
    """Log the lens combo-box contents and the newly selected entry."""
    log.info("Items in the list are %d:" % i)
    combo = self.ui.lenses_comboBox
    for idx in range(combo.count()):
        log.debug(combo.itemText(idx))
    log.debug("Current index %d: selection changed %s" % (i, combo.currentText()))
def parse_glgltz():
    "Parses the top songs from glgltz"
    url = 'http://www.glgltz.co.il/1177-he/Galgalatz.aspx'
    log.debug('Fetching %s...' % url)
    obj = urllib2.urlopen(url, timeout=config.webservices_timeout)
    response = obj.read()
    soup = BeautifulSoup(response)
    # from PyQt4 import QtCore; import pdb; QtCore.pyqtRemoveInputHook(); pdb.set_trace()
    # locate the tab whose label is the Israeli chart and grab its category id
    tags = soup.find_all('a', id=re.compile('Master_ContentPlaceHolder1_rptTabs'))
    catid = [x['catid'] for x in tags if u"המצעד הישראלי" in x.text][0]
    url = 'http://www.glgltz.co.il/Shared/Ajax/GetTophitsByCategory.aspx?FolderId=%s&lang=he' % catid
    log.debug('Fetching %s...' % url)
    obj = urllib2.urlopen(url)
    response = obj.read()
    songs = []
    soup = BeautifulSoup(response)
    for tag in soup.find_all('div', class_='hit'):
        # each hit carries an optional h4 (title) and span (artist)
        title = tag.h4.text if tag.h4 else None
        artist = tag.span.text if tag.span else None
        if artist and title:
            songs.append("%s - %s" % (artist, title))
        elif artist:
            songs.append(artist)
        elif title:
            songs.append(title)
        else:
            raise RuntimeError("Could not parse glgltz hits")
    return songs
def log_error(self):
    """Record a push failure: bump the per-app fail counter and log the
    current exception's traceback. Must be called from an except block."""
    self.rds.hincrby("fail_counter", self.app_key)
    log.error('message push fail')
    # Renamed from `type` to avoid shadowing the builtin.
    exc_type, value, tb = sys.exc_info()
    error_message = traceback.format_exception(exc_type, value, tb)
    log.debug(exc_type)
    log.error(error_message)
def showSixthSenseIndicator(self):
    """Entry point for the sixth-sense event: run every active param set
    unless the feature is disabled or globally cooling down."""
    if not self._isEnabled or not self._activeParams:
        log.info('sixth sense message is disabled or nothing to do.')
        return
    currentTime = Utils.getTime()
    cooldownTime = self._getCooldownTime(currentTime, self._cooldownInterval)
    if cooldownTime > 0:
        # still inside the global cooldown window: tell the player how long is left
        log.info('[time:{:.1f}] invoke sixth sense, but it\'s not time yet. (rest {:.1f}s)'.format(currentTime, cooldownTime))
        Utils.addClientMessage(sm_settings.get('CooldownMsg').format(rest=int(math.ceil(cooldownTime))))
        return
    log.info('[time:{:.1f}] invoke sixth sense.'.format(currentTime))
    player = Utils.getPlayer()
    teamAmount = Utils.getTeamAmount()
    cellIndex = MinimapInfo.getCellIndexByPosition(Utils.getPos())
    messenger = IngameMessanger()
    log.info('current chat channel: {}'.format(messenger.getChannelLabels()))
    log.info('current team amount: {}'.format(teamAmount))
    self._isDone = {}  # per-invocation record of executed command classes
    for index, param in enumerate(self._activeParams):
        self._currentIndex = index
        self._currentParam = param
        self._doSixthSense(messenger, currentTime, player, cellIndex, teamAmount)
    if self._isDone:
        # at least one command ran: restart the global cooldown window
        log.debug('success commands, update last activity.')
        self._lastActivity = currentTime
def movie_readahead(taskQueue, doneQueue, cache):
    """Function to be run in separate process, caching additional
    movie information
    """
    while True:
        try:
            i, movie, loadID = taskQueue.get()
            log.debug("loading information about movie %s" % movie.title)
            latestUpdate = movie.get_latest_trailer_date()
            if movie.baseURL in cache and cache[movie.baseURL][0] >= latestUpdate:
                # fresh cache hit: reuse the stored fields
                cached_data = cache[movie.baseURL]
                movie.poster = cached_data[1]
                movie.trailerLinks = cached_data[2]
                movie.description = cached_data[3]
            else:
                # bare attribute reads: presumably lazy properties that fetch
                # the data over the network as a side effect — TODO confirm
                movie.poster
                movie.trailerLinks
                movie.description
            doneQueue.put((i, movie, loadID))
        except socket.error as e:
            # transient network failure: log and keep serving the queue
            log.error("network problem error while loading movie information: %s" % e)
            log.error(traceback.format_exc())
        except KeyboardInterrupt:
            log.debug("keyboard interrupt. stopping movie readahead")
            return
        except:
            log.error("uncaught exception ocurred while doing readahead. Please report this!")
            log.error(traceback.format_exc())
            raise
def get(self, option, default=None):
    """Return *option* from the context ini, falling back to *default* when unset."""
    value = config.get(self.inifile, self.section, option)
    log.debug('get context option: %s=%s from %s' % (option, value, self.inifile))
    return value if value else default
def fsync(self, isfsyncfile):
    """Flush buffered data; use fdatasync for data-only syncs when available."""
    log.debug("file %s isfsyncfile %d" % (self.path, isfsyncfile))
    self._fflush()
    data_only = isfsyncfile and hasattr(os, 'fdatasync')
    if data_only:
        os.fdatasync(self.fd)
    else:
        os.fsync(self.fd)
def eval(self, context, params=None):
    """Resolve this name in *context*; when it names a Function, evaluate it.

    Returns "" when the name is unbound in the context.
    """
    log.debug('try eval variable/function %r' % self.name)
    try:
        value = context.get(self.name)
        log.debug('get variable %r = %r' % (self.name, value))
        if (isinstance(value, procedures.Function)):
            # the name refers to a function: call it (no arguments)
            log.debug('evaluating function %r' % value)
            value = value.eval(context)
            log.debug('evaluated function %r = %r' % (self.name, value))
        return value
    except KeyError:
        log.error('Variable %r not found' % self.name)
        return ""
def config_nat(nf_command, nat_type, router_id, rule_id, nat):
    """Queue a NAT configure command for *router_id* and translate the
    queued command's outcome into an (HTTP JSON body, status code) pair."""
    log.debug('nf_command: %s, nat_id: %r-%r, nat: %r' % (nf_command, router_id, rule_id, nat.to_native()))
    args, constrains = get_config_nat_args(nf_command, nat_type, router_id, nat)
    if constrains is not None:
        # argument validation failed: report the constraint violation
        log.info(constrains)
        return json_response(status=INVALID_POST_DATA, description=constrains), HTTP_BAD_REQUEST
    key = (nf_command, 'nat', nat_type, str(router_id), str(nat.rule_id))
    ret_queue = PseudoGeventQueue()
    # NOTE(review): router_hash is not defined in this function — presumably
    # a module-level value; verify it is in scope.
    cmd_waiting_queues_put(router_id, router_hash, key, args, ret_queue)
    output = cmd_response_get(ret_queue)
    if output:
        # non-empty output from the worker signals a failure description
        return json_response(status=SERVER_ERROR, description=output), HTTP_INTERNAL_SERVER_ERROR
    return json_response(status=SUCCESS), HTTP_OK
def eval(self, context, params=None):
    """Look up a built-in document property (params[0]) on the global
    document metadata; returns "" when metadata or the field is missing."""
    assert len(params) == 1
    prop = params[0]

    # Make sure we read in the metadata.
    if (meta is None):
        log.error("BuiltInDocumentProperties: Metadata not read.")
        return ""

    # See if we can find the metadata attribute.
    if (not hasattr(meta, prop.lower())):
        log.error("BuiltInDocumentProperties: Metadata field '" + prop + "' not found.")
        return ""

    # We have the attribute. Return it.
    r = getattr(meta, prop.lower())
    log.debug("BuiltInDocumentProperties: return %r -> %r" % (prop, r))
    return r
def eval(self, context, params=None):
    """Evaluate arithmetic negation (-x) of the single operand."""
    # The wildcard for matching propagates through operations.
    evaluated_args = eval_args(self.arg, context)
    if ((isinstance(evaluated_args, Iterable)) and ("**MATCH ANY**" in evaluated_args)):
        return "**MATCH ANY**"

    # Return the negation of the argument.
    # (Previous comment said "the and of all the arguments" — copy/paste error.)
    try:
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug("Compute negate " + str(self.arg))
        val = self.arg
        if (isinstance(val, VBA_Object)):
            val = val.eval(context)
        return (-int(val))
    except Exception as e:
        log.error("Cannot compute negation of " + str(self.arg) + ". " + str(e))
        return "NULL"
def parse_billboard():
    "Parses the top 100 songs from billboard.com rss feed"
    url = 'http://www.billboard.com/rss/charts/hot-100'
    log.debug('Fetching %s...' % url)
    obj = urllib2.urlopen(url, timeout=config.webservices_timeout)
    response = obj.read()
    obj.close()
    songs = []
    soup = BeautifulSoup(response)
    for item in soup.find_all('item'):
        # drop any "Featuring ..." suffix from the artist name
        artist = item.artist.text.split('Featuring')[0]
        title = item.chart_item_title.text
        song = "%s - %s" % (artist, title)
        # replace the non-breaking space (U+00A0) with a plain space
        song = song.replace(' ', ' ')
        songs.append(song)
    return songs
def replace_constant_int_inline(vba_code):
    """
    Replace constant integer definitions inline, but leave the definition
    behind in case the regex fails.

    :param vba_code: VBA source text
    :return: the source text with recognized integer constants substituted
    """
    # \s* (was \s?) tolerates any amount of whitespace around '='; underscore
    # added to the identifier class so names like MY_CONST are recognized.
    const_pattern = re.compile(r"(?i)const ([a-zA-Z][a-zA-Z0-9_]{0,20})\s*=\s*(\d+)")
    d_const = dict()
    for const in re.findall(const_pattern, vba_code):
        d_const[const[0]] = const[1]
    if len(d_const) > 0:
        log.debug("Found constant integer definitions, replacing them.")
        for const in d_const:
            # only substitute uses delimited by parens/commas/spaces so we
            # don't rewrite substrings of longer identifiers
            this_const = re.compile('(?i)(?<=(?:[(), ]))' + str(const) + '(?=(?:[(), ]))')
            vba_code = re.sub(this_const, str(d_const[const]), vba_code)
    return (vba_code)
def eval(self, context, params=None):
    """Evaluate the VBA Eqv (bitwise logical equivalence) of the operands."""
    # The wildcard for matching propagates through operations.
    evaluated_args = eval_args(self.arg, context)
    if ((isinstance(evaluated_args, Iterable)) and ("**MATCH ANY**" in evaluated_args)):
        return "**MATCH ANY**"

    # return the eqv of all the arguments:
    try:
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug("Compute eqv " + str(self.arg))
        # a Eqv b == (a AND b) OR (NOT a AND NOT b), computed bitwise
        return reduce(lambda a, b: (a & b) | ~(a | b),
                      coerce_args(evaluated_args, preferred_type="int"))
    except (TypeError, ValueError):
        log.error('Impossible to Eqv arguments of different types.')
        return 0
    except RuntimeError as e:
        log.error("overflow trying eval Eqv: %r" % self.arg)
        raise e
def __extract_ngram_list(self, corpus, input_file):
    """Write the null-aligned n-grams for every corpus line to
    '<input_file>.nulls.list' and return that path.

    Iterates *corpus* and *input_file* in lockstep (Python 2: uses .next()).
    """
    input_io = open(input_file)
    list_io = open(input_file + '.nulls.list', 'w+')
    with open(corpus) as corpus_io:
        for i, line in enumerate(corpus_io):
            words = line.strip().split()
            # corresponding tokenized/lowercased line from the input file
            tokens = input_io.next().strip().lower().split()
            ngrams = self.__extract_ngrams(words, tokens)
            if ngrams:
                list_io.write("\n".join(ngrams) + "\n")
            # periodic progress marker
            if 0 == (i + 1) % DEBUG_COUNTER:
                log.debug("[{}]".format(i + 1))
    input_io.close()
    list_io.close()
    return input_file + '.nulls.list'
def parse_block(self, end=None):
    """
    Parse a block of statements, until reaching a line starting with the end string

    :param end: keywords indicating the end of the block (defaults to ['end', 'sub'])
    :return: list of statements (excluding the last line matching end)
    """
    # Avoid a mutable default argument (shared across calls); build per call.
    if end is None:
        end = ['end', 'sub']
    statements = []
    line_index, line, line_keywords = self.parse_next_line()
    while not list_startswith(line_keywords, end):
        try:
            l = vba_line.parseString(line, parseAll=True)
            log.debug(l)
            statements.extend(l)
        except ParseException as err:
            # report the parse failure with a caret under the offending column
            print('*** PARSING ERROR (3) ***')
            print(err.line)
            print(" " * (err.column - 1) + "^")
            print(err)
        line_index, line, line_keywords = self.parse_next_line()
    return statements
def open_file(editor, file_manager):
    """
    Open file from user disk

    :param editor: object of editor
    :param file_manager: object of FileManger class
    :return: None
    """
    open_file_dialog = QFileDialog.getOpenFileName(None, 'Open', '/')
    if open_file_dialog[0]:
        log.info('Opening File @ {}'.format(open_file_dialog[0]))
        # Renamed the handle from `file` to avoid shadowing the builtin.
        with open(open_file_dialog[0], 'r') as fp:
            file_text = fp.read()
            editor.setPlainText(file_text)
        log.debug('Updating file context details')
        file_manager.set_hash(file_text)
        file_manager.set_location(open_file_dialog[0])
        log.debug('Complete File Openning @ {}'.format(open_file_dialog[0]))
def metric_get(self, metric):
    """Run `snapctl metric get -m <metric>` and parse its tab-separated table
    output into a list of {header: value} dicts.

    NOTE(review): relies on Python 2 `map`/`filter` returning lists
    (len() is taken on the results); would need list(...) under Python 3.
    """
    cmd = '{} metric get -m {}'.format(os.path.join(self.dir, self.name), metric)
    log.debug("snapctl metric get -m {}".format(metric))
    out = self._start_process(cmd).split('\n')
    if len(out) < 8:
        # output too short to contain the preamble plus a table
        return []
    out = out[7:]  # skip the fixed 7-line preamble
    headers = map(lambda e: e.replace(" ", ""), filter(lambda e: e != "", out[0].split('\t')))
    rules = []
    for o in out[1:]:
        r = map(lambda e: e.replace(" ", ""), filter(lambda e: e != "", o.split('\t')))
        # keep only rows whose column count matches the header row
        if len(r) == len(headers):
            rule = {}
            for i in range(len(headers)):
                rule[headers[i]] = r[i]
            rules.append(rule)
    return rules
def _on_query_entry(self, widget, event):
    """Handle key events in the query entry: Return opens the top hit,
    any other key re-runs the search and refreshes the result list."""
    # Check to see if key pressed is Return if so open first file
    if event.keyval == gtk.keysyms.Return:
        self._on_select_from_list(None, event)
        return
    self._clear_treeveiw()  # Remove all
    input_query = widget.get_text()
    log.debug("[GeditOpenFileGui] input_query : %s" % input_query)
    if input_query:
        # Query database based on input
        results = self.searcher.search(input_query)
        self._insert_into_treeview(results)
        # Select the first one on the list. Renamed from `iter` to avoid
        # shadowing the builtin; use `is not None` for the None test.
        first_iter = self._liststore.get_iter_first()
        if first_iter is not None:
            self._file_list.get_selection().select_iter(first_iter)
def deactivate(self, container):
    """
    Deactivate all rules associated with this container
    :param Container container:
    :return:
    """
    with self._lock:
        old_container = self._active.get(container.id)
        if old_container is not None:
            del self._active[container.id]
        # since this container is active, get the old address
        # so we can log exactly which names/addresses
        # are being deactivated
        mapping = self._get_mapping_by_container(container)
        if mapping:
            log.debug('[registry] deactivating map for container %s \n%s',
                      container,
                      '\n'.join(["\t\t\t\t\t\t\t\t\t\t\t\t\t- %s -> %s" % (r.idna(), container.name)
                                 for r in mapping.names]))
            self._deactivate(mapping.names, tag=mapping.key)
def publish(file):
    """Publish *file* to IPFS (plus a test copy), reusing cached keys when present.

    Returns (pub_key, test_pub_key); empty strings for a falsy *file*.
    """
    if not file:
        return '', ''
    # ipfs add /home/erm/disk2/ipfs-storage/http---xml.nfowars.net-Alex.rss.cache.rss
    published_file = "%s.published" % file
    test_file = "%s.test.published" % file
    already_published = os.path.exists(published_file) and os.path.exists(test_file)
    if already_published:
        # both marker files exist: read the cached keys back
        with open(published_file, 'r') as fp:
            pub_key = "%s" % fp.read()
        with open(test_file, 'r') as fp:
            test_pub_key = "%s" % fp.read()
    else:
        pub_key = add_file(file, published_file)
        test_pub_key = add_file(published_file, test_file)
    log.debug("pub_key:%s test_pub_key:%s" % (pub_key, test_pub_key))
    return pub_key, test_pub_key
def get_full():
    """Fetch all monitored file rows from the monitoring DB.

    :return: list of (agentname, dirname, filename, content) tuples,
             or None when the query fails.
    """
    log.debug("Get full")
    table = FILES_TABLE_NAME
    response = None
    conn = None
    try:
        conn = sqlite3.connect(MONITORING_DB_NAME)
        c = conn.cursor()
        c.execute("SELECT agentname, dirname, filename, content FROM %s" % (table))
        response = c.fetchall()
    except Exception as e:
        print("Failed action get_full: %s: %s " % (type(e).__name__, e))
    finally:
        # Previously conn.close() ran twice on success and raised NameError
        # when connect() itself failed; guard and close exactly once.
        if conn is not None:
            conn.close()
    return response
def eval(self, context, params=None):
    """Evaluate subtraction across the operands, falling back to string->int
    coercion and then to character ordinals when numeric coercion fails."""
    # The wildcard for matching propagates through operations.
    evaluated_args = eval_args(self.arg, context)
    if ((isinstance(evaluated_args, Iterable)) and ("**MATCH ANY**" in evaluated_args)):
        return "**MATCH ANY**"

    # return the subtraction of all the arguments:
    try:
        log.debug("Compute subract " + str(self.arg))
        return reduce(lambda x, y: x - y, coerce_args(evaluated_args, preferred_type="int"))
    except (TypeError, ValueError):
        # Try converting strings to ints.
        # TODO: Need to handle floats in strings.
        try:
            return reduce(lambda x, y: coerce_to_int(x) - coerce_to_int(y), evaluated_args)
        except Exception as e:
            # Are we doing math on character ordinals?
            l1 = []
            orig = evaluated_args
            for v in orig:
                if (isinstance(v, int)):
                    l1.append(v)
                    continue
                if (isinstance(v, str) and (len(v) == 1)):
                    l1.append(ord(v))
                    continue
            # Do we have something that we can do math on?
            # (lengths differ when some arg was neither int nor 1-char string)
            if (len(orig) != len(l1)):
                log.error('Impossible to subtract arguments of different types. ' + str(e))
                return 0
            # Try subtracting based on character ordinals.
            return reduce(lambda x, y: int(x) - int(y), l1)
def eval(self, context, params=None):
    """VBA InStr: return the 1-based position of s2 within s1 (0 when absent).

    Optional leading int param is the start position; an optional trailing
    int selects the compare mode (only text compare is implemented).
    """
    # assumption: here the params have already been evaluated by Call_Function beforehand
    assert len(params) >= 2

    # Were we given a start position?
    start = 0
    s1 = params[0]
    if (s1 is None):
        s1 = ''
    s2 = params[1]
    if (s2 is None):
        s2 = ''
    if (isinstance(params[0], int)):
        # 3-argument form: (start, s1, s2); VBA start positions are 1-based
        start = params[0] - 1
        if (start < 0):
            start = 0
        s1 = params[1]
        s2 = params[2]

    # Were we given a search type?
    search_type = 1
    if (isinstance(params[-1], int)):
        search_type = params[-1]
        if (search_type not in (0, 1)):
            # unknown mode: default to text compare
            search_type = 1

    # TODO: Figure out how VB binary search works. For now just do text search.
    r = None
    if (len(s1) == 0):
        r = 0
    elif (len(s2) == 0):
        r = start
    elif (start > len(s1)):
        r = 0
    else:
        if (s2 in s1):
            # 1-based result, offset by the requested start position
            r = s1[start:].index(s2) + start + 1
        else:
            r = 0
    log.debug("InStr: %r returns %r" % (self, r))
    return r
def usersTimeoutCheckLoop(self):
    """
    Start timed out users disconnect loop.
    This function will be called every `checkTime` seconds and so on, forever.
    CALL THIS FUNCTION ONLY ONCE!
    :return:
    """
    try:
        log.debug("Checking timed out clients")
        exceptions = []
        timedOutTokens = []  # timed out users
        timeoutLimit = int(time.time()) - 100  # 100-second ping timeout
        for key, value in self.tokens.items():
            # Check timeout (fokabot is ignored)
            if value.pingTime < timeoutLimit and value.userID != 999 and not value.irc and not value.tournament:
                # That user has timed out, add to disconnected tokens
                # We can't delete it while iterating or items() throws an error
                timedOutTokens.append(key)

        # Delete timed out users from self.tokens
        # i is token string (dictionary key)
        for i in timedOutTokens:
            log.debug("{} timed out!!".format(self.tokens[i].username))
            self.tokens[i].enqueue(serverPackets.notification("Your connection to the server timed out."))
            try:
                logoutEvent.handle(self.tokens[i], None)
            except Exception as e:
                # collect; re-raised in bulk after the loop so one bad token
                # doesn't stop the others from being disconnected
                exceptions.append(e)
                log.error("Something wrong happened while disconnecting a timed out client. Reporting to Sentry "
                          "when the loop ends.")
        del timedOutTokens

        # Re-raise exceptions if needed
        if exceptions:
            raise periodicLoopException(exceptions)
    finally:
        # Schedule a new check (endless loop)
        threading.Timer(100, self.usersTimeoutCheckLoop).start()
def open(cls, url='', language='en', post_data=None, get_data=None):
    """Fetch *url* (GET, or POST when post_data is given), handling cookies
    and gzip; stores the body/headers/status on the class.

    :return: True on HTTP success, False otherwise.
    """
    if post_data is None:
        post_data = {}
    if get_data is not None:
        url += '?' + urlencode(get_data)
    print(url)
    result = True
    if len(post_data) > 0:
        # POST: the payload is stashed in cls._cookies by create_cookies
        cls.create_cookies(post_data)
    if cls._cookies is not None:
        req = urllib2.Request(url, cls._cookies)
        cls._cookies = None  # one-shot: consumed by this request
    else:
        req = urllib2.Request(url)
    req.add_header('User-Agent', USER_AGENT)
    req.add_header('Content-Language', language)
    req.add_header("Accept-Encoding", "gzip")
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cls.cookies))  # open cookie jar
    try:
        response = opener.open(req)  # send cookies and open url
        cls.headers = response.headers
        # borrow from provider.py Steeve
        if response.headers.get("Content-Encoding", "") == "gzip":
            import zlib
            # 16 + MAX_WBITS tells zlib to expect a gzip header
            cls.content = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(response.read())
        else:
            cls.content = response.read()
        response.close()
        cls.status = 200
    except urllib2.HTTPError as e:
        cls.status = e.code
        result = False
    except urllib2.URLError as e:
        cls.status = e.reason
        result = False
    log.debug("Status: " + str(cls.status))
    log.debug(cls.content)
    return result
def parse_onlylyrics(title, artist):
    "Uses OnlyLyrics.com for lyrics grabbing"
    log.debug("Grabbing lyrics for %s - %s from OnlyLyrics.com..." % (artist, title))
    url = 'http://www.onlylyrics.com/search.php?search=%s&metode=artist&x=0&y=0' % urllib2.quote(artist.encode("utf8"))
    log.debug('Fetching %s...' % url)
    obj = urllib2.urlopen(url)
    response = obj.read()
    DOMAIN = "www.onlylyrics.com"
    song_url = ""
    soup = BeautifulSoup(response)
    # search the artist's result links for an exact (case-insensitive) title match
    for link in soup.find_all('a', href=re.compile(r'.+-lyrics-[0-9]+.php')):
        link_artist, link_title = link.text.split(' :: ')
        if title.lower() == link_title.lower():
            song_url = "http://%s%s" % (DOMAIN, link['href'])
            break
    if not song_url:
        # NOTE(review): returns "" here but a generator below — callers must
        # handle both; confirm this asymmetry is intended.
        return ""
    obj = urllib2.urlopen(song_url)
    response = obj.read()
    soup = BeautifulSoup(response)
    div = soup.find('div', style='width:90%;margin:0 auto;')
    if not div:
        return ""
    # flatten the div, turning <br> tags into newlines (Python 2: unicode)
    lyrics = ""
    for tag in div.contents:
        tag = unicode(tag)
        if tag.startswith('<br'):
            tag = "\n"
        lyrics += tag
    lyrics += "\n\n [ Lyrics from %s ] " % song_url
    # NOTE(review): `utils.cls` here vs `utils.classes` in parse_ChartLyrics —
    # verify which module name is correct.
    lyricsObj = utils.cls.LyricsData(lyrics, artist, title)
    return (x for x in [lyricsObj])
def remove_chain(self, name, wrap=True):
    """Remove named chain.

    This removal "cascades". All rule in the chain are removed, as are
    all rules in other chains that jump to it.

    If the chain is not found, this is merely logged.
    """
    name = get_chain_name(name, wrap)
    chain_set = self._select_chain_set(wrap)
    if name not in chain_set:
        LOG.debug('Attempted to remove chain %s which does not exist',
                  name)
        return
    chain_set.remove(name)
    if not wrap:
        # non-wrapped chains and rules need to be dealt with specially,
        # so we keep a list of them to be iterated over in apply()
        self.remove_chains.add(name)
        # first, add rules to remove that have a matching chain name
        self.remove_rules += [r for r in self.rules if r.chain == name]
    # next, remove rules from list that have a matching chain name
    self.rules = [r for r in self.rules if r.chain != name]
    if not wrap:
        # Non-wrapped chains are jumped to by their bare name; record the
        # jump rules so apply() can delete them from the live tables too.
        jump_snippet = '-j %s' % name
        # next, add rules to remove that have a matching jump chain
        self.remove_rules += [
            r for r in self.rules if jump_snippet in r.rule
        ]
    else:
        # Wrapped chains carry the "<wrap_name>-" prefix in jump targets.
        jump_snippet = '-j %s-%s' % (self.wrap_name, name)
    # finally, remove rules from list that have a matching jump chain
    self.rules = [r for r in self.rules if jump_snippet not in r.rule]
def check_object(self, object_name):
    """Compare current version of 'object_name' with last requested."""
    current = self.get_object_content(object_name)
    previous_rows = get_last_version_object(
        self.space_name, object_name)
    if not previous_rows:
        # Nothing stored yet -- nothing to compare against.
        return
    previous = previous_rows[0][0]
    log.debug("Last requested:\n" + str(previous))
    log.debug("Current:\n" + str(current))
    if current == previous:
        return
    # Content changed since the last request: record the diff and
    # refresh the stored "last version".
    log.info("Update object %s" % (object_name))
    update_diff_object(self.space_name, object_name, current)
    update_last_version_object(self.space_name, object_name, current)
def run(output_path, h, v, alg, cpus, input_path, resume=True):
    """Fan line offsets 0, 100, ..., 4900 out to a pool of workers.

    :param output_path: directory for worker output; created if missing.
    :param h: passed through to ``worker`` (semantics defined there).
    :param v: passed through to ``worker``.
    :param alg: algorithm identifier passed through to ``worker``.
    :param cpus: number of worker processes to spawn.
    :param input_path: input data location passed through to ``worker``.
    :param resume: when True, already-finished lines would be skipped;
        the skip logic is currently disabled.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    lines = list(range(0, 5000, 100))
    # Disabled for now
    if resume is True:
        pass
        # for f in os.listdir(output_path):
        #     line = int(f[13:-4]) - 1
        #     if not line % 100:
        #         lines.remove(line)
    func = partial(worker, output_path, input_path, h, v, alg)
    # Close and join the pool so worker processes are always reaped,
    # even when map() raises (the original leaked the pool).
    pool = mp.Pool(processes=cpus)
    try:
        success = pool.map(func, lines)
    finally:
        pool.close()
        pool.join()
    log.debug('Successful workers: {}'.format(np.sum(success)))
def publish(self, scfile, name):
    """Compile Solidity source *scfile* and deploy contract *name*.

    The file is compiled through ``compile_standard`` (merged into the
    standard-JSON input returned by ``load_lib_add``), deployed from the
    node's first account, and a contract instance is stored on
    ``self.contract_instance``.

    :param scfile: path to the .sol source file.
    :param name: contract name inside that source file.
    :return: tuple of (transaction receipt, contract interface dict).
    """
    path = Path(scfile).resolve()
    # Start from the shared compiler-input skeleton and point its sources
    # at the file being published.  NOTE(review): assumes load_lib_add()
    # returns a solc standard-JSON dict -- confirm.
    tmp = self.load_lib_add()
    tmp['sources'] = {path.name: {"urls": [str(path)]}}
    sc = tmp
    log.debug(str(path.parent))
    # allow_paths lets solc resolve imports relative to the source file.
    compiled_sol = compile_standard(sc, allow_paths=str(path.parent))
    contract_interface = compiled_sol['contracts'][path.name][name]
    w3 = self.web3
    # Deploy from the node's first account.
    w3.eth.defaultAccount = w3.eth.accounts[0]
    bytecode = contract_interface['evm']['bytecode']['object']
    # The ABI lives inside the JSON-encoded metadata blob.
    abi = json.loads(contract_interface['metadata'])['output']['abi']
    tester = w3.eth.contract(abi=abi, bytecode=bytecode)
    tx_hash = tester.constructor().transact()
    # Blocks until the deployment transaction is mined.
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    # contract = {scfile: tx_receipt.contractAddress}
    # with open('contract_address.json', 'w') as wf:
    #     json.dump(contract, wf, indent=4)
    # print(tx_receipt)
    self.contract_instance = self.contract(tx_receipt, contract_interface)
    return tx_receipt, contract_interface
def eval(self, context, params=None):
    """Evaluate the division of all arguments in ``self.arg``.

    Falls back to int-converting string operands when the direct
    division fails; returns 0 when even that fails, and '' on a
    division by zero in the direct path.
    """
    # return the division of all the arguments:
    try:
        log.debug("Compute div " + str(self.arg))
        return reduce(lambda x, y: x / y,
                      coerce_args(eval_args(self.arg, context)))
    except (TypeError, ValueError):
        # Try converting strings to ints.
        # TODO: Need to handle floats in strings.
        try:
            return reduce(lambda x, y: int(x) / int(y),
                          eval_args(self.arg, context))
        except Exception as e:
            # NOTE: a ZeroDivisionError raised in THIS int-based retry is
            # swallowed here and yields 0, not the '' returned by the
            # outer ZeroDivisionError handler below.
            log.error(
                'Impossible to divide arguments of different types. ' +
                str(e))
            # TODO
            return 0
    except ZeroDivisionError:
        # Only reached for a zero divisor in the direct (non-retry) path.
        log.error("Division by 0 error. Returning ''.")
        return ''
def post_msg(self, message, slack_channel):
    """Send *message* to *slack_channel* via the Slack Web API.

    Failures are logged and swallowed; the method never raises.

    Args:
        message (str): text to post.
        slack_channel (str): destination channel; for a private channel
            the bot must already be a member.

    Returns:
        None
    """
    try:
        api_response = self.client.chat_postMessage(
            channel=slack_channel,
            text=message,
        )
        log.debug(api_response)
        log.info("Posted message: {} to Slack channel: {}".format(
            message, slack_channel))
    except Exception as exc:
        log.error("Failed to push to Slack")
        log.error(exc)
def set_cached_value(arg, val):
    """
    Set the cached value of an all constant numeric expression.

    :param arg: the expression whose ``str()`` form is used as the
        cache key in ``constant_expr_cache``.
    :param val: the computed value; only int, float, or complex values
        are cached -- anything else is silently skipped.
    """
    # Only numeric results are cached.
    if ((not isinstance(val, int)) and
            (not isinstance(val, float)) and
            (not isinstance(val, complex))):
        if (log.getEffectiveLevel() == logging.DEBUG):
            # Fixed message: floats and complex values are accepted too,
            # so the old "not an int" wording was misleading.
            log.warning("Expression '" + str(val) + "' is a " +
                        str(type(val)) + ", not a number. Not caching.")
        return

    # We have a number. Cache it.
    arg_str = str(arg)
    try:
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug("Cache value of " + arg_str + " = " + str(val))
    except UnicodeEncodeError:
        # Python 2: string concatenation above can fail on non-ASCII
        # data; the cache write below must still happen, so only the
        # log line is skipped.
        pass
    constant_expr_cache[arg_str] = val
def force_read(self, name, is_password=False):
    """Prompt repeatedly until a valid value for *name* is entered.

    :param name: human-readable field name; must be a non-empty str,
        otherwise None is returned immediately.
    :param is_password: when True, read without echo via getpass.
    :return: the validated input string, or None when *name* is invalid
        or the user aborts with Ctrl-C.
    """
    if not name or not isinstance(name, str):
        log.debug("invalid name")
        return None
    # Capitalize the first letter for the prompt text.
    name = name[:1].upper() + name[1:]
    while (True):
        try:
            prompt = Message.format("{} must be provided: ", name)
            if is_password:
                key = getpass.getpass(prompt)
            else:
                key = str(raw_input(prompt).strip())
            if self.valid_string(key):
                return key
            # Invalid value: fall through and prompt again.
        except KeyboardInterrupt:
            # User aborted -- stop prompting entirely.
            log.exception("\nNo {} was input", name)
            break
        except Exception:
            # Was a bare ``except:`` calling log.exception() with no
            # message, which swallowed SystemExit and is a TypeError
            # under stdlib logging; log with a message and keep looping.
            log.exception("Unexpected error while reading input")
def add_url_rule(self, url_rule, view_func, name, options=None):
    '''This method adds a URL rule for routing purposes.

    The provided name can be different from the view function name if
    desired. The provided name is what is used in url_for to build a URL.

    The route decorator provides the same functionality.

    :param url_rule: the URL pattern to register.
    :param view_func: callable invoked when the rule matches.
    :param name: unique name used by url_for; duplicates are poisoned.
    :param options: extra options forwarded to UrlRule, or None.
    '''
    rule = UrlRule(url_rule, view_func, name, options)
    # Membership test on the dict directly (was an un-idiomatic
    # ``in self._view_functions.keys()``).
    if name in self._view_functions:
        # Poison duplicate names with None so a later lookup fails
        # loudly instead of silently picking one of the two views.
        # TODO: Raise exception for ambiguous views during registration
        log.warning(
            'Cannot add url rule "%s" with name "%s". There is '
            'already a view with that name', url_rule, name)
        self._view_functions[name] = None
    else:
        log.debug(
            'Adding url rule "%s" named "%s" pointing to function '
            '"%s"', url_rule, name, view_func.__name__)
        self._view_functions[name] = rule
    # The rule is recorded for routing even when its name was ambiguous.
    self._routes.append(rule)
def modify_res(self, table: str, uuid: str, body: str):
    """Modify exactly one field of resource *uuid* in *table*.

    :param table: table name; must be a key of ``self.schema_index``.
    :param uuid: resource id, validated as a version-4 UUID string.
    :param body: JSON document containing exactly one field to change.
    :return: the updated resource dict on success, False on any error.
    """
    try:
        # Fixed log string: there was no space before "with body:".
        log.debug("Request to modify id: " + uuid + " of table " + table +
                  " with body: " + body)
        UUID(uuid, version=4)  # raises ValueError for a non-v4 id
        schema_class = self.schema_index[table]['entry_import_schema']
        # marshmallow-2 style API: loads() returns (data, errors).
        mod_res, errors = schema_class(partial=True).loads(body)
        if len(mod_res) != 1:
            # Explicit exception instead of a bare ``raise`` (which has no
            # active exception to re-raise and only "worked" by accident).
            # RuntimeError keeps this in the generic handler below rather
            # than the ValueError branch reserved for bad UUIDs.
            raise RuntimeError("body must contain exactly one field")
        key, value = list(mod_res.items())[0]
        self.tables[table][uuid][key] = value
        log.debug('Main State: ' + str(self.main_state))
        # Mark state dirty so it gets re-synchronized and persisted.
        self.sync_state['sync'] = False
        self.sync_state[table][uuid] = False
        self.__dump_main_state__()
        return self.tables[table][uuid]
    except ValueError:
        log.error('The provided uuid is no uuid version 4')
        return False
    except Exception:
        log.error("resource could not be modified")
        return False