def DownloadFile(url, path, startByte=0, endByte=None, ShowProgress=True):
    '''
    Function downloads file.
    @param url: File url address.
    @param path: Destination file path.
    @param startByte: Start byte.
    @param endByte: End byte. Will work only if server supports HTTPRange headers.
    @param ShowProgress: If true, shows textual progress bar.
    @return path: Destination file path.
    '''
    url = url.replace(' ', '%20')
    headers = {}
    if endByte is not None:
        headers['Range'] = 'bytes=%d-%d' % (startByte, endByte)
    req = urllib2.Request(url, headers=headers)
    try:
        urlObj = urllib2.urlopen(req, timeout=4)
    except urllib2.HTTPError, e:
        if "HTTP Error 416" in str(e):
            # HTTP 416 Error: Requested Range Not Satisfiable. Happens when we ask
            # for a range that is not available on the server. It will happen when
            # the server tries to send us a .html page that means something like
            # "you opened too many connections to our server". If this happens, we
            # will wait for the other threads to finish their connections and try again.
            log.warning("Thread didn't get the file it was expecting. Retrying...")
            time.sleep(5)
            return DownloadFile(url, path, startByte, endByte, ShowProgress)
        else:
            raise e
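# A minimal usage sketch (assumed, not from the source): fetching only the
# first megabyte of a file with a ranged request. Assumes Python 2 with
# urllib2 imported, as in the function above. URL and path are placeholders.
DownloadFile('http://example.com/file.mp3', '/tmp/file.part',
             startByte=0, endByte=1024 ** 2 - 1)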
def parse_Mp3skull(song, maxpages=1):
    "Function connects to mp3skull.com and returns the .mp3 links in it"
    if utils.isHebrew(song):  # Mp3skull doesn't have hebrew
        log.warning("[Mp3skull] source has no hebrew songs. Skipping...")
        return
    song = urllib2.quote(song.encode("utf8"))
    for i in range(maxpages):
        # http://mp3skull.com/mp3/how_i_met_your_mother.html
        url = 'http://mp3skull.com/mp3/%s.html' % (song.replace('-', '').replace(' ', '_').replace('__', '_').lower())
        log.debug("[Mp3skull] Parsing %s... " % url)
        obj = urllib2.urlopen(url)
        response = obj.read()
        links = []
        soup = BeautifulSoup(response)
        for link in soup.find_all('a', href=re.compile(r'\.mp3$')):
            links.append(link['href'])
        log.debug("[Mp3skull] found %d links" % len(links))
        if not links:
            break
        for link in links:
            yield utils.cls.MetaUrl(link, 'Mp3skull')
def parse_dilandau(song, maxpages=10):
    "Function connects to Dilandau.eu and returns the .mp3 links in it"
    if not utils.isAscii(song):  # Dilandau doesn't like unicode.
        log.warning("[Dilandau] Song is not ASCII. Skipping...")
        return
    song = urllib2.quote(song.encode("utf8"))
    for i in range(maxpages):
        # http://en.dilandau.eu/download-mp3/call-me-maybe-1.html
        url = 'http://en.dilandau.eu/download-mp3/%s-%d.html' % (
            song.replace('-', '').replace(' ', '-').replace('--', '-').lower(), i + 1)
        log.debug("[Dilandau] Parsing %s... " % url)
        obj = urllib2.urlopen(url)
        response = obj.read()
        links = []
        soup = BeautifulSoup(response)
        for link in soup.find_all('a', url=re.compile(r'\.mp3$')):
            url = link['href'] + link['url']  # don't ask me why; dilandau decided to split their url addresses.
            links.append(url)
        log.debug("[Dilandau] found %d links" % len(links))
        if not links:
            break
        for link in links:
            yield utils.cls.MetaUrl(link, 'dilandau')
def erase(self, pkg):
    """Remove RpmPackage.

    Return an RpmList error code (after warning the user)."""
    name = pkg["name"]
    if not self.database.hasName(name) or \
           pkg not in self.database.getPkgsByName(name):
        log.warning("Package %s (id %s) not found", pkg.getNEVRA(), id(pkg))
        return self.NOT_INSTALLED
    if self.isInstalled(pkg):
        self.erases.add(pkg)
    self.installs.discard(pkg)
    self.check_installs.discard(pkg)
    if pkg in self.updates:
        del self.updates[pkg]
    if pkg in self.obsoletes:
        del self.obsoletes[pkg]
    self.check_erases.add(pkg)
    self.check_file_requires = True
    self.database.removePkg(pkg)
    return self.OK
def _readElement(self, element, key):
    keyType = PARAM_INFO[key][INFO_TYPE]
    resmgrAttr = {
        'Bool': 'asBool',
        'Int': 'asInt',
        'Float': 'asFloat',
        'String': 'asString'
    }
    #log.debug('read element as "{}" from section'.format(key))
    if keyType in resmgrAttr:
        try:
            return getattr(element, resmgrAttr[keyType])
        except:
            log.current_exception()
    elif keyType == 'Enum':
        try:
            v = element.asString
            e = PARAM_INFO[key][INFO_ENUM]
            if v in e:
                return v
            else:
                log.warning('found invalid item "{}", available only {}'.format(v, e))
                return None
        except:
            log.current_exception()
    elif keyType == 'List':
        values = []
        child = PARAM_INFO[key][INFO_CHILD]
        for k, v in element.items():
            if k == child:
                v = self._readElement(v, k)
                if v:
                    values.append(v)
            else:
                log.warning('found invalid tag "{}", available only "{}"'.format(k, child))
        return values
    return None
def post_threadpool_actions(pool, args, expected_filesize, SmartDL_obj):
    "Run function after thread pool is done. Run this in a thread."
    while not pool.isFinished():
        time.sleep(0.1)
    if SmartDL_obj._killed:
        return
    if expected_filesize:  # if not zero, the expected filesize is known
        threads = len(args[0])
        total_filesize = sum([os.path.getsize(x) for x in args[0]])
        diff = math.fabs(expected_filesize - total_filesize)
        # if the difference is more than 4KB per thread (a thread may download
        # up to 4KB extra because of NTFS's block size)
        if diff > 4 * threads:
            log.warning('Diff between downloaded files and expected filesizes is %dKB. Retrying...' % diff)
            SmartDL_obj.retry()
            return
    SmartDL_obj.status = "combining"
    combine_files(*args)
    if SmartDL_obj.verify_hash:
        dest_path = args[-1]
        with open(dest_path, 'rb') as f:
            hash = hashlib.new(SmartDL_obj.hash_algorithm, f.read()).hexdigest()
        if hash == SmartDL_obj.hash_code:
            log.debug('Hash verification succeeded.')
        else:
            log.warning('Hash verification failed (got %s, expected %s). Trying next mirror...' % (hash, SmartDL_obj.hash_code))
            SmartDL_obj.try_next_mirror()
            return
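# A minimal sketch (assumed, not from the source) of how this helper is meant
# to be launched, per its docstring: in a separate thread, alongside the pool
# that does the actual downloading. `pool`, `args`, `size` and `dl` stand in
# for the real pool, argument tuple, expected filesize and SmartDL object.
import threading
t = threading.Thread(target=post_threadpool_actions, args=(pool, args, size, dl))
t.daemon = True
t.start()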
def visitPinyin(self, pinyin):
    # Find possible base sounds we could accept
    possiblebases = [pinyin.numericformat(hideneutraltone=False, tone="spoken")]
    substitutions = waysToSubstituteAwayUUmlaut(pinyin.word)
    if pinyin.toneinfo.spoken == 5:
        # Sometimes we can replace tone 5 with 4 in order to deal with lack of '[xx]5.ogg's
        possiblebases.extend([pinyin.word, pinyin.word + "4"])
    elif substitutions is not None:
        # Typically u: is written as v in filenames
        possiblebases.extend([substitution + str(pinyin.toneinfo.spoken) for substitution in substitutions])
    # Find path to first suitable media in the possibility list
    for possiblebase in possiblebases:
        media = self.mediapack.mediafor(possiblebase, self.audioextensions)
        if media:
            break
    if media:
        # If we've managed to find some media, we can put it into the output:
        self.output.append(media)
    else:
        # Otherwise, increment the count of missing media we use to determine optimality
        log.warning(
            "Couldn't find media for %s (%s) in %s",
            pinyin, pinyin.numericformat(tone="spoken"), self.mediapack
        )
        self.mediamissingcount += 1
def dossh(self):
    ret = -1
    ssh = pexpect.spawn('ssh %s@%s' % (self.__user, self.__ip))
    try:
        # NOTE: the prompt strings below were redacted ('******') in the
        # original listing; they are reconstructed here as the obvious
        # password / host-key confirmation prompts.
        i = ssh.expect(['Password:', 'continue connecting(yes/no)?'])
        if i == 0:
            ssh.sendline(self.__passwd)
        elif i == 1:
            ssh.sendline('yes\n')
            ssh.expect('Password:')
            ssh.sendline(self.__passwd)
        ssh.expect('\nOK')
        print self.__cmd
        ssh.sendline(self.__cmd)
        ssh.expect('\nOK', timeout=-1)
        ret = ssh.before
        print '[%s] get xhistory calls done!' % self.__ip
        log.info('[%s] get xhistory calls done!' % self.__ip)
    except pexpect.EOF:
        print "EOF"
        log.warning('[%s]dossh error : EOF !' % (self.__ip))
        ret = -1
    except pexpect.TIMEOUT:
        print "TIMEOUT"
        log.warning('[%s]dossh error : TIMEOUT !' % (self.__ip))
        ret = -2
    finally:
        ssh.close()
    return ret
def load_file(self, filename):
    """loads skeleton information from file"""
    if os.path.exists(filename):
        log.debug("loading file: " + filename)
        self.filename = filename
        # try to load the file
        try:
            tree = etree.parse(filename)
            skel = tree.getroot()
            for node in self.info_nodes:
                keys = node.keys()
                attr_name = keys[0]
                attr_path = node[attr_name]
                node_data = tree.xpath(attr_path)
                if node_data:
                    self.__dict__[attr_name] = node_data[0].text
            # get file list
            filelist = tree.xpath("/skel/filelist/file")
            for fil in filelist:
                self.filelist.append(fil.get("name"))
            # parameters
            params = tree.xpath("/skel/params/param")
            for param in params:
                self.params.append({param.get("name"): param.get("value")})
        except Exception as ex:
            log.warning("Could not parse skeleton file: " + str(ex) + " in file: " + filename)
            return False
    else:
        log.error("File does not exist: " + filename)
        return False
def __copy_theme_data_to_boot_directory(self, lookup_path):
    if not lookup_path:
        lookup_path = self.root_dir
    boot_unicode_font = self.root_dir + '/boot/unicode.pf2'
    if not os.path.exists(boot_unicode_font):
        unicode_font = self.__find_grub_data(lookup_path + '/usr/share') + \
            '/unicode.pf2'
        try:
            Command.run(
                ['cp', unicode_font, boot_unicode_font]
            )
        except Exception:
            raise KiwiBootLoaderGrubFontError(
                'Unicode font %s not found' % unicode_font
            )
    boot_theme_dir = self.root_dir + '/boot/' + \
        self.boot_directory_name + '/themes'
    if self.theme and not os.path.exists(boot_theme_dir):
        Path.create(boot_theme_dir)
        theme_dir = self.__find_grub_data(lookup_path + '/usr/share') + \
            '/themes/' + self.theme
        if os.path.exists(theme_dir):
            Command.run(
                ['rsync', '-zav', theme_dir, boot_theme_dir],
            )
        else:
            log.warning('Theme %s not found', theme_dir)
def __cleanup_dir_stack(self):
    for location in reversed(self.dir_stack):
        try:
            Path.remove_hierarchy(self.root_dir + location)
        except Exception as e:
            log.warning(
                'Failed to remove directory %s: %s', location, format(e)
            )
    del self.dir_stack[:]
def __build_mount_list(self):
    mount_points = []
    for location in reversed(self.mount_stack):
        mount_path = self.root_dir + location
        try:
            Command.run(['mountpoint', '-q', mount_path])
            mount_points.append(mount_path)
        except Exception:
            log.warning('Path %s not a mountpoint', mount_path)
    return mount_points
def __cleanup_mount_stack(self):
    try:
        Command.run(['umount', '-l'] + self.__build_mount_list())
    except Exception as e:
        log.warning(
            'Image root directory %s not cleanly umounted: %s',
            self.root_dir, format(e)
        )
    del self.mount_stack[:]
def setUpEnv(envVars):
    if envVars is None:
        return
    for item in envVars:
        if item in os.environ:
            log.warning("[Warning] Override ENV variable: {0}: {1} -> {2}".format(
                item, envVars[item], os.environ[item]))
        else:
            log.info("[Info] Setup ENV var: {0} = {1}".format(item, envVars[item]))
        os.environ[item] = envVars[item]
def post(self):
    username = self.get_argument('username', '')
    password = self.get_argument('password', '')
    if self.check_permission(username, password):
        log.info('User {} logging in with password {}'.format(username, password))
        self.set_current_user(username)
        self.redirect(self.get_argument('next', u'/'))
    else:
        log.warning('Login failed for user {} with password {}'.format(username, password))
        error_msg = u"?error=" + escape.url_escape('Login incorrect')
        self.redirect(u"/login" + error_msg)
def __del__(self):
    if self.raid_device:
        log.info('Cleaning up %s instance', type(self).__name__)
        try:
            Command.run(
                ['mdadm', '--stop', self.raid_device]
            )
        except Exception:
            log.warning(
                'Shutdown of raid device failed, %s still busy', self.raid_device
            )
def __del__(self):
    if self.storage_provider.is_loop() and self.is_mapped:
        log.info('Cleaning up %s instance', type(self).__name__)
        try:
            Command.run(
                ['kpartx', '-s', '-d', self.storage_provider.get_device()]
            )
        except Exception:
            log.warning(
                'cleanup of partition device maps failed, %s still busy',
                self.storage_provider.get_device()
            )
def __del__(self):
    if self.luks_device:
        log.info('Cleaning up %s instance', type(self).__name__)
        try:
            Command.run(
                ['cryptsetup', 'luksClose', self.luks_name]
            )
        except Exception:
            log.warning(
                'Shutdown of luks map %s failed, %s still busy',
                self.luks_name, self.luks_device
            )
def __scale_features(dataset):
    """
    Check if features should and could be scaled.
    :param dataset: an array of arrays
    :return: scaled feature set if requested/possible, else original dataset.
    """
    if (not args.no_feature_scaling) and clf.scaling_possible:
        # scale features if requested, warn if impossible
        return preprocessing.scale(dataset)
    elif (not args.no_feature_scaling) and (not clf.scaling_possible):
        log.warning("Can't scale features with classifier '%s'. Proceeding without feature scaling." % args.clf)
    return dataset
def slot_choose_temp_dir(self):
    dialog = QtGui.QFileDialog()
    dialog.setFileMode(QtGui.QFileDialog.Directory)
    dialog.setDirectory(self.temp_dir.displayText())
    temp_dir = unicode(dialog.getExistingDirectory(options=QtGui.QFileDialog.ShowDirsOnly))
    temp_dir = temp_dir.replace('/', '\\')
    if temp_dir:
        if utils.get_free_space(temp_dir) < 200 * 1024**2:  # 200 MB
            log.warning("The Directory has less than 200 MB available. Application may not function properly.")
            QtGui.QMessageBox.warning(
                self, tr("Warning"),
                tr("The Directory has less than 200 MB available. Application may not function properly."),
                QtGui.QMessageBox.Ok)
        log.debug("temp_dir is set to: %s" % temp_dir)
        self.temp_dir.setText(temp_dir)
def old_spell_fix(s):
    "Uses google API to fix spelling"
    data = u"""
    <spellrequest textalreadyclipped="0" ignoredups="1" ignoredigits="1" ignoreallcaps="0">
    <text>%s</text>
    </spellrequest>
    """
    data = data % s
    data_octets = data.encode('utf-8')
    new_s = s
    if log:
        log.debug("Checking spell suggestions for '%s'..." % s)
    if utils.isHebrew(s):
        log.debug("Search string is hebrew. Skipping spell checking...")
        return s
    con = httplib.HTTPConnection("www.google.com", timeout=config.webservices_timeout)
    con.request("POST", "/tbproxy/spell?lang=en", data_octets,
                {'content-type': 'text/xml; charset=utf-8'})
    response = con.getresponse().read()
    if log:
        log.debug("Response: %s" % response)
    try:
        dom = xml.dom.minidom.parseString(response)
        dom_data = dom.getElementsByTagName('spellresult')[0]
    except ExpatError:
        log.warning('spell_fix failed: ExpatError.')
        return s
    for node in dom_data.childNodes:
        att_o = int(node.attributes.item(2).value)  # The offset of the word from the start of the text
        att_l = int(node.attributes.item(1).value)  # Length of misspelled word
        att_s = int(node.attributes.item(0).value)  # Confidence of the suggestion
        if not node.firstChild:  # no suggestions
            return s
        text = node.firstChild.data.split("\t")[0]
        # print "%s --> %s (s: %d)" % (s[att_o:att_o+att_l], text, att_s)
        if att_s:  # if suggestion is confident
            new_s = new_s.replace(s[att_o:att_o + att_l], text)
    if log:
        if s == new_s:
            log.debug("No suggestions were accepted.")
        else:
            log.debug("Suggestions were accepted: %s --> %s." % (s, new_s))
    return new_s
def _handle_rtp(self, request):
    try:
        call = ctx_table.find_call(request.call_ctx)
        if call:
            other_addr = ctx_table.get_other_addr(request.client_ctx, request.call_ctx)
            buf = request.msg.serialize()
            yield CommMessage(other_addr, ClientRTP, buf)
        else:
            log.warning('%s _handle_rtp: call is out of context %s' % (repr(self), repr(call)))
            ctx_table.pprint()
    except:
        log.exception('exception')
def __parseYaml(self):
    try:
        self.__data = yaml.load(self.__data)
        log.info('Parse Yaml done! ip: %s' % (self.__ip))
        if (self.__last):
            if (self.__last in self.__data):
                del self.__data[self.__last]
                log.info('Delete last callId done! ip: %s, last: %s' % (self.__ip, self.__last))
            else:
                log.warning('Not found last callId, ip: %s, last: %s' % (self.__ip, self.__last))
    except Exception, e:
        log.warning('Parse Yaml Error! ip: %s', self.__ip)
        print Exception, ":", e
    self.__saveFile()
def setupversion(self):
    '''Internal: Handle 'auto' for --version.'''
    # Handle automatic version detection
    if self.version == 'auto':
        log.debug('Attempting to automatically detect Dwarf Fortress version.')
        self.version = detectversion(paths=(self.input, self.output))
        if self.version is None:
            log.error('Unable to detect Dwarf Fortress version.')
        else:
            log.debug('Detected Dwarf Fortress version %s.' % self.version)
    elif self.version is None:
        log.warning('No Dwarf Fortress version was specified. Scripts will be run regardless of their indicated compatibility.')
    else:
        log.info('Managing Dwarf Fortress version %s.' % self.version)
def start_package_install(self):
    d = Main.WebParser.WebServices.get_packages_data()
    for i, component in enumerate(self.components):
        urls, file_hash, install_param = d[component]
        fn = urls[0].split('/')[-1]
        log.debug("Downloading Component %s [%d/%d]..." % (component, i + 1, len(self.components)))
        self.label1.setText(tr("Downloading %s...") % component)
        self.label2.setText(tr("File: <i>%s</i> [%d/%d]") % (fn, i + 1, len(self.components)))
        for j in range(self.retries):
            obj = SmartDL(urls, logger=log)
            obj.start(blocking=False)
            b = True
            while not obj.isFinished():
                if b:
                    self.label1.setText(tr("Downloading %s (%.2f MB)...") % (component, obj.filesize / 1024.0**2))
                    b = False
                QtGui.QApplication.processEvents()
                self.prg_bar.setValue(int(obj.get_progress() * 100))
                time.sleep(0.1)
            if obj._failed:
                QtGui.QMessageBox.critical(
                    self, tr("Error"),
                    tr("The download has failed. It may be a network connection problem. Please try to rerun this application and try again."),
                    QtGui.QMessageBox.Ok)
                self.close()
            self.prg_bar.setValue(100)
            computed_hash = utils.calc_sha256(obj.get_dest())
            if file_hash == computed_hash:
                log.debug('Hash for %s is valid.' % component)
                break
            else:
                log.warning('Hash for %s is NOT valid (%s != %s). Retrying (%d/%d)...' % (component, file_hash, computed_hash, j + 1, self.retries))
        if file_hash != computed_hash:
            log.error('Hash for %s is NOT valid (%s != %s).' % (component, file_hash, computed_hash))
            QtGui.QMessageBox.warning(
                self, tr("Warning"),
                tr("Hash check failed for %s. Please contact the program's developer.") % component,
                QtGui.QMessageBox.Ok)
            self.close()
            return
        path = obj.get_dest()
        install_params = [path] + install_param
        self.label1.setText(tr("Installing %s...") % component)
        subprocess.call(install_params, shell=True)
        QtGui.QApplication.processEvents()
    self.close()
def set_flag(self, partition_id, flag_name):
    if flag_name not in self.flag_map:
        raise KiwiPartitionerGptFlagError(
            'Unknown partition flag %s' % flag_name
        )
    if self.flag_map[flag_name]:
        Command.run(
            [
                'sgdisk', '-t',
                ':'.join([format(partition_id), self.flag_map[flag_name]]),
                self.disk_device
            ]
        )
    else:
        log.warning('Flag %s ignored on GPT', flag_name)
def collect_evaluation_params(filepath, num_of_parts):
    param_sets = []
    for part in format_parts(num_of_parts):
        param_file = "{fp}.{p}/cross.{p}.params".format(fp=filepath, p=part)
        if not os.path.exists(param_file):
            log.warning("file with tuned params does not exist: {}".format(param_file))
            continue
        with open(param_file) as file:
            fields = file.read().strip().split()
            param_set = (float(fields[1]), float(fields[3]))
            param_sets.append(param_set)
    return param_sets
def addFont(self):
    '''
    @note: returns success or failure
    '''
    font_path = self.font_path
    fontId = QFontDatabase.addApplicationFont(font_path)
    if (fontId != -1):
        fontInfoList = QFontDatabase.applicationFontFamilies(fontId)
        fontFamily = fontInfoList[0]
        self.__font.setFamily(fontFamily)
        log.info("Font added successfully")
        return True
    else:
        log.warning("Failed to add font")
        return False
def get_id3info(url):
    '''
    Function fetches data about mp3 files over HTTP.
    @param url: mp3 file address.
    @return bitrate, title, artist, id3tags_file
    '''
    if not is_ServerSupportHTTPRange(url):
        log.warning("Server does not support HTTPRANGE! [%s]" % url)
        return [0, "", "", ""]
    url = url.replace(' ', '%20')  # may not be needed
    req = urllib2.Request(url, headers=config.generic_http_headers)
    urlObj = urllib2.urlopen(req, timeout=config.get_id3info_timeout)
    tmpfile = utils.get_rand_filename(config.temp_dir)
    stringIO = StringIO()
    while True:
        stringIO.write(urlObj.read(8192))
        with open(tmpfile, 'wb') as f:
            f.write(stringIO.getvalue())
        try:
            audio = MP3(tmpfile)
            break
        except EOFError:
            # We still didn't fetch all the ID3 data.
            pass
            # log.debug("metadata is not in this %d KB chunk or not supported." % len(stringIO.getvalue()))
        except mutagen.mp3.HeaderNotFoundError:
            log.debug("HeaderNotFoundError: can't sync to an MPEG frame")
            stringIO.close()
            return [0, "", "", ""]
    stringIO.close()
    try:
        audioID3 = EasyID3(tmpfile)
    except mutagen.id3.ID3NoHeaderError:
        log.debug("no ID3 data found (mutagen.id3.ID3NoHeaderError).")
        return [audio.info.bitrate, "", "", ""]
    except mutagen.id3.ID3BadUnsynchData:
        log.debug("Bad ID3 Unsynch Data (mutagen.id3.ID3BadUnsynchData)")
        return [audio.info.bitrate, "", "", ""]
    title = utils.fix_faulty_unicode(audioID3.get('title')[0]) if audioID3.get('title') else ""
    artist = utils.fix_faulty_unicode(audioID3.get('artist')[0]) if audioID3.get('artist') else ""
    return [audio.info.bitrate, title, artist, tmpfile]
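# A minimal usage sketch (assumed, not from the source): the function returns a
# 4-item list, so callers can unpack it directly. The URL is a placeholder; the
# last item is the path of the temp file holding the fetched ID3 data.
bitrate, title, artist, id3tags_file = get_id3info('http://example.com/song.mp3')
log.debug("bitrate=%d title=%s artist=%s" % (bitrate, title, artist))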
def add_url_rule(self, url_rule, view_func, name, options=None):
    '''This method adds a URL rule for routing purposes. The provided name
    can be different from the view function name if desired. The provided
    name is what is used in url_for to build a URL.

    The route decorator provides the same functionality.
    '''
    rule = UrlRule(url_rule, view_func, name, options)
    if name in self._view_functions.keys():
        # TODO: Raise exception for ambiguous views during registration
        log.warning('Cannot add url rule "%s" with name "%s". There is already a view with that name' % (url_rule, name))
        self._view_functions[name] = None
    else:
        log.debug('Adding url rule "%s" named "%s" pointing to function "%s"' % (url_rule, name, view_func.__name__))
        self._view_functions[name] = rule
        self._routes.append(rule)
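# A hedged usage sketch (assumed, not from the source): registering a view
# under an explicit name. `app` is a placeholder for whatever object exposes
# add_url_rule; per the docstring, the given name is what url_for later uses
# to build the URL for this rule.
def index():
    return 'hello'

app.add_url_rule('/', index, 'index')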
def main():
    args = handle_arguments()
    while True:
        try:
            scrape_events = ScrapeEvents(
                inventory_url=args.inventory_url,
                offline_token=args.offline_token,
                index=args.index,
                es_server=args.es_server,
                es_user=args.es_user,
                es_pass=args.es_pass,
                backup_destination=args.backup_destination)
            scrape_events.run_service()
        except Exception as ex:
            log.warning(
                "Elastefying logs failed with error %s, sleeping for %s and retrying",
                ex, RETRY_INTERVAL)
            time.sleep(RETRY_INTERVAL)
def init_id3data(self):
    try:
        # Add ID3 Tags if they do not exist
        mp3Obj = MP3(self.path, ID3=ID3)
        mp3Obj.add_tags()
        mp3Obj.save()
    except error:
        pass
    except HeaderNotFoundError:
        log.warning("This MP3 file seems to be faulty. Cannot edit its ID3 data. (Error: HeaderNotFoundError)")
        QtGui.QMessageBox.critical(
            self, tr("Error"),
            tr("This MP3 file seems to be faulty. Cannot edit its ID3 data."),
            QtGui.QMessageBox.Ok)
        self.isValid = False
        return
    try:
        self.easyID3Obj = EasyID3(self.path)
        self.ID3Obj = ID3(self.path)
    except ID3NoHeaderError:
        # That means mp3Obj.add_tags() didn't work for a reason.
        utils.appendDummyID3(self.path)
        self.easyID3Obj = EasyID3(self.path)
        self.ID3Obj = ID3(self.path)
    USLT_Tag = [x for x in self.ID3Obj.keys() if x.startswith('USLT')]
    self.originalLyrics = self.ID3Obj[USLT_Tag[0]].text if USLT_Tag else ""
    APIC_Tag = [x for x in self.ID3Obj.keys() if x.startswith('APIC')]
    if APIC_Tag:
        APIC_Tag = APIC_Tag[0]
        mime = self.ID3Obj[APIC_Tag].mime
        if mime == u'image/jpeg':
            self.pix_path = os.path.join(config.temp_dir, 'album_art.jpg')
        elif mime == u'image/png':
            self.pix_path = os.path.join(config.temp_dir, 'album_art.png')
        else:
            self.pix_path = os.path.join(config.temp_dir, 'album_art.pic')
        with open(self.pix_path, 'wb') as f:
            f.write(self.ID3Obj[APIC_Tag].data)
async def push_stats():
    # unlike the log function we will have to poll
    # the db for updates, aggregating results
    while True:
        await gen.sleep(20)
        cursor = stats.find()
        async for r in cursor:
            # here check the stats and report any error
            try:
                check_stat(r)
            except Exception as e:
                log.warning(e.message)
            r[u'msgtype'] = 'stats'
            msg = json.dumps(r, default=date_encoder.default)
            for client in client_list:
                client.write_message(msg)
def parse_soundcloud_api1(song, maxpages=1, numTries=2):
    '''
    Function connects to soundcloud.com and returns the .mp3 links in it
    API method 1: Looking for legitimate download links.
    '''
    song = urllib2.quote(song.encode("utf8"))
    # since you can't combine decorators and generators, we have to implement
    # a retry section here, instead of using the retry deco.
    if numTries <= 0:
        return
    for i in range(maxpages):
        # http://soundcloud.com/tracks/search?page-1&q%5Bfulltext%5D=naruto&q%5Bdownloadable%5D=true
        domain = "soundcloud.com"
        url = 'http://soundcloud.com/tracks/search?page=%d&q%%5Bfulltext%%5D=%s&q%%5Bdownloadable%%5D=true' % (
            i + 1, song.replace('-', '').replace(' ', '_').replace('__', '_').lower())
        log.debug("[SoundCloud] Parsing %s... " % url)
        obj = urllib2.urlopen(url)
        response = obj.read()
        soup = BeautifulSoup(response)
        hint = soup.find('div', class_='hint')
        if hint:
            if 'search is currently not available' in hint.text.lower():
                log.warning("soundcloud: search is currently not available!")
                time.sleep(1.5)
                # re-yield results from the retry call; a bare recursive call
                # inside a generator would silently discard them
                for metaurl in parse_soundcloud_api1(song, maxpages, numTries - 1):
                    yield metaurl
                return
            if "we couldn't find any tracks" in hint.text.lower():
                return
        links = soup.find_all('a', href=re.compile(r'/download$'))
        log.debug("[SoundCloud] found %d links" % len(links))
        for link in links:
            url = "http://%s%s" % (domain, link['href'])
            track = link.find_parent('li').find('div', class_="info-header").h3.text
            # print track
            yield utils.cls.MetaUrl(url, 'SoundCloud', track)
def parse_config():
    contents = [
        line.strip() for line in read_config_file() if line.strip() != ''
    ]
    current_group = DEFAULT_GROUP
    configs = collections.OrderedDict()
    configs[current_group] = collections.OrderedDict()
    current_host = None
    for line in contents:
        # lines starting with the #gz prefix are group comments
        if line.startswith(GZ_GROUP_PREFIX):
            current_group = parse_group_name(line)
            if current_group not in configs:
                configs[current_group] = collections.OrderedDict()
        else:
            try:
                values = line.split()
                if values[0] not in EXPECTED_CONFIG_PREFIXES:
                    raise Exception("Unexpected line specified")
            except Exception as e:
                log.warning(str(e) + ", line=%s" % line)
                continue
            key = values[0]
            value = '-'.join(values[1:])
            if key == HOST:
                current_host = value
                configs[current_group][current_host] = collections.OrderedDict()
            else:
                configs[current_group][current_host][key] = value
    if len(configs[DEFAULT_GROUP]) == 0:
        del configs[DEFAULT_GROUP]
    return configs
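# A hedged sketch (assumed, not from the source) of the ssh-config-like layout
# this parser expects: a GZ_GROUP_PREFIX comment opens a group, a Host line
# opens a host entry, and the following key/value lines attach to that host.
# The concrete prefix and key names are constants not shown in the listing.
#
#   #gz work
#   Host build-server
#   HostName 10.0.0.5
#   User jenkins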
def __accumulate_volume_size(self, root_mbytes):
    """
    calculate number of mbytes to add to the disk to allow
    the creation of the volumes with their configured size
    """
    disk_volume_mbytes = 0
    data_volume_mbytes = self.__calculate_volume_mbytes()
    root_volume = self.__get_root_volume_configuration()
    for volume in self.volumes:
        if volume.realpath and not volume.realpath == '/' and volume.size:
            [size_type, req_size] = volume.size.split(':')
            disk_add_mbytes = 0
            if size_type == 'freespace':
                disk_add_mbytes += int(req_size) + \
                    Defaults.get_min_volume_mbytes()
            else:
                disk_add_mbytes += int(req_size) - \
                    data_volume_mbytes.volume[volume.realpath]
            if disk_add_mbytes > 0:
                disk_volume_mbytes += disk_add_mbytes
            else:
                log.warning(
                    'volume size of %s MB for %s is too small, skipped',
                    int(req_size), volume.realpath
                )
    if root_volume:
        if root_volume.size_type == 'freespace':
            disk_add_mbytes = root_volume.req_size + \
                Defaults.get_min_volume_mbytes()
        else:
            disk_add_mbytes = root_volume.req_size - \
                root_mbytes + data_volume_mbytes.total
        if disk_add_mbytes > 0:
            disk_volume_mbytes += disk_add_mbytes
        else:
            log.warning(
                'root volume size of %s MB is too small, skipped',
                root_volume.req_size
            )
    return disk_volume_mbytes
def parse_MusicAddict(song, maxpages=10):
    "Function connects to MusicAddict.com and returns the .mp3 links in it"
    if utils.isHebrew(song):  # MusicAddict doesn't have hebrew
        log.warning("[MusicAddict] source has no hebrew songs. Skipping...")
        return
    song = urllib2.quote(song.encode("utf8"))
    for i in range(maxpages):
        # http://www.musicaddict.com/mp3/naruto-shippuden/page-1.html
        url = 'http://www.musicaddict.com/mp3/%s/page-%d.html' % (
            song.replace('-', '').replace('_', '').replace(' ', '-').lower(), i + 1)
        log.debug("[MusicAddict] Parsing %s... " % url)
        obj = urllib2.urlopen(url)
        response = obj.read()
        DOMAIN = 'http://www.musicaddict.com/'
        t_links = []
        links = []
        soup = BeautifulSoup(response)
        for span in soup.find_all('span', class_='dl_link'):
            if not span.a['href'].startswith('http'):
                url = DOMAIN + span.a['href']
                t_links.append(url)
        for link in t_links:
            obj = urllib2.urlopen(link)
            response = obj.read()
            soup = BeautifulSoup(response)
            js = soup.find('script', src=re.compile(r"js3/\d+.js"))
            jsUrl = DOMAIN + js['src']
            obj = urllib2.urlopen(jsUrl)
            response = obj.read()
            url = re.search('src="(.+?)"', response).group(1)
            links.append(url)
            yield utils.cls.MetaUrl(url, 'MusicAddict')
        if not links:
            break
def add_compiled_module(self, m):
    """
    Add an already parsed and processed module.
    """
    if (m is None):
        return
    self.modules.append(m)
    for name, _sub in m.subs.items():
        # Skip duplicate subs that look less interesting than the old one.
        if (name in self.globals):
            old_sub = self.globals[name]
            if (hasattr(old_sub, "statements")):
                if (len(_sub.statements) < len(old_sub.statements)):
                    log.warning("Sub " + str(name) + " is already defined. Skipping new definition.")
                    continue
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('(1) storing sub "%s" in globals' % name)
        self.globals[name.lower()] = _sub
        self.globals[name] = _sub
    for name, _function in m.functions.items():
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('(1) storing function "%s" in globals' % name)
        self.globals[name.lower()] = _function
        self.globals[name] = _function
    for name, _prop in m.functions.items():
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('(1) storing property let "%s" in globals' % name)
        self.globals[name.lower()] = _prop
        self.globals[name] = _prop
    for name, _function in m.external_functions.items():
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('(1) storing external function "%s" in globals' % name)
        self.globals[name.lower()] = _function
        self.externals[name.lower()] = _function
    for name, _var in m.global_vars.items():
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug('(1) storing global var "%s" = %s in globals (1)' % (name, str(_var)))
        if (isinstance(name, str)):
            self.globals[name.lower()] = _var
        if (isinstance(name, list)):
            self.globals[name[0].lower()] = _var
            self.types[name[0].lower()] = name[1]
def run(self):
    # Called by Qt once the thread environment has been set up.
    google_ans = Main.WebParser.WebServices.googleImageSearch(self.song)[:]
    pool = ThreadPool(max_threads=config.GoogleImagesGrabber_processes,
                      catch_returns=True, logger=log)
    fn_list = []
    while len(fn_list) < self.numOfPhotos and google_ans:
        urls = []
        for i in range(self.numOfPhotos - len(fn_list)):
            if google_ans:
                urls.append(google_ans.pop(0))
        for url in urls:
            pool(self.fetchPhoto)(url)
        for photo in pool.iter():
            try:
                if photo:
                    fn_list.append(photo)
            except Exception, e:
                log.warning("Exception %s ignored in GoogleImagesGrabberThread." % str(e))
def _transform_wait_loop(loop):
    """
    Transform useless loops like 'Do While x <> y:SomeFunctionCall():Loop'
    to 'SomeFunctionCall()'
    """
    # Do we have this sort of loop?
    loop_pat = r"[Ww]hile\s+\w+\s*<>\s*\"?\w+\"?\r?\n.{0,500}?[Ww]end"
    loop_str = loop.original_str
    if (re.search(loop_pat, loop_str, re.DOTALL) is None):
        return loop
    # Is the loop body a function call?
    if ((len(loop.body) > 1) or
            (len(loop.body) == 0) or
            (not isinstance(loop.body[0], statements.Call_Statement))):
        return loop
    # Just do the call once.
    log.warning("Transformed possible infinite wait loop...")
    return loop.body[0]
def test_consistency(self):
    # create file
    log.enable_save_to_txt("approval_test_check.txt")
    log.info("testing info clean")
    log.error("testing error clean")
    log.warning("testing warning clean")
    log.success("testing success clean")
    # get its data
    to_check = ""
    with open("approval_test_check.txt", "r") as f1:
        to_check = f1.read()
    # get target data
    target = ""
    with open("approval_test_valid.txt", "r") as f2:
        target = f2.read()
    self.assertEqual(to_check, target)
def handle(self, data):
    data = super().parseData(data)
    if data is None:
        return
    targetTokens = glob.tokens.getTokenFromUserID(data["userID"], ignoreIRC=True, _all=True)
    if targetTokens:
        icon = glob.db.fetch(
            "SELECT file_id, url FROM main_menu_icons WHERE id = %s LIMIT 1",
            (data["mainMenuIconID"],)
        )
        if icon is None:
            log.warning("Tried to test an unknown main menu icon")
            return
        for x in targetTokens:
            x.enqueue(
                serverPackets.mainMenuIcon("{}|{}".format(
                    "https://ussr.pl/static/logos/{}.png".format(icon["file_id"]),
                    icon["url"]
                ))
            )
def to_python(self, context, params=None, indent=0):
    """JIT compile this VBA object to Python code for direct emulation.

    @param context (Context object) Context for the Python code generation
    (local and global variables). Current program state will be read from
    the context.

    @param params (list) Any parameters provided to the object.

    @param indent (int) The number of spaces of indent to use at the
    beginning of the generated Python code.

    @return (str) The current object with its emulation implemented as
    Python code.
    """
    log.warning("to_python() not implemented in " + safe_str_convert(type(self)))
    raise NotImplementedError("to_python() not implemented in " + safe_str_convert(type(self)))
def __del__(self):
    if self.mountpoint:
        log.info('Cleaning up %s instance', type(self).__name__)
        if self.is_mounted():
            umounted_successfully = False
            for busy in [1, 2, 3]:
                try:
                    Command.run(['umount', self.mountpoint])
                    umounted_successfully = True
                    break
                except Exception:
                    log.warning(
                        '%d umount of %s failed, try again in 1sec',
                        busy, self.mountpoint
                    )
                    time.sleep(1)
            if not umounted_successfully:
                log.warning('%s still busy at %s', self.mountpoint, type(self).__name__)
            else:
                Path.remove(self.mountpoint)
def masterschool_compare(dfFILE, logVECTOR):
    # get school numbers
    fileSCHNUMB = dfFILE['schnumb']
    #yearMS = {'2020', '2019', '2018', '2017'}
    yearMS = {'2020'}  # debug
    # get length of cols on df
    count_col = dfFILE.shape[1]  # gives number of col count
    # check to see if schname and schnumber are present
    # logVECTOR = [snumb, dcode, schname, disname, stid, dob]
    if logVECTOR[0] == 1 and logVECTOR[1] == 1:
        # Main loop to go through years
        for y in yearMS:
            # Generate master school dictionary
            msDICT = setup_dicts(y)  # random order TODO: force yearly descending order
            # School number in MS compared to Dataset and School Name Comparison
            log.info('Dataset Compare to ms%s', yearMS)
            index = 0
            for i in fileSCHNUMB:
                fileKEY = ((dfFILE['schnumb'][index]))
                if (fileKEY in (msDICT['schcode'].keys())):
                    # School name Comparison
                    dfFILE.at[index, count_col + 1] = dfFILE['schname'][index]  # Print out dataframe name for ease
                    dfFILE.at[index, count_col + 2] = msDICT['schname'][i]  # Print out master school name for ease
                    dfFILE.at[index, count_col + 3] = fuzz_check(dfFILE['schname'][index], msDICT['schname'][i])  # compare
                else:
                    log.warning("ATTENTION!!!!!!!!!!!!!!!!!! This key was not found in dict: %f", i)
                index = index + 1
    # Membership Evaluation
    # dfFILE['schnumbmatch'] = dfFILE['schnumb'].isin(ms_2020['schcode'].keys())
    # dfFILE['schnamematch'] = dfFILE['schname'].isin(ms_2020['schname'].values())
    # set column names
    dfFILE.rename(columns={count_col + 1: "DF_Entry",
                           count_col + 2: "MS2020_Entry",
                           count_col + 3: "DF-MS_MATCH"}, inplace=True)
    time.sleep(1)  # wait a sec for error check before returning file
    return dfFILE
def joinChannel(userID=0, channel="", token=None, toIRC=True, force=False):
    """
    Join a channel

    :param userID: user ID of the user that joins the channel. Optional. token can be used instead.
    :param token: user token object of user that joins the channel. Optional. userID can be used instead.
    :param channel: channel name
    :param toIRC: if True, send this channel join event to IRC. Must be true if joining from bancho. Default: True
    :param force: whether to allow game clients to join #spect_ and #multi_ channels
    :return: 0 if joined or other IRC code in case of error. Needed only on IRC-side
    """
    try:
        # Get token if not defined
        if token is None:
            token = glob.tokens.getTokenFromUserID(userID)
            # Make sure the token exists
            if token is None:
                raise exceptions.userNotFoundException
        else:
            token = token

        # Normal channel, do check stuff
        # Make sure the channel exists
        if channel not in glob.channels.channels:
            raise exceptions.channelUnknownException()

        # Make sure a game client is not trying to join a #multi_ or #spect_ channel manually
        channelObject = glob.channels.channels[channel]
        if channelObject.isSpecial and not token.irc and not force:
            raise exceptions.channelUnknownException()

        # Add the channel to our joined channels
        token.joinChannel(channelObject)

        # Console output
        log.info("{} joined channel {}".format(token.username, channel))

        # IRC code return
        return 0
    except exceptions.channelNoPermissionsException:
        log.warning("{} attempted to join channel {}, but they have no read permissions".format(token.username, channel))
        return 403
    except exceptions.channelUnknownException:
        log.warning("{} attempted to join an unknown channel ({})".format(token.username, channel))
        return 403
    except exceptions.userAlreadyInChannelException:
        log.warning("User {} already in channel {}".format(token.username, channel))
        return 403
    except exceptions.userNotFoundException:
        log.warning("User not connected to IRC/Bancho")
        return 403  # idk
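# A minimal usage sketch (assumed, not from the source): joining a channel by
# user ID, per the docstring. 0 means joined; non-zero is an IRC error code.
# The user ID and channel name are placeholders.
response = joinChannel(userID=1001, channel="#osu")
if response != 0:
    log.warning("could not join #osu (IRC code {})".format(response))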
def _checkObsoletes(self, pkg, dep, list, operation=OP_INSTALL):
    """RpmPackage pkg to be newly installed during operation provides dep,
    which is obsoleted by RpmPackage's in list.

    Filter out irrelevant obsoletes and return 1 if pkg remains obsoleted,
    0 otherwise. dep is (name, RPMSENSE_* flag, EVR string) or
    (filename, 0, "")."""
    ret = 0
    conflicts = self._getObsoletes(pkg, dep, list, operation)
    for (c, r) in conflicts:
        if operation == OP_UPDATE and \
               (r in self.pkg_updates or r in self.pkg_obsoletes):
            continue
        if self.isInstalled(r):
            fmt = "%s conflicts with already installed %s on %s, skipping"
        else:
            fmt = "%s conflicts with already added %s on %s, skipping"
        log.warning(fmt, pkg.getNEVRA(), depString(c), r.getNEVRA())
        ret = 1
    return ret
def enqueue(self, bytes_):
    """
    Add bytes (packets) to queue

    :param bytes_: (packet) bytes to enqueue
    """
    # Stop queuing stuff to the bot so we don't run out of mem
    if self.userID == 999:
        return
    try:
        # Acquire the buffer lock
        self._bufferLock.acquire()
        # Avoid memory leaks
        if len(bytes_) < MAX_BYTES:
            self.queue += bytes_
        else:
            log.warning("{}'s packets buffer is above 10M!! Lost some data!".format(self.username))
    finally:
        # Release the buffer lock
        self._bufferLock.release()
def __cleanup_intermediate_config(self):
    # delete kiwi copied config files
    config_files_to_delete = []
    for config in self.cleanup_files:
        config_files_to_delete.append(self.root_dir + config)
    del self.cleanup_files[:]
    # delete stale symlinks if there are any. normally the package
    # installation process should have replaced the symlinks with
    # real files from the packages
    for config in self.config_files:
        if os.path.islink(self.root_dir + config):
            config_files_to_delete.append(self.root_dir + config)
    try:
        Command.run(['rm', '-f'] + config_files_to_delete)
    except Exception as e:
        log.warning('Failed to remove intermediate config files: %s', format(e))
def set_cached_value(arg, val):
    """
    Set the cached value of an all constant numeric expression.
    """
    # We should be setting this to a numeric expression
    if ((not isinstance(val, int)) and
            (not isinstance(val, float)) and
            (not isinstance(val, complex))):
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.warning("Expression '" + str(val) + "' is a " + str(type(val)) + ", not a number. Not caching.")
        return
    # We have a number. Cache it.
    arg_str = str(arg)
    try:
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug("Cache value of " + arg_str + " = " + str(val))
    except UnicodeEncodeError:
        pass
    constant_expr_cache[arg_str] = val
def handle(userToken, packetData):
    try:
        # Start spectating packet
        packetData = clientPackets.startSpectating(packetData)

        # If the user id is less than 0, treat this as a stop spectating packet
        if packetData["userID"] < 0:
            userToken.stopSpectating()
            return

        # Get host token
        targetToken = glob.tokens.getTokenFromUserID(packetData["userID"])
        if targetToken is None:
            raise exceptions.tokenNotFoundException

        # Start spectating new user
        userToken.startSpectating(targetToken)
    except exceptions.tokenNotFoundException:
        # Stop spectating if token not found
        log.warning("Spectator start: token not found")
        userToken.stopSpectating()
def call_driver(self, action, network, **action_kwargs):
    """Invoke an action on a DHCP driver instance."""
    LOG.debug('Calling driver for network: %(net)s action: %(action)s',
              {'net': network.id, 'action': action})
    try:
        # the Driver expects something that is duck typed similar to
        # the base models.
        driver = self.dhcp_driver_cls(self.conf, network,
                                      self._process_monitor,
                                      self.dhcp_version,
                                      self.plugin_rpc)
        getattr(driver, action)(**action_kwargs)
        return True
    except exceptions.Conflict:
        # No need to resync here, the agent will receive the event related
        # to a status update for the network
        LOG.error(traceback.format_exc())
        LOG.warning(('Unable to %(action)s dhcp for %(net_id)s: there '
                     'is a conflict with its current state; please '
                     'check that the network and/or its subnet(s) '
                     'still exist.'),
                    {'net_id': network.id, 'action': action})
    except Exception as e:
        if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure':
            # Don't resync if port could not be created because of an IP
            # allocation failure. When the subnet is updated with a new
            # allocation pool or a port is deleted to free up an IP, this
            # will automatically be retried on the notification
            LOG.error(traceback.format_exc())
            LOG.debug("Network %s has been deleted.", network.id)
        else:
            LOG.exception(('Unable to %(action)s dhcp for %(net_id)s.'),
                          {'net_id': network.id, 'action': action})
            LOG.error("enable dhcp err:%s", e)
            LOG.error(traceback.format_exc())
def get_metadata_exif(filename):
    """Get the Office metadata for a given file with the exiftool utility.

    @param filename (str) The name of the Office file for which to get
    metadata.

    @return (FakeMeta object) An object with a field for each piece of
    metadata.
    """
    # Use exiftool to get the document metadata.
    output = None
    try:
        output = subprocess.check_output(["exiftool", filename])
    except Exception as e:
        log.error("Cannot read metadata with exiftool. " + safe_str_convert(e))
        return {}

    # Sanity check results.
    if (log.getEffectiveLevel() == logging.DEBUG):
        log.debug("exiftool output: '" + safe_str_convert(output) + "'")
    if (":" not in output):
        log.warning("Cannot read metadata with exiftool.")
        return {}

    # Store the metadata in an object.
    lines = output.split("\n")
    r = FakeMeta()
    for line in lines:
        line = line.strip()
        if ((len(line) == 0) or (":" not in line)):
            continue
        field = line[:line.index(":")].strip().lower()
        val = line[line.index(":") + 1:].strip().replace("...", "\r\n")
        setattr(r, field, val)

    # Done.
    return r
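# A minimal usage sketch (assumed, not from the source): exiftool emits
# "Field Name : value" lines, so the returned object carries one lowercased
# attribute per metadata field. The filename is a placeholder.
meta = get_metadata_exif("invoice.docm")
log.debug("author: " + safe_str_convert(getattr(meta, "author", None)))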
def __del__(self):
    if self.volume_group:
        log.info('Cleaning up %s instance', type(self).__name__)
        if self.is_mounted():
            all_volumes_umounted = True
            for mount in reversed(self.mount_list):
                umounted_successfully = False
                for busy in [1, 2, 3]:
                    try:
                        Command.run(['umount', mount.device])
                        umounted_successfully = True
                        break
                    except Exception:
                        log.warning(
                            '%d umount of %s failed, try again in 1sec',
                            busy, mount.device
                        )
                        time.sleep(1)
                if not umounted_successfully:
                    all_volumes_umounted = False
                    log.warning(
                        '%s still busy at %s',
                        self.mountpoint + mount.mountpoint,
                        type(self).__name__
                    )
            if all_volumes_umounted:
                Path.wipe(self.mountpoint)
        try:
            Command.run(['vgchange', '-an', self.volume_group])
        except Exception:
            log.warning('volume group %s still busy', self.volume_group)
def submit(self, flags):
    while (1):
        try:
            ictf = self.ictf.iCTF()
            self.t = ictf.login(self.email, self.token)
            sleep(20)
            break
        except:
            sleep(20)
    status = []
    try:
        out = self.t.submit_flag(flags)
    except Exception as e:
        log.error(e.message)
        return [STATUS['unsubmitted']] * len(flags)
    for stat in out:
        if stat == "correct":
            status.append(STATUS['accepted'])
        elif stat == "alreadysubmitted":
            status.append(STATUS['rejected'])
            log.warning("the flag has already been submitted!")
        elif stat == "incorrect":
            status.append(STATUS['rejected'])
            log.error("wrong flags submitted!")
        elif stat == "notactive":
            status.append(STATUS['old'])
            log.error("inactive!")
        else:
            status.append(STATUS['unsubmitted'])
            log.error("too many incorrect STAHP!!!")
    if len(status) < len(flags):
        status += [
            STATUS['unsubmitted'] for i in range(len(flags) - len(status))
        ]
    return status
def set_hosts_roles(client, cluster, nodes_details, machine_net, tf, master_count, static_network_mode):
    networks_names = (
        nodes_details["libvirt_network_name"],
        nodes_details["libvirt_secondary_network_name"]
    )
    # don't set roles in bip role
    if machine_net.has_ip_v4:
        libvirt_nodes = utils.get_libvirt_nodes_mac_role_ip_and_name(networks_names[0])
        libvirt_nodes.update(utils.get_libvirt_nodes_mac_role_ip_and_name(networks_names[1]))
        if static_network_mode:
            log.info("Setting hostnames when running in static network config mode")
            update_hostnames = True
        else:
            update_hostnames = False
    else:
        log.warning("Work around libvirt for Terraform not setting hostnames of IPv6-only hosts")
        libvirt_nodes = utils.get_libvirt_nodes_from_tf_state(networks_names, tf.get_state())
        update_hostnames = True

    utils.update_hosts(client, cluster.id, libvirt_nodes,
                       update_hostnames=update_hostnames,
                       update_roles=master_count > 1)
def _pull_cells_sheet_internal(sheet):
    """
    Pull all the cells from a Sheet object defined internally in excel.py.
    """
    # We are going to use the internal cells field to build the list of all
    # cells, so this will only work with the ExcelSheet class defined in excel.py.
    if (not hasattr(sheet, "cells")):
        log.warning("Cannot read all cells from internal sheet. Sheet object has no 'cells' attribute.")
        return None

    # Cycle row by row through the sheet, tracking all the cells.
    # Find the max row and column for the cells.
    max_row = -1
    max_col = -1
    for cell_index in sheet.cells.keys():
        curr_row = cell_index[0]
        curr_col = cell_index[1]
        if (curr_row > max_row):
            max_row = curr_row
        if (curr_col > max_col):
            max_col = curr_col

    # Cycle through all the cells in order.
    curr_cells = []
    for curr_row in range(0, max_row + 1):
        for curr_col in range(0, max_col + 1):
            try:
                curr_cell = {
                    "value": sheet.cell(curr_row, curr_col),
                    "row": curr_row + 1,
                    "col": curr_col + 1,
                    "index": _get_alphanum_cell_index(curr_row, curr_col)
                }
                curr_cells.append(curr_cell)
            except KeyError:
                pass

    # Return the cells.
    return curr_cells
def checkDocker(self):
    '''
    check whether Docker is installed, and if it is, verify it as well
    '''
    if self.os == 'ubuntu':
        if self.ssh.exec_command_stdout('which docker') == '':
            log.error('Docker is not installed')
            return False
        _, _, stderr = self.ssh.exec_command('sudo docker run hello-world')
        if stderr.read().decode('utf-8') != '':
            log.error('Docker verification failed')
            return False
        else:
            log.warning('Docker has been successfully verified...')
            return True
    elif self.os == 'centos':
        raise NotImplementedError