def __init__(self, url, cache_data):
    """Set up the relation caches and the Neo4j session.

    :param url: connection string of the form proto://user:password@host,
        e.g. "bolt://neo4j:secret@localhost:7687"
    :param cache_data: backing data handed to the Cache constructor
    """
    # Split "proto://user:password@host" into its pieces.  rsplit('@', 1)
    # and split(':', 1) keep the credentials intact even when the password
    # itself contains '@' or ':' characters (the previous plain split()
    # calls broke on such passwords).
    proto = "{}/".format('/'.join(url.split('/')[:2]))
    userpwd, uri = url.split('/', 2)[2].rsplit('@', 1)
    user, pwd = userpwd.split(':', 1)
    data = [
        "{}{}".format(proto, uri),
        user,
        pwd
    ]
    # TODO: Store this relations in a redis-like cache
    self.cache = Cache(cache_data)
    self.cache.create_cache(self.DEST_RELS)
    self.cache.create_cache(self.DOM_RELS)
    self.cache.create_cache(self.SRC_RELS)
    self.cache.create_cache(self.SRCDST_RELS)
    self.cache.create_cache(self.SRCLOGIN_RELS)
    self.cache.create_cache(self.SESSIONS_RELS)
    self.cache.create_cache(self.FROM_SESSIONS)
    self.cache.create_cache(self.USER_LIST)
    self.cache.create_cache(self.DOM_LIST)
    self.cache.create_cache(self.SRV_LIST)
    self.cache.create_cache(self.SRVDOM_RELS)
    # setup neo4j and pre-create the lookup indexes the importer relies on
    self.drv = GraphDatabase.driver(data[0], auth=basic_auth(data[1], data[2]))
    self.neo = self.drv.session()
    self.neo.run("CREATE INDEX ON :User(sid)")
    self.neo.run("CREATE INDEX ON :Computer(name)")
    self.neo.run("CREATE INDEX ON :Domain(name)")
def _load_data(self):
    """Populate self.test_data with MySQL status, using the cache when fresh."""
    self.test_data = Cache.read(self.name)
    if self.test_data is not None:
        return
    self._init_connector()
    self.test_data = {}
    # Always-on collectors.
    self._get_global_status()
    self._get_variables()
    # Optional collectors: (config flag, extra precondition, collector).
    # Preconditions are lazy so they see the data gathered above.
    optional = (
        ("check_slave",
         lambda: True,
         self._get_slave_status),
        ("check_master",
         lambda: dict_has_item(self.test_data, "log_bin", "ON"),
         self._get_master_status),
        ("check_procs",
         lambda: True,
         self._get_processlist),
        ("check_innodb",
         lambda: "innodb_version" in self.test_data,
         self._get_innodb_status),
    )
    for flag, precondition, collector in optional:
        if self.config.get(flag, True) and precondition():
            collector()
    # Not yet implemented
    # if self.config.get("check_qrt", True):
    #     self._get_percona_qrt()
    Cache.write(self.name, self.test_data)
def handle_channel_msg(self, data):
    """Forward a standardized channel message to IRC.

    Drops the message when it comes from another channel, from a filtered
    sender, or when it does not start with the enable prefix.
    """
    # NOTE: normal messages may now carry a subtype too, so the old
    # subtype-based early return was removed upstream.
    # filter mismatched channel
    c_id = data.get("vchannel_id")
    if c_id != self.bc_default_channel:
        return
    # resolve the sender; robots and users live in different caches
    if data.get("subtype") == "robot":
        sender_id = data.get("robot_id")
        name = Cache.get_robot_true_name(sender_id)
    else:
        sender_id = data.get("uid")
        name = Cache.get_user_en_name(sender_id)
    # filter unwanted senders
    if sender_id in self.id_filter:
        Logger.log("sender %s (%s) in the filter list, abort msg" % (name, sender_id))
        return
    msg = data.get("text")
    # forward only messages carrying the enable prefix; a missing "text"
    # field is treated like a non-standard message instead of crashing
    # with AttributeError on None.startswith
    if msg and msg.startswith(self.msg_enable_pre):
        # split(prefix, 1)[1] keeps everything after the first prefix
        # occurrence (equivalent to the old split(...)[-1:][0])
        self.send_irc_msg(name, msg.split(self.msg_enable_pre, 1)[1])
    else:
        Logger.log("bc msg (%s) was not the standardized format, abort forwarding" % (msg))
def initialize(self):
    """Wire up database handles, caches, the session, and template context."""
    self.db = mongo
    self.search_cache = Cache(master=False, db=config.Cache.searchdb)
    self.hot_image_cache = Cache(master=False, db=config.Cache.imagedb)
    self.queue = Queue()
    # Default (anonymous, not-yet-logged-in) session state.
    anonymous_state = {
        'nickname': None,
        'uid': None,
        'avatar': None,
        'email': None,
        'super': False,
        'channel': None,
        'login': False,
        'net': None,
        'reso': None,
        'height': 0,
        'width': 0,
        'show_msg': None,
        'hd': True,
    }
    self.session = Session(self, MemcacheStore(), initializer=anonymous_state)
    self.session.processor(self)
    # Values exposed to every rendered template.
    self.context = {
        'se': self.session,
        'static_server': config.Server.static_server,
        'cdn': config.CDN.mhost,
    }
def check_for_updates(notify, notify_secs):
    """Check the remote addon.xml for a newer version and trigger an update.

    :param notify: show an on-screen notification when an update starts
    :param notify_secs: notification display time in seconds
    """
    cache = Cache(__path__, minutes=5)
    # If a recent check is cached, skip this one.
    c_version = cache.load(_server_addon_xml_url, False)
    if c_version:
        return
    # Not cached: fetch the published addon.xml.
    try:
        xml = tools.get_web_page(_server_addon_xml_url)
    except WebSiteError as ex:
        tools.write_log('%s: %s' % (ex.title, ex.message))
        return
    server_v = re.findall(r'version="([0-9]{1,5}\.[0-9]{1,5}\.[0-9]{1,5})"', xml, re.U)
    if server_v and type(server_v) == list and len(server_v) > 0:
        cache.save(_server_addon_xml_url, {'version': server_v[0]})
        # Compare versions component-wise as integer tuples.  The previous
        # float('major.minor') comparison mis-ordered versions such as
        # 1.10.x vs 1.9.x (1.10 < 1.9 as floats).
        sv = tuple(int(part) for part in server_v[0].split('.'))
        lv = tuple(int(part) for part in __version__.split('.'))
        if sv > lv:
            tools.write_log('Server version: %s' % server_v[0])
            tools.write_log('Installed version: %s' % __version__)
            if notify:
                tools.Notify().notify(
                    u'Acestream Sports',
                    u'Se está actualizando a la versión %s' % server_v[0],
                    disp_time=notify_secs)
            xbmc.executebuiltin("UpdateAddonRepos")
            xbmc.executebuiltin("UpdateLocalAddons")
def search(self):
    """Look up the IOC in the OpenPhish feed and report the first match."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://openphish.com/"
    for path in ["feed.txt"]:
        feed = Cache(self.module_name, url, path, self.search_method).content
        for entry in feed.split("\n"):
            # host portion of the feed URL (text between "//" and the next "/")
            try:
                host = entry.split("//")[-1].split("/")[0]
            except:
                host = None
            matched = False
            if self.type == "URL":
                matched = self.ioc in entry
            elif self.type == "IPv4" and parse.is_valid_ipv4_address(host):
                matched = self.ioc == host
            elif self.type == "domain" and parse.is_valid_domain(host):
                matched = host == self.ioc
            if matched:
                display(self.module_name, self.ioc, "FOUND", "%s%s" % (url, path))
                return
def get_channels(self, url):
    """Return the playable channels published at *url*.

    Channels are served from the cache when present (with their EPG
    metadata refreshed); otherwise the page is scraped, the channel list
    is built, cached, and returned.

    :param url: channel-list page to scrape
    :return: list of channel dicts (name/video/icon/fanart)
    :raises WebSiteError: when the page has no channels or none are playable
    """
    cache = Cache(self.__settings['path'], minutes=180)
    epg = EPG(self.__settings)
    # Look for the channels in the cache
    channels = cache.load(url)
    if channels:
        # Refresh the EPG metadata of the cached channels
        epg.update_metadata(channels)
        return channels
    # Not cached: fetch them
    channels = []
    # GET url
    page = tools.get_web_page(url)
    # Extract the channel names and URLs
    chs = re.findall(r'#EXTINF:.*,(.*)<br\s/>\s(http[s]?://.*)<', page, re.U)
    if not chs:
        raise WebSiteError(
            u'Problema en Movistar+',
            u'No se han encontrado canales en la lista seleccionada',
            time=self.__settings['notify_secs'])
    # Append the channels found to the list
    for ch in chs:
        ch_name = tools.str_sanitize(ch[0])
        ch_link = tools.str_sanitize(ch[1])
        # NOTE(review): links ending in .m3u/.m3u8 are skipped here --
        # presumably they are nested playlists rather than streams; confirm
        if not (ch_link.endswith('.m3u8') or ch_link.endswith('.m3u')):
            channels.append({
                'name': ch_name,
                'video': ch_link,
                'icon': tools.build_path(
                    self.__settings['path'],
                    'bein.png' if self.__sports else 'movistar.png'),
                'fanart': tools.build_path(
                    self.__settings['path'],
                    'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
            })
    if len(channels) == 0:
        raise WebSiteError(
            u'No hay canales',
            u'La lista no contiene canales que se puedan reproducir',
            time=self.__settings['notify_secs'])
    # Attach the EPG data to the channels
    if not self.__sports:
        epg.add_metadata(channels)
    # Cache the channels and return them
    cache.save(url, channels)
    return channels
def handle_msg(self, data):
    """Route one incoming event to the matching handler by its type."""
    kind = data.get("type")
    if kind == "update_user":
        # user roster changed: refresh the cache
        Cache.update()
        return
    if kind == "channel_message":
        self.handle_channel_msg(data)
def get_menu(self):
    """
    Get MovistarTV channel lists

    Served from the cache when present; otherwise scraped from the index
    page (one list per day, newest first), cached, and returned.

    :return: The list of MovistarTV channel lists
    :rtype: list
    :raises WebSiteError: when no channel lists can be found on the page
    """
    cache = Cache(self.__settings['path'], minutes=60)
    # Look for the channel lists in the cache
    ch_lists = cache.load(
        self.__sports_url if self.__sports else self.__channels_url)
    if ch_lists:
        return ch_lists
    # Not cached: fetch them
    ch_lists = []
    # GET http://iptv.filmover.com/category/spain/
    page = tools.get_web_page(
        self.__sports_url if self.__sports else self.__channels_url)
    # Find every channel-list URL
    # One per day; the first one is the most recent list
    urls = re.findall(
        r'<h2\s*class="entry-tit.*le">\s*<a href="(.*)"\s*rel="bookmark">(.*)</a></h2>',
        page, re.U)
    if not (urls and type(urls) == list and len(urls) > 0):
        raise WebSiteError(u'Lista de canales no encontrada',
                           u'Los de Movistar+ han hecho cambios en la Web',
                           time=self.__settings['notify_secs'])
    for url in urls:
        ch_lists.append({
            'name': tools.str_sanitize(url[1]),
            'channel_url': tools.str_sanitize(url[0]),
            'icon': tools.build_path(
                self.__settings['path'],
                'bein.png' if self.__sports else 'movistar.png'),
            'fanart': tools.build_path(
                self.__settings['path'],
                'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
        })
    if len(ch_lists) == 0:
        raise WebSiteError(
            u'Problema en Movistar+',
            u'No se han encontrado listas de canales en la Web',
            time=self.__settings['notify_secs'])
    cache.save(self.__sports_url if self.__sports else self.__channels_url,
               ch_lists)
    return ch_lists
def _load_data(self):
    """Load raw statistics, preferring a cached copy younger than an hour."""
    self.test_data = Cache.read(self.name, ttl=3600)
    if self.test_data is not None:
        return
    # Cache miss: read the statistics file in one shot.  f.read() is the
    # direct equivalent of "".join(f.readlines()) without building an
    # intermediate list of lines.
    with open(self._get_statistics_file(), "r") as f:
        self.test_data = f.read()
    Cache.write(self.name, self.test_data)
def _load_data(self):
    """Load stats from the cache, refreshing and parsing them on a miss."""
    self.test_data = Cache.read(self.name)
    if self.test_data is None:
        # Cache miss: pull fresh stats and parse them into test_data.
        self._refresh_stats()
        self._parse_stats()
        Cache.write(self.name, self.test_data)
def _load_data(self):
    """Fetch "SHOW STATUS" over the MySQL protocol unless already cached."""
    self.test_data = Cache.read(self.name)
    if self.test_data is None:
        host = self.config.get("host", "127.0.0.1")
        port = self.config.get("port", 9306)
        connector = MySQLConnector(host=host, port=port)
        self.test_data = self._format_data(connector.get("SHOW STATUS"))
        Cache.write(self.name, self.test_data)
def _load_data(self):
    """Read HAProxy "show info"/"show stat" output, caching the result."""
    self.test_data = Cache.read(self.name)
    if self.test_data is None:
        stats_socket = self.config.get("socket", "/var/lib/haproxy/stats.sock")
        connector = SocketConnector(socket_file=stats_socket,
                                    command="show info\nshow stat\n")
        self.test_data = connector.get()
        Cache.write(self.name, self.test_data)
def search(self):
    """Report the IOC when it appears in the immortal-domains feed."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://mirror1.malwaredomains.com/files/"
    for path in ("immortal_domains.txt",):
        feed = Cache(self.module_name, url, path, self.search_method).content
        # exact, whole-line match against each domain in the list
        for entry in feed.split("\n"):
            if entry == self.ioc:
                display(self.module_name, self.ioc, "FOUND", "%s%s" % (url, path))
def _load_data(self):
    """Grab "show info"/"show stat" output from the HAProxy admin socket."""
    cached = Cache.read(self.name)
    if cached is not None:
        self.test_data = cached
        return
    self.test_data = SocketConnector(
        socket_file=self.config.get("socket", "/var/lib/haproxy/stats.sock"),
        command="show info\nshow stat\n").get()
    Cache.write(self.name, self.test_data)
def _load_data(self):
    """Return the statistics lines, reading the file only on a cache miss."""
    self.test_data = Cache.read(self.name, 3000)
    if self.test_data is None:
        with open(self._get_statistics_file(), "r") as f:
            self.test_data = f.readlines()
        Cache.write(self.name, self.test_data)
    return self.test_data
def test_cache_no_columns():
    """With no columns registered, nothing is cached and lookups are empty."""
    cache = Cache(set())
    rows = [{'bla': 1, 'xyz': 2}, {'bla': 3, 'xyz': 4}]
    # add() must silently ignore data for unregistered columns
    cache.add('a', rows)
    assert cache.retrieve('a.bla') == []
def _load_data(self):
    """Log in over the admin socket and fetch STATS output, caching it."""
    self.test_data = Cache.read(self.name)
    if self.test_data is None:
        login_cmd = "LOGIN %s %s\nSTATS .\nQUIT\n" % (
            self.config.get("user", ""), self.config.get("passwd", ""))
        connector = SocketConnector(
            host=self.config.get("host", "127.0.0.1"),
            port=self.config.get("port", 9002),
            command=login_cmd)
        self.test_data = connector.get()
        Cache.write(self.name, self.test_data)
def search(self):
    """Report the IOC when it appears in the VXVault URL list."""
    mod.display(self.module_name, "", "INFO", "Searching...")
    url = "http://vxvault.net/"
    for path in ("URL_List.php",):
        feed = Cache(self.module_name, url, path, self.search_method).content
        for entry in feed.split("\n"):
            if self.ioc in entry:
                mod.display(self.module_name, self.ioc, "FOUND",
                            "%s%s" % (url, path))
                return
def search(self):
    """Report the IOC when it matches a host in the MDL hosts file."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://www.malwaredomainlist.com/hostslist/"
    paths = ["hosts.txt"]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            # hosts entries look like "127.0.0.1 <hostname>"
            if "127.0.0.1" not in line:
                continue
            fields = line.split(" ")
            # guard against lines that carry no second field, which
            # previously raised an uncaught IndexError
            if len(fields) > 1 and self.ioc == fields[1].strip():
                display(self.module_name, self.ioc, "FOUND", "%s%s" % (url, path))
def search(self):
    """Report the IOC when it matches a domain in the malwaredomains feed."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://malwaredomains.lehigh.edu/files/"
    paths = ["domains.txt"]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            # skip blanks and comment lines
            if not line or line[0] == '#':
                continue
            # data lines: "<prefix>\t\t<domain>\t<category>\t..."; guard
            # against malformed lines that previously raised IndexError
            columns = line.split("\t\t")
            if len(columns) < 2:
                continue
            fields = columns[1].split("\t")
            if self.ioc == fields[0]:
                category = fields[1] if len(fields) > 1 else ""
                display(self.module_name, self.ioc, "FOUND",
                        "[%s] %s%s" % (category, url, path))
def search(self):
    """Report the IOC when it appears in the cybercrime-tracker feed."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://cybercrime-tracker.net/"
    paths = ["all.php"]
    # the feed stores bare hosts, so drop the scheme off URL IOCs
    if self.type == "URL":
        self.ioc = self.ioc.split("//")[1]
    for path in paths:
        feed = Cache(self.module_name, url, path, self.search_method).content
        for entry in feed.split("\n"):
            if self.ioc in entry:
                display(self.module_name, self.ioc, "FOUND",
                        "%s%s" % (url, path))
def _load_data(self):
    """Fetch the page body plus its load time, caching successful fetches.

    Raises CheckFail when the HTTP status is not 200.
    """
    self.test_data = Cache.read(self.name)
    if self.test_data is not None:
        return
    connector = self._get_connector()
    code, headers, body, error = connector.get()
    # the load-time suffix becomes part of the data handed downstream
    self.test_data = body + connector.get_load_time()
    if not code.startswith("200"):
        raise CheckFail("Unable to get response: %s (code: %s)" % (error, code))
    Cache.write(self.name, self.test_data)
def search(self):
    """Report the IOC when it matches an entry in the hosts file feed."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://hosts-file.malwareteks.com/"
    paths = ["hosts.txt"]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            # entries look like "127.0.0.1 <hostname>"; splitting on the
            # sentinel address yields a second element only when it is
            # present, so test that explicitly instead of swallowing the
            # IndexError (and every other error) with a bare except
            parts = line.split("127.0.0.1")
            if len(parts) > 1 and self.ioc == parts[1].strip():
                display(self.module_name, self.ioc, "FOUND", "%s%s" % (url, path))
def search(self):
    """Report the IOC when it appears in any nothink.org blacklist."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://www.nothink.org/blacklist/"
    feeds = (
        "blacklist_snmp_year.txt",
        "blacklist_ssh_year.txt",
        "blacklist_telnet_year.txt",
    )
    for path in feeds:
        feed = Cache(self.module_name, url, path, self.search_method).content
        for entry in feed.split("\n"):
            if self.ioc in entry:
                display(self.module_name, self.ioc, "FOUND",
                        "%s%s" % (url, path))
def search(self):
    """Report the IOC when it appears in the abuse.ch SSL blacklist."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://sslbl.abuse.ch/blacklist/"
    for path in ("sslblacklist.csv",):
        feed = Cache(self.module_name, url, path, self.search_method).content
        for entry in feed.split("\n"):
            if self.ioc not in entry:
                continue
            # include the third CSV field of the matching row in the report
            details = entry.split(',')
            display(self.module_name, self.ioc, "FOUND",
                    "%s | %s%s" % (details[2], url, path))
            return
def __init__(self, bot):
    """Load configuration, prime the BearyChat cache, and build helpers."""
    self.bot = bot
    parser = configparser.ConfigParser()
    parser.read(CONF_FILE)
    # optional newline-separated list of users whose messages are ignored
    raw_users = parser["bot"].get("ignore_users", "")
    self.ignore_users = [u for u in raw_users.split("\n") if u.strip()]
    # fill the bearychat cache
    Cache.init()
    self.bc = Bearychat(self.bot)
    self.emojis = Emojis()
def search(self):
    """Report the IOC when it appears in the ransomware tracker feed."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://ransomwaretracker.abuse.ch/feeds/"
    paths = ["csv"]
    content = Cache(self.module_name, url, paths[0], self.search_method).content
    for line in content.split("\n"):
        if self.ioc not in line:
            continue
        # we need at least three CSV columns to name the threat; skip
        # short/malformed lines explicitly instead of hiding every error
        # behind a bare except clause
        columns = line.split(",")
        if len(columns) < 3:
            continue
        display(self.module_name, self.ioc, "FOUND",
                "%s | %s%s" % (columns[2].replace('"', '', 2), url, paths[0]))
def __init__(self, output, cache_data):
    """Create the relation caches and the graphviz digraph."""
    # TODO: Store this relations in a redis-like cache
    self.cache = Cache(cache_data)
    # one sub-cache per relation/list kind used by the grapher
    for cache_name in (
            self.DEST_RELS, self.DOM_RELS, self.SRC_RELS, self.SRCDST_RELS,
            self.SRCLOGIN_RELS, self.SESSIONS_RELS, self.FROM_SESSIONS,
            self.USER_LIST, self.DOM_LIST, self.SRV_LIST, self.SRVDOM_RELS):
        self.cache.create_cache(cache_name)
    self.output = output
    self.graph = gv.Digraph()
def search(self):
    """Query Malshare for details about the IOC hash.

    Requires a non-empty "malshare_api_key" in the module configuration;
    an absent OR empty key is reported as an error (previously an empty
    key silently did nothing).
    """
    mod.display(self.module_name, "", "INFO", "Searching...")
    url = "http://malshare.com/"
    if not self.config.get("malshare_api_key"):
        mod.display(self.module_name,
                    message_type="ERROR",
                    string="You must have a malshare api key to use this module ")
        return
    paths = [
        "api.php?api_key=%s&action=details&hash=%s"
        % (self.config["malshare_api_key"], self.ioc)
    ]
    for path in paths:
        try:
            content = json.loads(
                Cache(self.module_name, url, path, self.search_method).content)
            # defang the reported URLs so they are not clickable
            safe_urls = [u.replace("http", "hxxp") for u in content["SOURCES"]]
        except (ValueError, KeyError, TypeError):
            # non-JSON reply or a payload without SOURCES: nothing to report
            continue
        mod.display(self.module_name, self.ioc, "FOUND",
                    "%s | %s%s" % (safe_urls, url, path))
        return
def search(self):
    """Report the IOC when it appears in a DShield suspicious-domain feed."""
    mod.display(self.module_name, "", "INFO", "Searching...")
    url = "https://www.dshield.org/feeds/"
    paths = [
        "suspiciousdomains_Low.txt",
        "suspiciousdomains_Medium.txt",
        "suspiciousdomains_High.txt"
    ]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            # skip blank lines and comments explicitly; the old bare
            # except existed only to absorb the IndexError that line[0]
            # raised on empty lines (and hid every other error with it)
            if not line or line.startswith('#'):
                continue
            if line.lower() == self.ioc.lower():
                mod.display(self.module_name, self.ioc, "FOUND",
                            "%s%s" % (url, path))
def __init__(self, db, me, focus_users):
    """Remember the user context and build the app-wide singletons."""
    self.db = db
    self.me = me
    self.focus_users = focus_users
    # the current user is always part of the focus list
    if me not in self.focus_users:
        self.focus_users.append(me)
    # setup app-wide singletons
    self.cache = Cache()
    self.threads = Threads(self.db, self.me)
def run_scraper(self, target_url, target_element_name):
    """
    Run the scraper, check the cache, and log the differences.

    Returns a (message, diff) tuple; diff is an empty string when the
    scrape failed or nothing changed.
    """
    # fire up scraper and cache objects
    scraper = Scraper()
    cache = Cache()
    # diff must exist on every path: the failure branch below previously
    # returned it without ever assigning it, raising NameError
    diff = u''
    # define the target and cached content
    target_content = scraper.fetch_site_content(
        target_url,
        target_element_name
    )
    cached_content = cache.fetch_cache(target_url)
    # check the cache and report our findings
    if target_content is not None:
        diff = cache.diff_cache(target_content, cached_content)
        # compare by value: the old "is not u''" checked object identity
        # and only worked by accident of string interning
        if diff != u'':
            logging.info('The target differs from the cache.')
            logging.info(diff)
            logging.info('Updating cache...')
            cache.update_cache(target_url, target_content)
            logging.info('Cache updated.')
            logging.info('Sending mail...')
            email_result = self.send_email(target_url, diff)
            logging.info(email_result)
            message = 'Success! Cache updated.'
        else:
            logging.info('The target and cache match. Not altering cache.')
            message = 'Success! Cache not altered.'
    else:
        logging.warn('Unable to fetch requested page! D:')
        logging.error('Scraping falure.')
        message = 'Failure!'
    logging.info('Scraper finished.')
    return message, diff
def _load_data(self):
    """Return the nginx status lines, fetching them on a cache miss.

    Always returns a list of lines.  The previous version cached the raw
    string, so a cache hit returned a string while a miss returned a list;
    the split list is now cached instead (the sibling checks already cache
    list values), keeping the return shape consistent.

    :raises CheckFail: when the status endpoint does not answer 200
    """
    self.test_data = Cache.read(self.name)
    if self.test_data is not None:
        return self.test_data
    url = "%s://%s:%s%s" % (
        self.config.get("proto", "http"),
        self.config.get("host", "localhost"),
        self.config.get("port", "80"),
        self.config.get("resource", "/nginx-status")
    )
    connector = UrlConnector(url)
    response = connector.get()
    if response.status_code != 200:
        raise CheckFail("Unable to retrieve data (error code: %s)" % response.status_code)
    self.test_data = response.text.strip().split("\n")
    Cache.write(self.name, self.test_data)
    return self.test_data
class HotImage:
    """Maintains a Redis-like cached list of the most-viewed images.

    NOTE: this is Python 2 code (print statements, dict.has_key).
    """

    def __init__(self):
        # upper bound for the hot list -- not enforced anywhere in this
        # class; TODO confirm where (or whether) it is applied
        self.maximg = 5000
        # cache key under which the hot-image list is stored
        self.name = config.Cache.hot_image_cache
        self.c = Cache(master=True, db=config.Cache.imagedb)

    def _get_hot_images(self, days):
        """Return (imgid, view_count) pairs from the last *days* days,
        sorted by view count ascending."""
        start = datetime.datetime.now() - datetime.timedelta(days=days)
        settinglog = mongo.setting_log.find({'atime': {'$gt': start}})
        # tally views per image id
        hot = {}
        for i in settinglog:
            if not hot.has_key(i['imgid']):
                hot[i['imgid']] = 1
            else:
                hot[i['imgid']] += 1
        return list(sorted(hot.items(), key=lambda x: x[1]))

    def cache_hot_images(self, days):
        """Rebuild the cached hot-image list from the last *days* days,
        most-viewed first."""
        from lib.encoder import MongoEncoder
        self.delete_cache()
        # reverse() turns the ascending tally into most-viewed-first order
        imgs = [i[0] for i in self._get_hot_images(days)]
        imgs.reverse()
        print 'hot image size', len(imgs)
        for i in imgs:
            try:
                img = mongo.image.find_one({'_id': objectid.ObjectId(i)})
                if img:
                    self.c.rpush(self.name, json.dumps(img, cls=MongoEncoder))
            except:
                # NOTE(review): the continue below is unreachable -- raise
                # always propagates first; confirm whether "continue" (skip
                # bad ids) or "raise" (abort) was intended
                raise
                continue

    def delete_cache(self):
        """Drop the cached hot-image list."""
        print 'remove cache', self.name
        self.c.remove(self.name)

    def get(self, num):
        """Return the first *num* entries of the cached hot-image list."""
        return self.c.find_list(self.name, 0, num)
def __get_all_events(self):
    """
    Get all Torrent-TV.ru events

    Served from the cache when present; otherwise scraped from the JSON
    agenda page, cached, and returned.

    :return: The list of Torrent-TV.ru events
    :rtype: list
    :raises WebSiteError: when no name/url/cat tags can be found
    """
    cache = Cache(self.__settings['path'])
    # Look for the agenda in the cache
    events = cache.load(self.__agenda_url)
    if events:
        return events
    # Not cached: fetch it
    events = []
    # GET http://super-pomoyka.us.to/trash/ttv-list/ttv.json
    channels = tools.get_web_page(self.__agenda_url)
    # Find every name, url and cat tag and store them as a list of
    # ('tag', 'value') tuples
    data = re.findall(r'(name|url|cat)":"([^"]*)"', channels, re.U)
    if not (data and type(data) == list and len(data) > 0):
        raise WebSiteError(
            u'Lista de canales no encontrada',
            u'Los de TorrentTV.ru han hecho cambios en la Web',
            time=self.__settings['notify_secs'])
    # Walk the list three items at a time (name, url, cat per event).
    # NOTE(review): "len(data) / 3" relies on Python 2 integer division;
    # under Python 3 this would need "//"
    for x in range(0, len(data) / 3):
        name = data[x * 3][1]
        url = data[x * 3 + 1][1]
        cat = data[x * 3 + 2][1]
        events.append({'name': name, 'url': url, 'cat': cat})
    if len(events) == 0:
        raise WebSiteError(u'Problema en TorrentTV',
                           u'No hay canales o no hay enlaces en la Web',
                           time=self.__settings['notify_secs'])
    cache.save(self.__agenda_url, events)
    return events