def initialize(self):
    """Set up per-request resources: DB handle, caches, work queue, session and
    template context.  Called once per handler instance by the framework.
    """
    self.db = mongo
    # Read-only (master=False) cache connections for search results and hot images.
    self.search_cache = Cache(master=False, db=config.Cache.searchdb)
    self.hot_image_cache = Cache(master=False, db=config.Cache.imagedb)
    self.queue = Queue()
    # Memcache-backed session; the initializer describes an anonymous,
    # logged-out user ('login': False, no uid/nickname) with HD enabled.
    self.session = Session(self, MemcacheStore(), initializer={
        'nickname': None,
        'uid': None,
        'avatar': None,
        'email': None,
        'super': False,
        'channel': None,
        'login': False,
        'net': None,
        'reso': None,
        'height': 0,
        'width': 0,
        'show_msg': None,
        'hd': True
    })
    self.session.processor(self)
    # Values exposed to templates: session object, static asset host and CDN host.
    self.context = {
        'se': self.session,
        'static_server': config.Server.static_server,
        'cdn': config.CDN.mhost,
    }
def check_for_updates(notify, notify_secs):
    """Check the add-on repository for a newer published version and, if one
    exists, trigger a Kodi repo/add-on update.

    :param notify: show an on-screen notification when an update is found
    :param notify_secs: notification display time in seconds
    """
    cache = Cache(__path__, minutes=5)
    # If a recent check is cached, skip the remote lookup entirely.
    c_version = cache.load(_server_addon_xml_url, False)
    if c_version:
        return
    # Not cached: fetch the server's addon.xml and extract the version.
    try:
        xml = tools.get_web_page(_server_addon_xml_url)
    except WebSiteError as ex:
        tools.write_log('%s: %s' % (ex.title, ex.message))
        return
    server_v = re.findall(
        r'version="([0-9]{1,5}\.[0-9]{1,5}\.[0-9]{1,5})"', xml, re.U)
    if server_v:
        cache.save(_server_addon_xml_url, {'version': server_v[0]})
        # Compare versions as integer tuples.  The original compared
        # float('major.minor'), which ordered 1.10 BEFORE 1.9 and broke
        # whenever minor/patch reached two digits.
        sv = tuple(int(part) for part in server_v[0].split('.'))
        lv = tuple(int(part) for part in __version__.split('.'))
        if sv > lv:
            tools.write_log('Server version: %s' % server_v[0])
            tools.write_log('Installed version: %s' % __version__)
            if notify:
                tools.Notify().notify(
                    u'Acestream Sports',
                    u'Se está actualizando a la versión %s' % server_v[0],
                    disp_time=notify_secs)
            xbmc.executebuiltin("UpdateAddonRepos")
            xbmc.executebuiltin("UpdateLocalAddons")
def search(self):
    """Query the MalShare API for details about self.ioc.

    Requires 'malshare_api_key' in self.config; reports an error when the key
    is present but empty.
    """
    mod.display(self.module_name, "", "INFO", "Searching...")
    url = "http://malshare.com/"
    if "malshare_api_key" in self.config:
        if self.config["malshare_api_key"]:
            paths = [
                "api.php?api_key=%s&action=details&hash=%s" % (
                    self.config["malshare_api_key"], self.ioc)
            ]
            for path in paths:
                try:
                    content = json.loads(
                        Cache(self.module_name, url, path,
                              self.search_method).content)
                    safe_urls = []
                    # Defang URLs (http -> hxxp) so the report is not clickable.
                    for malware_url in content["SOURCES"]:
                        safe_urls.append(malware_url.replace("http", "hxxp"))
                    mod.display(self.module_name, self.ioc, "FOUND",
                                "%s | %s%s" % (safe_urls, url, path))
                    return
                # Best effort: a failed fetch/parse means "not found".  The
                # original bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; catch Exception instead.
                except Exception:
                    pass
        else:
            mod.display(self.module_name,
                        message_type="ERROR",
                        string="You must have a malshare api key to use this module ")
def search(self):
    """Search the OpenPhish feed for self.ioc (URL, IPv4 or domain)."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://openphish.com/"
    paths = ["feed.txt"]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            try:
                # Host portion of the feed URL: between "//" and the next "/".
                midle = line.split("//")[-1].split("/")[0]
            # The original used a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrow it to Exception.
            except Exception:
                midle = None
            if self.type == "URL":
                if self.ioc in line:
                    display(self.module_name, self.ioc, "FOUND",
                            "%s%s" % (url, path))
                    return
            elif self.type == "IPv4" and parse.is_valid_ipv4_address(midle):
                if self.ioc == midle:
                    display(self.module_name, self.ioc, "FOUND",
                            "%s%s" % (url, path))
                    return
            elif self.type == "domain" and parse.is_valid_domain(midle):
                if midle == self.ioc:
                    display(self.module_name, self.ioc, "FOUND",
                            "%s%s" % (url, path))
                    return
def __init__(self, url, cache_data):
    """Connect to neo4j and prepare the in-memory relation caches.

    :param url: connection string, assumed to look like
                '<proto>//<user>:<password>@<host[:port]>'
                NOTE(review): this hand-rolled parsing breaks if the password
                itself contains '@' or ':' — confirm upstream guarantees.
    :param cache_data: backing data handed to the Cache constructor
    """
    # Split "<proto>//user:pwd@uri" into [full_uri, user, password].
    proto = "{}/".format('/'.join(url.split('/')[:2]))
    userpwd = url.split('/')[2].split('@')[0]
    uri = url.split('@')[1]
    data = [
        "{}{}".format(proto, uri),
        userpwd.split(':')[0],
        userpwd.split(':')[1]
    ]
    # TODO: Store this relations in a redis-like cache
    self.cache = Cache(cache_data)
    self.cache.create_cache(self.DEST_RELS)
    self.cache.create_cache(self.DOM_RELS)
    self.cache.create_cache(self.SRC_RELS)
    self.cache.create_cache(self.SRCDST_RELS)
    self.cache.create_cache(self.SRCLOGIN_RELS)
    self.cache.create_cache(self.SESSIONS_RELS)
    self.cache.create_cache(self.FROM_SESSIONS)
    self.cache.create_cache(self.USER_LIST)
    self.cache.create_cache(self.DOM_LIST)
    self.cache.create_cache(self.SRV_LIST)
    self.cache.create_cache(self.SRVDOM_RELS)
    # setup neo4j
    self.drv = GraphDatabase.driver(data[0],
                                    auth=basic_auth(data[1], data[2]))
    self.neo = self.drv.session()
    # Indexes to speed up node lookups on the labels written later.
    self.neo.run("CREATE INDEX ON :User(sid)")
    self.neo.run("CREATE INDEX ON :Computer(name)")
    self.neo.run("CREATE INDEX ON :Domain(name)")
def get_channels(self, url):
    """Fetch and cache the channel list published at *url*.

    :param url: page containing an M3U-style channel listing
    :return: list of channel dicts with 'name', 'video', 'icon', 'fanart'
    :raises WebSiteError: when no channels are found or none are playable
    """
    cache = Cache(self.__settings['path'], minutes=180)
    epg = EPG(self.__settings)
    # Look for the channels in the cache first.
    channels = cache.load(url)
    if channels:
        # Refresh the channels' EPG metadata before returning.
        epg.update_metadata(channels)
        return channels
    # Not cached: scrape them from the web page.
    channels = []
    # GET url
    page = tools.get_web_page(url)
    # Extract channel names and URLs ("#EXTINF:...,<name><br/> <url>").
    chs = re.findall(r'#EXTINF:.*,(.*)<br\s/>\s(http[s]?://.*)<', page, re.U)
    if not chs:
        raise WebSiteError(
            u'Problema en Movistar+',
            u'No se han encontrado canales en la lista seleccionada',
            time=self.__settings['notify_secs'])
    # Add every channel found to the list, skipping plain m3u/m3u8 links.
    for ch in chs:
        ch_name = tools.str_sanitize(ch[0])
        ch_link = tools.str_sanitize(ch[1])
        if not (ch_link.endswith('.m3u8') or ch_link.endswith('.m3u')):
            channels.append({
                'name': ch_name,
                'video': ch_link,
                'icon': tools.build_path(
                    self.__settings['path'],
                    'bein.png' if self.__sports else 'movistar.png'),
                'fanart': tools.build_path(
                    self.__settings['path'],
                    'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
            })
    if len(channels) == 0:
        raise WebSiteError(
            u'No hay canales',
            u'La lista no contiene canales que se puedan reproducir',
            time=self.__settings['notify_secs'])
    # Attach EPG metadata (sports lists have no EPG).
    if not self.__sports:
        epg.add_metadata(channels)
    # Store the channels in the cache and return them.
    cache.save(url, channels)
    return channels
def get_menu(self):
    """
    Get MovistarTV channel lists

    :return: The list of MovistarTV channel lists
    :rtype: list
    :raises WebSiteError: when no channel-list links are found on the page
    """
    cache = Cache(self.__settings['path'], minutes=60)
    # Look for the channel lists in the cache first.
    ch_lists = cache.load(
        self.__sports_url if self.__sports else self.__channels_url)
    if ch_lists:
        return ch_lists
    # Not cached: scrape them from the site.
    ch_lists = []
    # GET http://iptv.filmover.com/category/spain/
    page = tools.get_web_page(
        self.__sports_url if self.__sports else self.__channels_url)
    # Find every channel-list URL.
    # One per day; the first one is the most recent list.
    urls = re.findall(
        r'<h2\s*class="entry-tit.*le">\s*<a href="(.*)"\s*rel="bookmark">(.*)</a></h2>',
        page, re.U)
    if not (urls and type(urls) == list and len(urls) > 0):
        raise WebSiteError(u'Lista de canales no encontrada',
                           u'Los de Movistar+ han hecho cambios en la Web',
                           time=self.__settings['notify_secs'])
    for url in urls:
        ch_lists.append({
            'name': tools.str_sanitize(url[1]),
            'channel_url': tools.str_sanitize(url[0]),
            'icon': tools.build_path(
                self.__settings['path'],
                'bein.png' if self.__sports else 'movistar.png'),
            'fanart': tools.build_path(
                self.__settings['path'],
                'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
        })
    if len(ch_lists) == 0:
        raise WebSiteError(
            u'Problema en Movistar+',
            u'No se han encontrado listas de canales en la Web',
            time=self.__settings['notify_secs'])
    cache.save(self.__sports_url if self.__sports else self.__channels_url,
               ch_lists)
    return ch_lists
def __init__(self, db, me, focus_users):
    """Store the DB handle and focus-user list, then build app singletons.

    :param db: database handle shared with the Threads singleton
    :param me: current user; always added to focus_users (list is mutated)
    :param focus_users: list of users of interest
    """
    self.db = db
    self.me = me
    self.focus_users = focus_users
    # Always include the current user in the focus set.
    # (Idiom fix: `me not in` instead of the original `not me in`.)
    if me not in self.focus_users:
        self.focus_users.append(me)
    # setup app-wide singletons
    self.cache = Cache()
    self.threads = Threads(self.db, self.me)
def search(self):
    """Search the Palevo tracker domain blocklist for self.ioc."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://palevotracker.abuse.ch/"
    paths = ["blocklists.php?download=domainblocklist"]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        # Bug fix: the original iterated `content` directly, i.e. character
        # by character, so a domain IOC could never equal a "line".  Split on
        # newlines like the sibling blocklist modules do.
        for line in content.split("\n"):
            if line == self.ioc:
                display(self.module_name, self.ioc, "FOUND",
                        "%s%s" % (url, path))
def test_cache_no_columns():
    """A Cache configured with no cacheable columns must ignore added rows."""
    cache = Cache(set())
    rows = [{'bla': 1, 'xyz': 2}, {'bla': 3, 'xyz': 4}]
    # Nothing is registered for caching, so this add is a no-op.
    cache.add('a', rows)
    assert cache.retrieve('a.bla') == []
def search(self):
    """Look up self.ioc in the malwaredomains immortal-domains blocklist."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://mirror1.malwaredomains.com/files/"
    for path in ("immortal_domains.txt",):
        feed = Cache(self.module_name, url, path, self.search_method).content
        for entry in feed.split("\n"):
            if entry != self.ioc:
                continue
            display(self.module_name, self.ioc, "FOUND", "%s%s" % (url, path))
def search(self):
    """Check the torstatus Tor-node CSV exports for self.ioc."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    base = "http://torstatus.blutmagie.de/"
    csv_paths = (
        "ip_list_all.php/Tor_ip_list_ALL.csv",
        "query_export.php/Tor_query_EXPORT.csv",
        "ip_list_exit.php/Tor_ip_list_EXIT.csv",
    )
    for csv_path in csv_paths:
        listing = Cache(self.module_name, base, csv_path,
                        self.search_method).content
        if self.ioc in listing:
            display(self.module_name, self.ioc, "FOUND",
                    "%s%s" % (base, csv_path))
def search(self):
    """Search the MalwareDomainList hosts file for self.ioc."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://www.malwaredomainlist.com/hostslist/"
    for path in ("hosts.txt",):
        hosts = Cache(self.module_name, url, path, self.search_method).content
        for entry in hosts.split("\n"):
            # hosts format: "127.0.0.1 <domain>" — skip anything else.
            if "127.0.0.1" not in entry:
                continue
            if self.ioc == entry.split(" ")[1].strip():
                display(self.module_name, self.ioc, "FOUND",
                        "%s%s" % (url, path))
def search(self):
    """Search the VXVault URL list for self.ioc; report the first hit."""
    mod.display(self.module_name, "", "INFO", "Searching...")
    url = "http://vxvault.net/"
    for path in ("URL_List.php",):
        listing = Cache(self.module_name, url, path, self.search_method).content
        if any(self.ioc in entry for entry in listing.split("\n")):
            mod.display(self.module_name, self.ioc, "FOUND",
                        "%s%s" % (url, path))
            return
def search(self):
    """Search the malwaredomains domains.txt feed for self.ioc.

    Feed format per data line: "<notes>\\t\\t<domain>\\t<category>\\t...".
    """
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://malwaredomains.lehigh.edu/files/"
    paths = ["domains.txt"]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            if line and line[0] != '#':
                fields = line.split("\t\t")
                # Robustness fix: the original indexed [1] unconditionally
                # and raised IndexError on malformed non-comment lines.
                if len(fields) < 2:
                    continue
                base = fields[1]
                if self.ioc == base.split("\t")[0]:
                    display(self.module_name, self.ioc, "FOUND",
                            "[%s] %s%s" % (base.split("\t")[1], url, path))
def search(self):
    """Search the cybercrime-tracker C2 listing for self.ioc."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://cybercrime-tracker.net/"
    feed_paths = ("all.php",)
    if self.type == "URL":
        # Feed entries carry no scheme, so drop it from the IOC as well.
        self.ioc = self.ioc.split("//")[1]
    for feed_path in feed_paths:
        tracker = Cache(self.module_name, url, feed_path,
                        self.search_method).content
        for entry in tracker.split("\n"):
            if self.ioc in entry:
                display(self.module_name, self.ioc, "FOUND",
                        "%s%s" % (url, feed_path))
def search(self):
    """Check the nothink.org yearly SNMP/SSH/Telnet blacklists for self.ioc."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://www.nothink.org/blacklist/"
    blacklists = (
        "blacklist_snmp_year.txt",
        "blacklist_ssh_year.txt",
        "blacklist_telnet_year.txt",
    )
    for path in blacklists:
        feed = Cache(self.module_name, url, path, self.search_method).content
        for entry in feed.split("\n"):
            if self.ioc in entry:
                display(self.module_name, self.ioc, "FOUND",
                        "%s%s" % (url, path))
def search(self):
    """Search the hpHosts-style hosts.txt for self.ioc.

    Each data line looks like "127.0.0.1<ws><domain>"; the part after
    "127.0.0.1" is compared against the IOC.
    """
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "http://hosts-file.malwareteks.com/"
    paths = ["hosts.txt"]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            try:
                if self.ioc == line.split("127.0.0.1")[1].strip():
                    display(self.module_name, self.ioc, "FOUND",
                            "%s%s" % (url, path))
            # Only the [1] index can fail here (line without "127.0.0.1");
            # the original bare `except:` hid every other error too.
            except IndexError:
                pass
def search(self):
    """Search the abuse.ch SSL blacklist CSV for self.ioc."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://sslbl.abuse.ch/blacklist/"
    for path in ("sslblacklist.csv",):
        csv_feed = Cache(self.module_name, url, path,
                         self.search_method).content
        for row in csv_feed.split("\n"):
            if self.ioc not in row:
                continue
            # Third CSV column carries the listing reason.
            infos = row.split(',')
            display(self.module_name, self.ioc, "FOUND",
                    "%s | %s%s" % (infos[2], url, path))
            return
def search(self):
    """Search the abuse.ch ransomware tracker CSV feed for self.ioc."""
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://ransomwaretracker.abuse.ch/feeds/"
    paths = ["csv"]
    content = Cache(self.module_name, url, paths[0], self.search_method).content
    for line in content.split("\n"):
        try:
            if self.ioc in line:
                # Third CSV column is the threat description (quoted).
                display(
                    self.module_name, self.ioc, "FOUND",
                    "%s | %s%s" % (line.split(",")[2].replace('"', '', 2),
                                   url, paths[0]))
        # Only the [2] index can fail (matching line with <3 columns);
        # the original bare `except:` hid every other error too.
        except IndexError:
            pass
def __init__(self, output, cache_data):
    """Prepare the relation caches and an empty graphviz digraph.

    :param output: destination for the rendered graph
    :param cache_data: backing data handed to the Cache constructor
    """
    # TODO: Store this relations in a redis-like cache
    self.cache = Cache(cache_data)
    for cache_name in (self.DEST_RELS, self.DOM_RELS, self.SRC_RELS,
                       self.SRCDST_RELS, self.SRCLOGIN_RELS,
                       self.SESSIONS_RELS, self.FROM_SESSIONS,
                       self.USER_LIST, self.DOM_LIST, self.SRV_LIST,
                       self.SRVDOM_RELS):
        self.cache.create_cache(cache_name)
    self.output = output
    self.graph = gv.Digraph()
def search(self):
    """Search the DShield suspicious-domain feeds for self.ioc
    (case-insensitive exact match).
    """
    mod.display(self.module_name, "", "INFO", "Searching...")
    url = "https://www.dshield.org/feeds/"
    paths = [
        "suspiciousdomains_Low.txt",
        "suspiciousdomains_Medium.txt",
        "suspiciousdomains_High.txt"
    ]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            # Skip blanks and '#' comments explicitly.  The original relied
            # on line[0] raising IndexError into a bare `except: pass` to
            # skip empty lines, which also hid any real error.
            if line and line[0] != '#' and line.lower() == self.ioc.lower():
                mod.display(self.module_name, self.ioc, "FOUND",
                            "%s%s" % (url, path))
def test_cache():
    """Only (table, column) pairs registered at construction get cached."""
    cache = Cache({('a', 'bla'), ('a', 'foo'), ('b', 'foo')})
    rows = [{'bla': 1, 'xyz': 2}, {'bla': 3, 'xyz': 4}]
    cache.add('a', rows)  # ('a', 'bla') is registered -> cached
    cache.add('b', rows)  # ('b', 'bla') is not registered -> ignored
    assert cache.retrieve('a.bla') == [1, 3]
    assert cache.retrieve('b.bla') == []
def conn_string(self, conn, data, addr):
    """
    Called when a request is received from the listening server.  Parses the
    request URL into host/port, checks the cache, and forwards the request.

    :param conn: connection socket
    :param data: request data
    :param addr: socket address
    """
    request, b = Parser().http_to_dict(data)
    if self.do_cache:
        has, cache = Cache().there_is_cache(data)
    else:
        has = False
        cache = ''
    if not has:
        try:
            url = request['path']
            http_pos = url.find("://")  # find the position of ://
            if http_pos == -1:
                temp = url
            else:
                temp = url[(http_pos + 3):]  # get the rest of the url
            port_pos = temp.find(":")  # find the port if any
            webserver_pos = temp.find("/")  # find the end of the web server
            if webserver_pos == -1:
                webserver_pos = len(temp)
            if port_pos == -1 or webserver_pos < port_pos:
                # default port
                port = 80
                webserver = temp[:webserver_pos]
            else:
                # specific port
                port = int((temp[(port_pos + 1):])[:webserver_pos - port_pos - 1])
                webserver = temp[:port_pos]
            self.proxy_server(webserver, port, conn, data, addr)
        # Compatibility fix: "except Exception, e" / "print e" are
        # Python-2-only; the forms below work on Python 2.6+ and Python 3.
        except Exception as e:
            print(e)
def __get_all_events(self):
    """
    Get all Torrent-TV.ru events

    :return: The list of Torrent-TV.ru events
    :rtype: list
    :raises WebSiteError: when the page yields no channel data
    """
    cache = Cache(self.__settings['path'])
    # Look for the agenda in the cache first.
    events = cache.load(self.__agenda_url)
    if events:
        return events
    # Not cached: fetch it.
    events = []
    # GET http://super-pomoyka.us.to/trash/ttv-list/ttv.json
    channels = tools.get_web_page(self.__agenda_url)
    # Extract every name/url/cat tag as ('tag', 'value') tuples.
    data = re.findall(r'(name|url|cat)":"([^"]*)"', channels, re.U)
    if not (data and type(data) == list and len(data) > 0):
        raise WebSiteError(
            u'Lista de canales no encontrada',
            u'Los de TorrentTV.ru han hecho cambios en la Web',
            time=self.__settings['notify_secs'])
    # Walk the flat tag list three entries (name, url, cat) at a time.
    # Compatibility fix: use floor division — plain '/' yields a float on
    # Python 3, making range() raise TypeError; '//' is identical on Python 2.
    for x in range(0, len(data) // 3):
        name = data[x * 3][1]
        url = data[x * 3 + 1][1]
        cat = data[x * 3 + 2][1]
        events.append({'name': name, 'url': url, 'cat': cat})
    if len(events) == 0:
        raise WebSiteError(u'Problema en TorrentTV',
                           u'No hay canales o no hay enlaces en la Web',
                           time=self.__settings['notify_secs'])
    cache.save(self.__agenda_url, events)
    return events
def get_hashlink(url, settings, minutes=10):
    """Resolve the 40-hex acestream hash for *url*.

    If the URL itself embeds the hash (e.g. 'acestream://<hash>') it is
    extracted directly; otherwise the page (and, if needed, a linked page)
    is scraped, with results cached.

    :param url: acestream link or web page containing one
    :param settings: addon settings dict ('path', 'notify_secs', ...)
    :param minutes: cache lifetime for resolved hashes
    :raises WebSiteError: when no hash can be found
    """
    # Does the URL itself contain the hash?
    if 'http' not in url:
        ace_hash = re.findall(r'([a-f0-9]{40})', url, re.U)
        if ace_hash:
            # Bug fix: return the captured hash itself.  The original
            # returned url[12:], silently assuming an exactly-12-character
            # scheme prefix ('acestream://') and breaking for anything else.
            return ace_hash[0]
        write_log("URL mal formada: '%s'" % url, xbmc.LOGERROR)
        raise WebSiteError(u'Enlace mal formado',
                           u'Puede que hayan hecho cambios en la Web',
                           time=settings['notify_secs'])
    # Look for the hash in the cache first.
    cache = Cache(settings['path'], minutes=minutes)
    c_hash = cache.load(url)
    if c_hash:
        return c_hash['hash']
    # Not cached: fetch the page and look for an acestream hash.
    page = get_web_page(url)
    ace_hash = find_hash(page)
    if not ace_hash:
        # Not found: look for a URL that may contain the hash.
        hash_url = find_hash_url(page)
        if hash_url:
            # Follow it and search the new page.
            page = get_web_page(hash_url)
            ace_hash = find_hash(page)
        if not ace_hash:
            write_log("Hashlink no encontrado en '%s'" % url, xbmc.LOGERROR)
            raise WebSiteError(
                u'Enlace no encontrado',
                u'El enlace está en otra página y no se puede llegar a él',
                time=settings['notify_secs'])
    # Cache the resolved hash and return it.
    cache.save(url, {"hash": ace_hash})
    return ace_hash
def run_scraper(self, target_url, target_element_name):
    """
    Run the scraper, check the cache, and log the differences.

    :param target_url: page to scrape
    :param target_element_name: element to extract from the page
    :returns: (message, diff) tuple
    """
    # fire up scraper and cache objects
    scraper = Scraper()
    cache = Cache()
    # Bug fix: initialize diff so the failure branch can still return
    # (message, diff); the original raised UnboundLocalError when the
    # fetch failed because diff was only bound inside the success branch.
    diff = u''
    # define the target and cached content
    target_content = scraper.fetch_site_content(target_url, target_element_name)
    cached_content = cache.fetch_cache(target_url)
    # check the cache and report our findings
    if target_content is not None:
        diff = cache.diff_cache(target_content, cached_content)
        # Bug fix: compare by value.  The original used "is not u''", an
        # identity check whose result for strings is interpreter-dependent.
        if diff != u'':
            logging.info('The target differs from the cache.')
            logging.info(diff)
            logging.info('Updating cache...')
            cache.update_cache(target_url, target_content)
            logging.info('Cache updated.')
            logging.info('Sending mail...')
            email_result = self.send_email(target_url, diff)
            logging.info(email_result)
            message = 'Success! Cache updated.'
        else:
            logging.info('The target and cache match. Not altering cache.')
            message = 'Success! Cache not altered.'
    else:
        logging.warning('Unable to fetch requested page! D:')
        logging.error('Scraping falure.')
        message = 'Failure!'
    logging.info('Scraper finished.')
    return message, diff
def search(self):
    """Check the Spamhaus DROP/EDROP lists for a network containing self.ioc."""
    mod.display(self.module_name, "", "INFO", "Searching...")
    url = "https://www.spamhaus.org/drop/"
    paths = [
        "drop.txt",
        "edrop.txt",
        "dropv6.txt",
    ]
    for path in paths:
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            # Skip blanks and ';' comments explicitly; the original relied
            # on line[0] raising IndexError into a bare `except:` for this.
            if not line or line[0] == ';':
                continue
            try:
                # First token is a CIDR network; netaddr raises on
                # unparsable values, which we treat as "no match".
                if IPAddress(self.ioc) in IPNetwork(line.split(" ")[0]):
                    mod.display(self.module_name, self.ioc, "FOUND",
                                "%s%s" % (url, path))
            except Exception:
                pass
def search(self):
    """Search the ZeuS Tracker blocklists (domains, IPs, compromised URLs)
    for self.ioc and report the first hit.
    """
    display(self.module_name, self.ioc, "INFO", "Searching...")
    url = "https://zeustracker.abuse.ch/"
    paths = [
        "blocklist.php?download=baddomains",
        "blocklist.php?download=ipblocklist",
        "blocklist.php?download=compromised"
    ]
    for path in paths:
        if self.type == "URL":
            # Strip the scheme from the IOC; if there is none, keep it as is.
            # NOTE: self.ioc is mutated, so this applies to later paths too.
            try:
                self.ioc = self.ioc.split("://")[1]
            except:
                pass
        content = Cache(self.module_name, url, path, self.search_method).content
        for line in content.split("\n"):
            if path.split("=")[1] == "compromised":
                # The compromised list holds full URLs: exact-match only.
                if self.type == "URL":
                    if self.ioc == line:
                        display(self.module_name, self.ioc, "FOUND",
                                "%s%s" % (url, path))
                        return
            else:
                # Domain/IP lists: keep only the host part of the entry.
                line = line.split("/")[0]
                try:
                    line = line.split(":")[0]
                except:
                    pass
                if self.type == "domain" and parse.is_valid_domain(
                        line.strip()):
                    if line.strip() == self.ioc:
                        display(self.module_name, self.ioc, "FOUND",
                                "%s%s" % (url, path))
                        return
                elif self.type == "IPv4" and parse.is_valid_ipv4_address(
                        line.strip()):
                    if line.strip() == self.ioc:
                        display(self.module_name, self.ioc, "FOUND",
                                "%s%s" % (url, path))
                        return
def __init__(self):
    """Build the fixed report registry."""
    libCache = Cache()
    # Reports: (id, display label, report key)
    report_defs = (
        (1, 'Inventory by Location', 'inventory_by_location'),
        (2, 'Inventory by Vendor', 'inventory_by_vendor'),
        (3, 'Sales Commission', 'commission_sales'),
        (4, 'Tech Commission', 'commission_tech'),
        (5, 'Employee Time Off', 'employee_time_off'),
        (6, 'Hourly Time Slips', 'time_slips'),
        (7, 'Items to Purchase', 'items_to_purchase'),
        (8, 'Command Price Book', 'command_price_book'),
    )
    self._rgoReport = [TPReportEntry(num, label, key)
                       for num, label, key in report_defs]