def train(self):
    """Run the full training loop.

    Initializes variables, optionally restores pre-trained weights, then
    steps the optimizer for ``num_steps + 1`` iterations, writing summaries
    and checkpoints every ``save_interval`` steps and logging the loss for
    every step.
    """
    self.train_setup()
    self.sess.run(tf.global_variables_initializer())
    # Restore pre-trained weights when a checkpoint path is configured.
    if self.parameters.pretrain_file is not None:
        self.load(self.loader, self.parameters.pretrain_file)
    queue_threads = tf.train.start_queue_runners(coord=self.coord,
                                                 sess=self.sess)
    for step in range(self.parameters.num_steps + 1):
        t0 = time.time()
        feed = {self.curr_step: step}
        if step % self.parameters.save_interval == 0:
            # On save steps, also fetch batches/predictions so the summary
            # op has everything it needs, then checkpoint.
            loss_value, images, labels, preds, summary, _ = self.sess.run(
                [self.reduced_loss, self.image_batch, self.label_batch,
                 self.pred, self.total_summary, self.train_op],
                feed_dict=feed)
            self.summary_writer.add_summary(summary, step)
            self.save(self.saver, step)
        else:
            loss_value, _ = self.sess.run(
                [self.reduced_loss, self.train_op], feed_dict=feed)
        elapsed = time.time() - t0
        print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(
            step, loss_value, elapsed))
        write_log('{:d}, {:.3f}'.format(step, loss_value),
                  self.parameters.logfile)
    self.coord.request_stop()
    self.coord.join(queue_threads)
def lurk_rand_link(message):
    """Reply to the chat with a random Lurkmore article URL.

    Follows the site's random-page redirect and sends the final URL,
    percent-decoded for readability, then logs the incoming message.
    """
    response = requests.get('https://lurkmore.to/Служебная:Random',
                            headers=tools.rheader,
                            proxies=tools.proxies)
    link = urllib.parse.unquote(response.url)
    bot.send_message(message.chat.id,
                     f'Случайная ссылка на Луркоморье:\n{link}')
    tools.write_log(message)
def __get_number_of_pages(page):
    """Extract the total page count from a ``Page N of M`` marker in *page*.

    Returns the count as an int, or None (after logging) when the marker
    is not found.
    """
    totals = re.findall(r'>[Pp][Aa][Gg][Ee]\s[0-9]+\sof\s([0-9]+)<',
                        page, re.U)
    if totals:
        return int(totals[0])
    tools.write_log(
        u'LiveFootballVideo: No se encuentra el número de páginas')
    return None
def __init__(self, path, minutes=30):
    """Create the cache handler.

    :param path: add-on base path; the cache lives in resources/cache.
    :param minutes: cache entry lifetime in minutes.
    """
    self.__path = path
    self.__minutes = minutes
    directory = xbmc.translatePath(
        os.path.join(path, 'resources', 'cache'))
    # Create the cache directory on first use only.
    if os.path.isdir(directory):
        return
    tools.write_log("Creating Cache: '%s'" % directory)
    os.mkdir(directory)
def save(self, url, data):
    """Store *data* for *url* in the cache, stamped with the current time.

    The cache filename is the SHA-224 of the URL. Write failures are
    logged and swallowed (best-effort caching).
    """
    try:
        payload = {'timestamp': time.time(), 'data': data}
        cache_file = tools.build_path(
            self.__path,
            '%s.json' % hashlib.sha224(url).hexdigest(),
            'cache')
        with open(cache_file, 'w') as fp:
            json.dump(payload, fp, indent=4)
        tools.write_log("Cache: new '%s'" % url)
    except IOError:
        tools.write_log("Cache: can't write '%s'" % url, xbmc.LOGERROR)
def update_metadata(self, channels):
    """Merge live EPG programme data into each channel entry.

    Channels with no EPG match or with nothing currently on air are left
    untouched. Returns the (mutated) channel list; if no EPG could be
    fetched the list is returned unchanged.
    """
    epg = self.__get_epg_data()
    if not epg:
        return channels
    tools.write_log('Updating EPG metadata')
    for channel in channels:
        # The displayed name may carry color markup; strip it for lookup.
        chn = channel['name'].split(' [COLOR ')[0]
        channel_epg = self.__get_by_channel_name(epg, chn)
        if not channel_epg:
            continue
        # Pick the programme currently being broadcast.
        epg_info = self.__get_program_data(channel_epg['events'])
        if not epg_info:
            continue
        # Compose name, icon and plot from the programme data.
        hora = datetime.datetime.fromtimestamp(
            epg_info['starts']).strftime('%H:%M')
        channel.update(epg_info)
        channel['name'] = '%s [COLOR yellow](%s %s)[/COLOR]' % (
            chn, hora, epg_info['title'])
        if epg_info['image'] and epg_info['image'].startswith('http'):
            channel['icon'] = epg_info['image']
        channel['plot'] = (
            '[B]%s %s[/B]%s[CR]Finaliza: %s[CR][CR][LIGHT]%s[/LIGHT]' % (
                hora,
                epg_info['title'].encode('utf-8'),
                ' (%s)' % epg_info['tvshowtitle'].encode('utf-8')
                if epg_info['tvshowtitle'] else '',
                datetime.datetime.fromtimestamp(
                    epg_info['ends']).strftime('%H:%Mh'),
                epg_info['plot'].encode('utf-8'),
            ))
    return channels
def load(self, url, log_read_ok=True):
    """Return the cached data for *url*, or None when absent or expired.

    :param log_read_ok: when True, successful cache hits are also logged.
    """
    try:
        cache_file = tools.build_path(
            self.__path,
            '%s.json' % hashlib.sha224(url).hexdigest(),
            'cache')
        with open(cache_file, 'r') as fp:
            content = json.load(fp)
    except IOError:
        # Missing/unreadable cache entry is a normal miss.
        return None
    expires = (datetime.datetime.fromtimestamp(content['timestamp'])
               + datetime.timedelta(minutes=self.__minutes))
    if datetime.datetime.now() <= expires:
        if log_read_ok:
            tools.write_log("Cache: '%s' read ok" % url)
        return content['data']
    tools.write_log("Cache: '%s' expired" % url)
    return None
def __get_program_data(epg_programs):
    """Pick the programme currently on air from a list of EPG events.

    Returns a Kodi-style info dict for the first real event (eventid != 0)
    whose [starts, ends) window contains "now", or None when there is no
    match. A non-list payload is logged and yields None.
    """
    now = time.time()
    if type(epg_programs) == list:
        for event in epg_programs:
            # noinspection PyTypeChecker
            if event['eventid'] != 0 and \
                    float(event['starts']) <= now < float(event['ends']):
                genre = event['genre']
                if 'Cine' in genre:
                    mediatype = 'movie'
                elif 'Serie' in genre:
                    mediatype = 'episode'
                else:
                    mediatype = 'tvshow'
                return {
                    'starts': event['starts'],
                    'ends': event['ends'],
                    'title': event['title'],
                    'tvshowtitle': event['subtitle'],
                    'plot': event['description'],
                    'plotoutline': event['description2'],
                    'genre': genre,
                    'image': event['image'].split('?')[0],
                    'cast': event['actor'].split(', '),
                    'director': event['director'],
                    'credits': event['creator'],
                    'aired': datetime.datetime.fromtimestamp(
                        event['starts']).strftime('%Y-%m-%d'),
                    'duration': str(int(event['duration'] / 60)),
                    'mediatype': mediatype,
                }
    else:
        tools.write_log('EPG Events type:%s "%s"' %
                        (type(epg_programs), epg_programs))
    return None
def get_events_today_and_tomorrow(self):
    """ Get today and tomorrow Arenavision events
    :return: The list of Arenavision events
    :rtype: list
    """
    now = datetime.datetime.now()
    # Events only carry a day-of-month prefix, so match on that.
    valid_days = (
        int(now.strftime('%d')),
        int((now + datetime.timedelta(days=1)).strftime('%d')),
    )
    selected = []
    for event in self.get_all_events():
        try:
            if int(event['date'][:2]) in valid_days:
                selected.append(event)
        except ValueError:
            # Malformed date: log and skip the event.
            tools.write_log(
                "Fecha '%s' de '%s' incorrecta" % (event['date'],
                                                   event['name']),
                xbmc.LOGERROR)
    return selected
def __get_links(self, channels, urls):
    """ Get link list of channels URL's for a given string
    :param channels: The channels string
    :type: channels: str
    :return: The list of Arenavision events
    :rtype: list
    """
    ch_list = re.findall(r'[\d-]+\s*\[[\w]+\]', channels, re.U)
    if not ch_list:
        tools.write_log('No se pueden extraer los enlaces: %s' % ch_list,
                        xbmc.LOGERROR)
        return None
    # Artwork paths are identical for every channel; compute them once.
    icon = tools.build_path(self.__settings['path'], 'arenavision.jpg')
    fanart = tools.build_path(self.__settings['path'],
                              'arenavision_art.jpg')
    chs = []
    for ch_data in ch_list:
        ch_numbers = re.findall(r'[\d]+', ch_data, re.U)
        ch_lang = re.findall(r'\[[\w]+\]', ch_data, re.U)
        lang = ch_lang[0] if ch_lang else '[---]'
        for ch_number in ch_numbers:
            chs.append({
                'name': 'AV%s %s' % (ch_number, lang),
                'icon': icon,
                'fanart': fanart,
                'link': urls[int(ch_number) - 1],
            })
    return chs
def __get_epg_data(self):
    """ Get a list containing the EPG data
    :return: The list containing the EPG data
    :rtype: list
    """
    cache = Cache(self.__settings['path'], minutes=180)
    # Serve from cache when a fresh entry exists.
    cached = cache.load(self.__channels_epg, log_read_ok=False)
    if cached:
        return cached
    epg = []
    # Not cached: fetch the page.
    try:
        page = tools.get_web_page(self.__channels_epg)
    except WebSiteError as e:
        tools.write_log('%s: %s' % (e.title, e.message))
        return epg
    # Extract the embedded JSON payload.
    data = re.findall(r'data-json="(.*)"', page, re.U)
    if not data:
        tools.write_log('data-json= not found in %s' % self.__channels_epg)
        return epg
    # Parse and persist the EPG.
    try:
        epg = json.loads(tools.str_sanitize(data[0]))['channels']
        cache.save(self.__channels_epg, epg)
    except (ValueError, IndexError, KeyError):
        tools.write_log('Malformed data-json= in %s' % self.__channels_epg)
    return epg
def sticker_id(message):
    """Print the Telegram file id of a received sticker, then log it."""
    file_id = message.sticker.file_id
    print(f'File ID: {file_id}')
    tools.write_log(message)
def send_kek(message):
    """Reply to the chat with the fixed text 'KEK' and log the message."""
    chat_id = message.chat.id
    bot.send_message(chat_id, 'KEK')
    tools.write_log(message)
def start_message(message):
    """Greet the user by first name, send the cat sticker, and log.

    Fix: the sticker file handle was opened without ever being closed;
    use a context manager so it is released after sending.
    """
    bot.send_message(message.chat.id,
                     f'Привет, {message.from_user.first_name}!')
    with open('stickers/cat.webp', 'rb') as sticker:
        bot.send_sticker(message.chat.id, sticker)
    tools.write_log(message)
"""Run the Instagram parsing pipeline for every configured tag."""
from tools.key_extractor import KeyExtractor
from tools.data_parsing import PostsParser
from tools import write_log
from settings.general import TAGS

for tag in TAGS:
    status = 'The parsing for tag: #' + tag + ' is started.'
    print(status)
    write_log(status)
    # Collect post URLs for the tag, then parse each post.
    extractor = KeyExtractor()
    extractor.parse_insta_pages(tag, 50, True)
    parser = PostsParser()
    parser.parse_urls(tag, True)
def __init__(self, response):
    """Solve a CloudFlare IUAM javascript challenge from *response*.

    Sets ``__is_cloudflare`` (False when the page is not a recognizable
    challenge or solving fails) and, on success, ``__auth_url`` — the URL
    to request to obtain clearance — after sleeping the wait time the
    page demands.

    Fix: the original marker check ``not ('challenge-form' and '...' and
    'jschl-answer' in response['data'])`` only tested the LAST substring,
    because the first two non-empty string literals are always truthy.
    All three markers are now required, as intended.
    """
    self.__is_cloudflare = True
    markers = ('challenge-form', 's,t,o,p,b,r,e,a,k,i,n,g,f',
               'jschl-answer')
    if not all(marker in response['data'] for marker in markers):
        tools.write_log('[ERROR] CloudFlare ha cambiado de reto')
        self.__is_cloudflare = False
        return
    # noinspection PyBroadException
    try:
        self.__cfduid = re.findall(r'__cfduid=[\w\d]+',
                                   response['headers']['set-cookie'],
                                   re.U)
        self.__wait = int(
            re.findall(r'\}, ([\d]+)\);', response["data"], re.U)[0])
        challenge = re.findall(r'challenge-form\'\);\s*(.*)a.v',
                               response['data'], re.U)[0]
        javascript = re.findall(
            r'setTimeout\(function\(\){\s*.*?.*:(.*?)};',
            response['data'], re.U)[0]
        jschl_vc = re.findall(r'name="jschl_vc" value="(.+?)"/>',
                              response['data'], re.U)[0]
        js_value = self.__decode(javascript)
        # Replay the challenge arithmetic statement by statement.
        # NOTE(review): eval() on content derived from a remote page is
        # inherited design — operands are numbers from __decode and the
        # operator char comes from the page; flagged, not replaced.
        for line in challenge.split(';'):
            sections = line.split('=')
            value = self.__decode(sections[1])
            js_value = int(
                eval("{0}{1}{2}".format(str(js_value),
                                        sections[0][-1],
                                        str(value))))
        answer = js_value + len(urlparse.urlparse(response['url']).netloc)
        url = '/'.join(response['url'].split('/')[:-1])
        if '<form id="challenge-form" action="/cdn' in response['data']:
            url = '/'.join(response['url'].split('/')[:3])
        if 'type="hidden" name="pass"' in response['data']:
            self.__auth_url = \
                '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                    url,
                    urllib.quote_plus(
                        re.compile('name="pass" value="(.*?)"').findall(
                            response['data'])[0]),
                    jschl_vc, answer)
            tools.write_log("WAIT %s..." % self.__wait)
            xbmc.sleep(self.__wait)
        else:
            self.__auth_url = urlparse.urljoin(
                response['url'],
                '/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (
                    jschl_vc, answer))
    except:
        tools.write_log(
            '[ERROR] CloudFlare ha reventado en pleno reto javascript')
        self.__is_cloudflare = False
        return
    # Fallback: some challenges come as a Refresh header instead.
    if not self.__auth_url and "refresh" in response["headers"]:
        self.__header_data = {}
        # noinspection PyBroadException
        try:
            self.__wait = int(
                response["headers"]["refresh"].split(";")[0])
            self.__auth_url = "%s%s?%s" % (
                '/'.join(response['url'].split('/')[:-1]),
                response["headers"]["refresh"].split("=")[1].split("?")[0],
                urllib.urlencode(
                    {'pass': response["headers"]["refresh"].split("=")[2]}))
            tools.write_log("WAIT %ss..." % self.__wait)
            xbmc.sleep(self.__wait)
        except:
            tools.write_log(
                '[ERROR] CloudFlare ha reventado en pleno reto (headers)')
            self.__is_cloudflare = False
            return