def extract_torrents(data):
    """Parse a search-results page and collect torrents for Pulsar.

    Returns a list of ``{"name": ..., "uri": ...}`` dicts, where ``uri``
    points at the .torrent file on itorrents.org.  Returns [] when the
    page cannot be parsed.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        table = common.table(data, 2)
        size = [table[x][3] for x in range(len(table))]  # list the sizes (column 3)
        lname = [common.parse_tag(table[x][1])[1][1] for x in range(len(table))]  # list the names (column 1)
        cont = 0
        results = []
        for cm, page in enumerate(re.findall('/torrent/(.*?)"', data)):
            # NOTE(review): the doubled '/torrent//torrent/' path looks odd --
            # confirm against the itorrents.org URL scheme before changing it.
            torrent = "http://itorrents.org/torrent//torrent/" + page
            # cm + 1: size/lname are offset by one row relative to the regex matches.
            name = size[cm + 1] + ' - ' + lname[cm + 1] + ' - ' + settings.name_provider
            if filters.verify(name, size[cm + 1]):
                results.append({"name": name, "uri": torrent})  # queue the torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        # Fix: the sibling providers return [] on failure; this one fell
        # through and returned None, breaking callers that iterate the result.
        return []
def extract_torrents(data):
    """Parse a search-results page and collect torrents for Pulsar.

    Returns a list of ``{"name": ..., "uri": ...}`` dicts, where ``uri``
    is the download link resolved from the torrent's detail page.
    Returns [] when the page cannot be parsed.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        table = common.table(data, 1)
        cont = 0
        results = []
        for cm, (page, name) in enumerate(re.findall('/torrent/(.*?)">(.*?)</a>', data, re.S)):
            size = table[cm][5]  # column 5 of the matching table row holds the size
            page = settings.url + "/torrent/" + page
            name = size + ' - ' + name.replace('\n', '') + ' - ' + settings.name_provider
            if filters.verify(name, size):
                # Resolve the download link only for accepted results:
                # common.getlinks() fetches the detail page, so calling it
                # before the filter check wasted one request per rejected row.
                torrent = common.getlinks(page)
                results.append({"name": name, "uri": torrent})  # queue the torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Parse a search-results page and collect torrents for Pulsar.

    Returns a list of ``{"name": ..., "uri": ...}`` dicts, or [] when
    the page cannot be parsed.
    """
    try:
        filters.information()  # print filters settings
        cleaned = common.clean_html(data)
        rows = common.table(cleaned, 2)
        accepted = 0
        results = []
        for idx, (tid, slug) in enumerate(re.findall(r'/torrent/(.*?)/(.*?).html', cleaned)):
            # rows is offset by one relative to the regex matches.
            size = rows[idx + 1][2]
            title = slug.replace('-', ' ').title()
            name = size + ' - ' + title + ' - ' + settings.name_provider
            uri = settings.url + '/down/' + tid + '.torrent'  # torrent link to send to Pulsar
            if not filters.verify(name, size):
                provider.log.warning(filters.reason)
            else:
                results.append({"name": name, "uri": uri})
                accepted += 1
            if accepted == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(accepted) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
#! /usr/local/bin/python from common import table from common import Stack import sys task2 = table(sys.argv[1]) mx = task2.construct() for line in open(sys.argv[2], 'rU'): trig = 0 stk = Stack() stk.push('S $') # print '***',stk.prt() # line=line[:-1] line = line[:-1] + '$' # line+='$' print '++', line index = 0 a = line[index] while not stk.is_empty(): print '--', stk.prt() # print '||',a X = stk.top() if X.isupper(): # print 'run 1','|',X,'|',a,'|',mx.get(X,a) if mx.get(X, a) != -1 and mx.get(X, a) != None: print mx.get(X, a) stk.pop() stk.push(mx.get(X, a)[0]) else: print 'reject 1' trig = 1
#! /usr/local/bin/python from common import table import sys task2=table(sys.argv[1]) mx=task2.construct() if mx is not None: task2.table_print(mx) else: print 'Grammar is not LL(1)!'